From d0b2f91bede3bd5e3d24dd6803e56eee959c1797 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Thu, 20 Oct 2016 00:10:27 -0300
Subject: Linux-libre 4.8.2-gnu

---
 drivers/net/ethernet/8390/ax88796.c | 43 +-
 drivers/net/ethernet/adi/bfin_mac.c | 48 +-
 drivers/net/ethernet/adi/bfin_mac.h | 1 -
 drivers/net/ethernet/agere/et131x.c | 60 +-
 drivers/net/ethernet/allwinner/sun4i-emac.c | 54 +-
 drivers/net/ethernet/altera/altera_tse.h | 1 -
 drivers/net/ethernet/altera/altera_tse_ethtool.c | 26 +-
 drivers/net/ethernet/altera/altera_tse_main.c | 17 +-
 drivers/net/ethernet/amd/au1000_eth.c | 55 +-
 drivers/net/ethernet/amd/au1000_eth.h | 1 -
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +-
 drivers/net/ethernet/apm/xgene/Kconfig | 1 +
 .../net/ethernet/apm/xgene/xgene_enet_ethtool.c | 22 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 257 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 11 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 215 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 33 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 239 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h | 8 +
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 66 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 3 +
 drivers/net/ethernet/arc/emac.h | 1 -
 drivers/net/ethernet/arc/emac_main.c | 86 +-
 drivers/net/ethernet/atheros/alx/main.c | 14 +-
 drivers/net/ethernet/atheros/alx/reg.h | 1 +
 drivers/net/ethernet/aurora/nb8800.c | 73 +-
 drivers/net/ethernet/aurora/nb8800.h | 1 -
 drivers/net/ethernet/broadcom/Kconfig | 44 +-
 drivers/net/ethernet/broadcom/Makefile | 2 +
 drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2 +-
 drivers/net/ethernet/broadcom/bcmsysport.c | 49 +-
 drivers/net/ethernet/broadcom/bcmsysport.h | 1 -
 drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 266 +
 drivers/net/ethernet/broadcom/bgmac-bcma.c | 315 +
 drivers/net/ethernet/broadcom/bgmac-platform.c | 185 +
 drivers/net/ethernet/broadcom/bgmac.c | 808 +-
 drivers/net/ethernet/broadcom/bgmac.h | 117 +-
 drivers/net/ethernet/broadcom/bnx2.c | 12 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 113 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 743 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt.h | 90 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 265 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 1 +
 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 87 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 1 +
 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 3 +
 drivers/net/ethernet/broadcom/tg3.c | 26 +-
 drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 6 +-
 drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 57 +-
 drivers/net/ethernet/cadence/macb.c | 77 +-
 drivers/net/ethernet/cadence/macb.h | 1 -
 .../net/ethernet/cavium/liquidio/cn66xx_device.c | 61 +-
 .../net/ethernet/cavium/liquidio/cn66xx_device.h | 5 +-
 .../net/ethernet/cavium/liquidio/cn68xx_device.c | 13 +-
 .../net/ethernet/cavium/liquidio/cn68xx_device.h | 1 -
 drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h | 1 -
 drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 1009 +-
 drivers/net/ethernet/cavium/liquidio/lio_main.c | 1421 ++-
 .../net/ethernet/cavium/liquidio/liquidio_common.h | 408 +-
 .../net/ethernet/cavium/liquidio/octeon_config.h | 16 +-
 .../net/ethernet/cavium/liquidio/octeon_console.c | 50 +-
 .../net/ethernet/cavium/liquidio/octeon_device.c | 262 +-
 .../net/ethernet/cavium/liquidio/octeon_device.h | 52 +-
 drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 213 +-
 drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 41 +-
 drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 85 +-
 drivers/net/ethernet/cavium/liquidio/octeon_main.h | 25 +-
 .../net/ethernet/cavium/liquidio/octeon_mem_ops.c | 24 +-
 .../net/ethernet/cavium/liquidio/octeon_network.h | 252 +-
 drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 67 +-
 drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 154 +-
 .../net/ethernet/cavium/liquidio/request_manager.c | 313 +-
 .../ethernet/cavium/liquidio/response_manager.c | 30 +-
 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 106 +-
 drivers/net/ethernet/cavium/thunder/nic.h | 1 +
 drivers/net/ethernet/cavium/thunder/nic_main.c | 11 +-
 drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20 +-
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 64 +-
 drivers/net/ethernet/chelsio/Kconfig | 16 +-
 drivers/net/ethernet/chelsio/Makefile | 1 +
 drivers/net/ethernet/chelsio/cxgb4/Makefile | 1 -
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 5 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 375 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 97 +-
 drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 +-
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 9 +-
 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 2 +
 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 30 +-
 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8 +
 .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 286 +-
 drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +-
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 16 +-
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 67 +-
 drivers/net/ethernet/chelsio/libcxgb/Makefile | 3 +
 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 498 +
 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h | 334 +
 drivers/net/ethernet/cirrus/cs89x0.c | 12 +-
 drivers/net/ethernet/cisco/enic/enic_ethtool.c | 28 +-
 drivers/net/ethernet/cisco/enic/enic_main.c | 4 +-
 drivers/net/ethernet/davicom/dm9000.c | 8 +-
 drivers/net/ethernet/dec/tulip/de4x5.c | 2 +-
 drivers/net/ethernet/dnet.c | 48 +-
 drivers/net/ethernet/dnet.h | 1 -
 drivers/net/ethernet/emulex/benet/Kconfig | 8 -
 drivers/net/ethernet/emulex/benet/be.h | 58 +-
 drivers/net/ethernet/emulex/benet/be_cmds.c | 160 +-
 drivers/net/ethernet/emulex/benet/be_cmds.h | 16 +-
 drivers/net/ethernet/emulex/benet/be_ethtool.c | 66 +-
 drivers/net/ethernet/emulex/benet/be_main.c | 334 +-
 drivers/net/ethernet/emulex/benet/be_roce.c | 2 +-
 drivers/net/ethernet/emulex/benet/be_roce.h | 2 +-
 drivers/net/ethernet/ethoc.c | 40 +-
 drivers/net/ethernet/ezchip/nps_enet.c | 27 +-
 drivers/net/ethernet/faraday/ftgmac100.c | 271 +-
 drivers/net/ethernet/freescale/fec.h | 4 +
 drivers/net/ethernet/freescale/fec_main.c | 61 +-
 drivers/net/ethernet/freescale/gianfar.c | 22 +-
 drivers/net/ethernet/freescale/gianfar.h | 3 +-
 drivers/net/ethernet/hisilicon/Kconfig | 14 +-
 drivers/net/ethernet/hisilicon/Makefile | 1 +
 drivers/net/ethernet/hisilicon/hisi_femac.c | 1007 ++
 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 44 +-
 drivers/net/ethernet/hisilicon/hns/hnae.c | 19 +-
 drivers/net/ethernet/hisilicon/hns/hnae.h | 20 +-
 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 60 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 8 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 291 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 5 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 334 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 45 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 302 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h | 7 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 16 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 7 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2 +-
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 21 +-
 .../net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 10 +-
 drivers/net/ethernet/hisilicon/hns/hns_enet.c | 160 +-
 drivers/net/ethernet/hisilicon/hns/hns_enet.h | 2 +-
 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 63 +-
 drivers/net/ethernet/hisilicon/hns_mdio.c | 204 +-
 drivers/net/ethernet/ibm/emac/core.c | 34 +-
 drivers/net/ethernet/intel/Kconfig | 43 -
 drivers/net/ethernet/intel/e1000e/82571.c | 6 +-
 drivers/net/ethernet/intel/e1000e/e1000.h | 1 +
 drivers/net/ethernet/intel/e1000e/ich8lan.c | 3 +-
 drivers/net/ethernet/intel/e1000e/netdev.c | 71 +-
 drivers/net/ethernet/intel/fm10k/fm10k.h | 4 +-
 drivers/net/ethernet/intel/fm10k/fm10k_common.c | 6 +-
 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 7 +-
 drivers/net/ethernet/intel/fm10k/fm10k_main.c | 19 +-
 drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 2 +
 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 40 +-
 drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 333 +-
 drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 38 +-
 drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2 +
 drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 12 +-
 drivers/net/ethernet/intel/i40e/i40e.h | 15 +-
 drivers/net/ethernet/intel/i40e/i40e_client.c | 45 +-
 drivers/net/ethernet/intel/i40e/i40e_common.c | 61 +-
 drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 16 +-
 drivers/net/ethernet/intel/i40e/i40e_devids.h | 1 -
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 52 +-
 drivers/net/ethernet/intel/i40e/i40e_main.c | 998 +-
 drivers/net/ethernet/intel/i40e/i40e_prototype.h | 2 +
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4 +-
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6 +
 drivers/net/ethernet/intel/i40evf/i40e_common.c | 1 -
 drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1 -
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 3 +-
 drivers/net/ethernet/intel/i40evf/i40evf_main.c | 8 +-
 .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 8 +
 drivers/net/ethernet/intel/igb/igb.h | 7 +-
 drivers/net/ethernet/intel/igb/igb_main.c | 22 +-
 drivers/net/ethernet/intel/igb/igb_ptp.c | 92 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 -
 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 3 -
 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 89 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 12 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 192 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 4 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 1 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 4 +-
 drivers/net/ethernet/intel/ixgbevf/defines.h | 1 +
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2 +
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 24 +-
 drivers/net/ethernet/intel/ixgbevf/vf.c | 96 +-
 drivers/net/ethernet/intel/ixgbevf/vf.h | 3 +-
 drivers/net/ethernet/lantiq_etop.c | 37 +-
 drivers/net/ethernet/marvell/mvneta.c | 1 +
 drivers/net/ethernet/marvell/mvpp2.c | 50 +-
 drivers/net/ethernet/marvell/pxa168_eth.c | 72 +-
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 308 +-
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 38 +-
 drivers/net/ethernet/mellanox/mlx4/Kconfig | 7 -
 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 280 +-
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 34 +-
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 190 +-
 drivers/net/ethernet/mellanox/mlx4/en_rx.c | 126 +-
 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 282 +-
 drivers/net/ethernet/mellanox/mlx4/eq.c | 4 +-
 drivers/net/ethernet/mellanox/mlx4/fw.c | 41 +
 drivers/net/ethernet/mellanox/mlx4/fw.h | 1 +
 drivers/net/ethernet/mellanox/mlx4/intf.c | 5 +-
 drivers/net/ethernet/mellanox/mlx4/main.c | 6 +-
 drivers/net/ethernet/mellanox/mlx4/mcg.c | 8 +-
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 63 +-
 drivers/net/ethernet/mellanox/mlx4/mr.c | 2 +-
 drivers/net/ethernet/mellanox/mlx4/pd.c | 4 +-
 drivers/net/ethernet/mellanox/mlx4/port.c | 12 +
 .../net/ethernet/mellanox/mlx4/resource_tracker.c | 22 +-
 drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1 +
 drivers/net/ethernet/mellanox/mlx5/core/Makefile | 12 +-
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 201 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 82 +-
 .../net/ethernet/mellanox/mlx5/core/en_common.c | 161 +
 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 30 +-
 .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 640 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 101 +-
 .../ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 586 +
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 951 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 431 +
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 41 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 335 +
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 31 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 153 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 117 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 11 +-
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 207 +-
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 86 +-
 .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 646 ++
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 67 +
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 12 +
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 307 +-
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 9 +
 .../net/ethernet/mellanox/mlx5/core/fs_counters.c | 150 +-
 drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6 +
 drivers/net/ethernet/mellanox/mlx5/core/health.c | 7 +-
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 36 +-
 drivers/net/ethernet/mellanox/mlx5/core/port.c | 48 +-
 drivers/net/ethernet/mellanox/mlx5/core/rl.c | 209 +
 drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 5 +-
 drivers/net/ethernet/mellanox/mlx5/core/srq.c | 265 +-
 drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 4 +
 drivers/net/ethernet/mellanox/mlx5/core/vport.c | 12 +
 drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +-
 drivers/net/ethernet/mellanox/mlxsw/cmd.h | 75 +
 drivers/net/ethernet/mellanox/mlxsw/core.c | 30 +-
 drivers/net/ethernet/mellanox/mlxsw/core.h | 17 +-
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 78 +-
 drivers/net/ethernet/mellanox/mlxsw/port.h | 1 +
 drivers/net/ethernet/mellanox/mlxsw/reg.h | 1397 ++-
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2468 +++--
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 268 +-
 .../net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 14 +-
 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 10 +-
 .../net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 91 +
 .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 1863 ++++
 .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 512 +-
 drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1 +
 drivers/net/ethernet/mellanox/mlxsw/trap.h | 9 +
 drivers/net/ethernet/neterion/s2io.c | 2 +-
 drivers/net/ethernet/netronome/nfp/nfp_net.h | 2 +-
 .../net/ethernet/netronome/nfp/nfp_net_common.c | 50 +-
 .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 2 +-
 .../net/ethernet/netronome/nfp/nfp_netvf_main.c | 11 +-
 drivers/net/ethernet/nxp/lpc_eth.c | 65 +-
 drivers/net/ethernet/pasemi/pasemi_mac.c | 28 +-
 drivers/net/ethernet/pasemi/pasemi_mac.h | 1 -
 drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c | 30 +-
 drivers/net/ethernet/qlogic/Kconfig | 30 -
 drivers/net/ethernet/qlogic/qed/qed.h | 28 +-
 drivers/net/ethernet/qlogic/qed/qed_cxt.c | 1347 ++-
 drivers/net/ethernet/qlogic/qed/qed_cxt.h | 24 +
 drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 1814 ++-
 drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 28 +
 drivers/net/ethernet/qlogic/qed/qed_dev.c | 649 +-
 drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 55 +-
 drivers/net/ethernet/qlogic/qed/qed_hsi.h | 10956 +++++++++++--------
 drivers/net/ethernet/qlogic/qed/qed_hw.c | 55 +-
 drivers/net/ethernet/qlogic/qed/qed_hw.h | 12 +-
 .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 184 +-
 drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 9 +-
 drivers/net/ethernet/qlogic/qed/qed_int.c | 75 +-
 drivers/net/ethernet/qlogic/qed/qed_int.h | 3 +
 drivers/net/ethernet/qlogic/qed/qed_l2.c | 128 +-
 drivers/net/ethernet/qlogic/qed/qed_main.c | 68 +-
 drivers/net/ethernet/qlogic/qed/qed_mcp.c | 61 +-
 drivers/net/ethernet/qlogic/qed/qed_mcp.h | 3 +
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 43 +-
 drivers/net/ethernet/qlogic/qed/qed_sp.h | 26 +
 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 26 +-
 drivers/net/ethernet/qlogic/qed/qed_spq.c | 40 +-
 drivers/net/ethernet/qlogic/qed/qed_sriov.c | 508 +-
 drivers/net/ethernet/qlogic/qed/qed_sriov.h | 9 +-
 drivers/net/ethernet/qlogic/qed/qed_vf.c | 99 +-
 drivers/net/ethernet/qlogic/qed/qed_vf.h | 13 +-
 drivers/net/ethernet/qlogic/qede/Makefile | 1 +
 drivers/net/ethernet/qlogic/qede/qede.h | 9 +-
 drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 348 +
 drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 104 +
 drivers/net/ethernet/qlogic/qede/qede_main.c | 295 +-
 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 6 +-
 .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 1 -
 .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4 -
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 1 -
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2 -
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 30 +-
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 9 +-
 .../ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 95 +-
 drivers/net/ethernet/qlogic/qlge/qlge_main.c | 1 -
 drivers/net/ethernet/rdc/r6040.c | 91 +-
 drivers/net/ethernet/realtek/8139cp.c | 2 +-
 drivers/net/ethernet/realtek/8139too.c | 12 +-
 drivers/net/ethernet/realtek/r8169.c | 37 +-
 drivers/net/ethernet/renesas/ravb_main.c | 10 +-
 drivers/net/ethernet/renesas/sh_eth.c | 9 +-
 drivers/net/ethernet/rocker/rocker_main.c | 3 +-
 drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 1 -
 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 31 +-
 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 32 +-
 drivers/net/ethernet/sfc/ef10.c | 747 +-
 drivers/net/ethernet/sfc/ef10_sriov.c | 44 +-
 drivers/net/ethernet/sfc/ef10_sriov.h | 3 +
 drivers/net/ethernet/sfc/efx.c | 66 +-
 drivers/net/ethernet/sfc/efx.h | 9 +
 drivers/net/ethernet/sfc/mcdi_pcol.h | 1327 ++-
 drivers/net/ethernet/sfc/net_driver.h | 19 +-
 drivers/net/ethernet/sfc/nic.h | 5 +
 drivers/net/ethernet/smsc/smc91x.c | 13 +-
 drivers/net/ethernet/smsc/smc91x.h | 2 +
 drivers/net/ethernet/smsc/smsc911x.c | 287 +-
 drivers/net/ethernet/smsc/smsc9420.c | 60 +-
 drivers/net/ethernet/stmicro/stmmac/Kconfig | 14 +-
 drivers/net/ethernet/stmicro/stmmac/Makefile | 3 +-
 drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | 274 +
 drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h | 36 +
 drivers/net/ethernet/stmicro/stmmac/common.h | 19 +-
 drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 165 +-
 .../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 147 +-
 drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 86 +-
 .../net/ethernet/stmicro/stmmac/dwmac1000_core.c | 149 +-
 drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 43 +-
 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 98 +-
 drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 -
 .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 60 +-
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 51 +-
 drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 159 +
 .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 17 +-
 drivers/net/ethernet/synopsys/dwc_eth_qos.c | 128 +-
 drivers/net/ethernet/tehuti/tehuti.c | 2 +-
 drivers/net/ethernet/ti/Kconfig | 3 +-
 drivers/net/ethernet/ti/cpmac.c | 70 +-
 drivers/net/ethernet/ti/cpsw.c | 118 +-
 drivers/net/ethernet/ti/cpsw.h | 1 -
 drivers/net/ethernet/ti/davinci_cpdma.c | 261 +-
 drivers/net/ethernet/ti/davinci_cpdma.h | 3 +-
 drivers/net/ethernet/ti/davinci_emac.c | 189 +-
 drivers/net/ethernet/ti/davinci_mdio.c | 169 +-
 drivers/net/ethernet/ti/tlan.c | 1 -
 drivers/net/ethernet/tile/tilepro.c | 4 +-
 drivers/net/ethernet/toshiba/tc35815.c | 65 +-
 drivers/net/ethernet/tundra/tsi108_eth.c | 2 +
 drivers/net/ethernet/wiznet/w5100.c | 3 +-
 drivers/net/ethernet/xilinx/ll_temac.h | 1 -
 drivers/net/ethernet/xilinx/ll_temac_main.c | 47 +-
 drivers/net/ethernet/xilinx/xilinx_axienet.h | 2 -
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 82 +-
 drivers/net/ethernet/xilinx/xilinx_emaclite.c | 8 +-
 drivers/net/ethernet/xircom/xirc2ps_cs.c | 4 +-
 drivers/net/ethernet/xscale/ixp4xx_eth.c | 46 +-
 372 files changed, 39456 insertions(+), 14948 deletions(-)
 create mode 100644 drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
 create mode 100644 drivers/net/ethernet/broadcom/bgmac-bcma.c
 create mode 100644 drivers/net/ethernet/broadcom/bgmac-platform.c
 create mode 100644 drivers/net/ethernet/chelsio/libcxgb/Makefile
 create mode 100644 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
 create mode 100644 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
 create mode 100644 drivers/net/ethernet/hisilicon/hisi_femac.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_common.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/rl.c
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 create mode 100644 drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
 create mode 100644 drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
 create mode 100644 drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
 create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index c89b9aeec..39ca9350d 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -84,7 +84,6 @@ static u32 ax_msg_enable;
 struct ax_device {
 	struct mii_bus *mii_bus;
 	struct mdiobb_ctrl bb_ctrl;
-	struct phy_device *phy_dev;
 	void __iomem *addr_memr;
 	u8 reg_memr;
 	int link;
@@ -320,7 +319,7 @@ static void ax_block_output(struct net_device *dev, int count,
 static void ax_handle_link_change(struct net_device *dev)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	struct phy_device *phy_dev = ax->phy_dev;
+	struct phy_device *phy_dev = dev->phydev;
 	int status_change = 0;
 
 	if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
@@ -369,8 +368,6 @@ static int ax_mii_probe(struct net_device *dev)
 	phy_dev->supported &= PHY_BASIC_FEATURES;
 	phy_dev->advertising = phy_dev->supported;
 
-	ax->phy_dev = phy_dev;
-
 	netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
 		    phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);
 
@@ -410,7 +407,7 @@ static int ax_open(struct net_device *dev)
 	ret = ax_mii_probe(dev);
 	if (ret)
 		goto failed_mii_probe;
-	phy_start(ax->phy_dev);
+	phy_start(dev->phydev);
 
 	ret = ax_ei_open(dev);
 	if (ret)
@@ -421,7 +418,7 @@ static int ax_open(struct net_device *dev)
 	return 0;
 
 failed_ax_ei_open:
-	phy_disconnect(ax->phy_dev);
+	phy_disconnect(dev->phydev);
 failed_mii_probe:
 	ax_phy_switch(dev, 0);
 	free_irq(dev->irq, dev);
@@ -442,7 +439,7 @@ static int ax_close(struct net_device *dev)
 
 	/* turn the phy off */
 	ax_phy_switch(dev, 0);
-	phy_disconnect(ax->phy_dev);
+	phy_disconnect(dev->phydev);
 
 	free_irq(dev->irq, dev);
 	return 0;
@@ -450,8 +447,7 @@ static int ax_close(struct net_device *dev)
 
 static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
-	struct ax_device *ax = to_ax_dev(dev);
-	struct phy_device *phy_dev = ax->phy_dev;
+	struct phy_device *phy_dev = dev->phydev;
 
 	if (!netif_running(dev))
 		return -EINVAL;
@@ -474,28 +470,6 @@ static void ax_get_drvinfo(struct net_device *dev,
 	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
 }
 
-static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct ax_device *ax = to_ax_dev(dev);
-	struct phy_device *phy_dev = ax->phy_dev;
-
-	if (!phy_dev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(phy_dev, cmd);
-}
-
-static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct ax_device *ax = to_ax_dev(dev);
-	struct phy_device *phy_dev = ax->phy_dev;
-
-	if (!phy_dev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phy_dev, cmd);
-}
-
 static u32 ax_get_msglevel(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
@@ -512,12 +486,12 @@ static void ax_set_msglevel(struct net_device *dev, u32 v)
 
 static const struct ethtool_ops ax_ethtool_ops = {
 	.get_drvinfo		= ax_get_drvinfo,
-	.get_settings		= ax_get_settings,
-	.set_settings		= ax_set_settings,
 	.get_link		= ethtool_op_get_link,
 	.get_ts_info		= ethtool_op_get_ts_info,
 	.get_msglevel		= ax_get_msglevel,
 	.set_msglevel		= ax_set_msglevel,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 #ifdef CONFIG_AX88796_93CX6
@@ -936,7 +910,8 @@ static int ax_probe(struct platform_device *pdev)
 	iounmap(ax->map2);
 
 exit_mem2:
-	release_mem_region(mem2->start, mem2_size);
+	if (mem2)
+		release_mem_region(mem2->start, mem2_size);
 
 exit_mem1:
 	iounmap(ei_local->mem);
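The ax88796 hunks above show the conversion this patch applies across many drivers: the hand-rolled get_settings/set_settings ethtool callbacks (built on the legacy struct ethtool_cmd and phy_ethtool_gset/phy_ethtool_sset) are dropped, and the newer get_link_ksettings/set_link_ksettings hooks are wired straight to the generic phylib helpers, which operate on dev->phydev. A minimal sketch of the resulting pattern, with a hypothetical driver name (not part of this patch):

#include <linux/ethtool.h>
#include <linux/phy.h>

/* Hypothetical driver: once the attached PHY lives in dev->phydev, the
 * generic phylib helpers can serve the ethtool link-settings hooks
 * directly, so no driver-private callbacks are needed at all.
 */
static const struct ethtool_ops example_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
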
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 3d2245fdc..38eaea18d 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -310,7 +310,7 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
 static void bfin_mac_adjust_link(struct net_device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
-	struct phy_device *phydev = lp->phydev;
+	struct phy_device *phydev = dev->phydev;
 	unsigned long flags;
 	int new_state = 0;
 
@@ -430,7 +430,6 @@ static int mii_probe(struct net_device *dev, int phy_mode)
 	lp->old_link = 0;
 	lp->old_speed = 0;
 	lp->old_duplex = -1;
-	lp->phydev = phydev;
 
 	phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
 			   MDC_CLK, mdc_div, sclk / 1000000);
@@ -450,31 +449,6 @@ static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int
-bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct bfin_mac_local *lp = netdev_priv(dev);
-
-	if (lp->phydev)
-		return phy_ethtool_gset(lp->phydev, cmd);
-
-	return -EINVAL;
-}
-
-static int
-bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct bfin_mac_local *lp = netdev_priv(dev);
-
-	if (!capable(CAP_NET_ADMIN))
-		return -EPERM;
-
-	if (lp->phydev)
-		return phy_ethtool_sset(lp->phydev, cmd);
-
-	return -EINVAL;
-}
-
 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *info)
 {
@@ -552,8 +526,6 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
 #endif
 
 static const struct ethtool_ops bfin_mac_ethtool_ops = {
-	.get_settings = bfin_mac_ethtool_getsettings,
-	.set_settings = bfin_mac_ethtool_setsettings,
 	.get_link = ethtool_op_get_link,
 	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
 	.get_wol = bfin_mac_ethtool_getwol,
@@ -561,6 +533,8 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 	.get_ts_info = bfin_mac_ethtool_get_ts_info,
 #endif
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 /**************************************************************************/
@@ -1427,7 +1401,7 @@ static void bfin_mac_timeout(struct net_device *dev)
 	if (netif_queue_stopped(dev))
 		netif_wake_queue(dev);
 
-	bfin_mac_enable(lp->phydev);
+	bfin_mac_enable(dev->phydev);
 
 	/* We can accept TX packets again */
 	netif_trans_update(dev); /* prevent tx timeout */
@@ -1491,8 +1465,6 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
 static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
-	struct bfin_mac_local *lp = netdev_priv(netdev);
-
 	if (!netif_running(netdev))
 		return -EINVAL;
 
@@ -1502,8 +1474,8 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	case SIOCGHWTSTAMP:
 		return bfin_mac_hwtstamp_get(netdev, ifr);
 	default:
-		if (lp->phydev)
-			return phy_mii_ioctl(lp->phydev, ifr, cmd);
+		if (netdev->phydev)
+			return phy_mii_ioctl(netdev->phydev, ifr, cmd);
 		else
 			return -EOPNOTSUPP;
 	}
@@ -1547,12 +1519,12 @@ static int bfin_mac_open(struct net_device *dev)
 	if (ret)
 		return ret;
 
-	phy_start(lp->phydev);
+	phy_start(dev->phydev);
 	setup_system_regs(dev);
 	setup_mac_addr(dev->dev_addr);
 
 	bfin_mac_disable();
-	ret = bfin_mac_enable(lp->phydev);
+	ret = bfin_mac_enable(dev->phydev);
 	if (ret)
 		return ret;
 	pr_debug("hardware init finished\n");
@@ -1578,8 +1550,8 @@ static int bfin_mac_close(struct net_device *dev)
 	napi_disable(&lp->napi);
 	netif_carrier_off(dev);
 
-	phy_stop(lp->phydev);
-	phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
+	phy_stop(dev->phydev);
+	phy_write(dev->phydev, MII_BMCR, BMCR_PDOWN);
 
 	/* clear everything */
 	bfin_mac_shutdown(dev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index d1217db70..8c3b56198 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -92,7 +92,6 @@ struct bfin_mac_local {
 	int old_speed;
 	int old_duplex;
 
-	struct phy_device *phydev;
 	struct mii_bus *mii_bus;
 
 #if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
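bfin_mac makes the same switch and additionally drops the private struct phy_device pointer from its state, as the bfin_mac.h hunk above shows: phy_connect() and of_phy_connect() already record the attached PHY in net_device->phydev, so a second copy in the driver's private struct is redundant. A sketch of the access pattern after the conversion (a hypothetical ndo_do_ioctl handler, not code from this patch):

#include <linux/netdevice.h>
#include <linux/phy.h>

/* dev->phydev is filled in by phy_connect() and friends; guard against
 * it being absent before delegating MII ioctls to phylib.
 */
static int example_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -EOPNOTSUPP;	/* no PHY attached */

	return phy_mii_ioctl(dev->phydev, rq, cmd);
}
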
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 821d86c38..c83ebae73 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -440,7 +440,6 @@ struct et131x_adapter {
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 	struct mii_bus *mii_bus;
-	struct phy_device *phydev;
 	struct napi_struct napi;
 
 	/* Flags that indicate current state of the adapter */
@@ -864,7 +863,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
 {
 	int32_t delay = 0;
 	struct mac_regs __iomem *mac = &adapter->regs->mac;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 	u32 cfg1;
 	u32 cfg2;
 	u32 ifctrl;
@@ -1035,7 +1034,7 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
 static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
 {
 	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 	u32 sa_lo;
 	u32 sa_hi = 0;
 	u32 pf_ctrl = 0;
@@ -1230,7 +1229,7 @@ out:
 static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
 {
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	if (!phydev)
 		return -EIO;
@@ -1311,7 +1310,7 @@ static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
 
 static void et1310_config_flow_control(struct et131x_adapter *adapter)
 {
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	if (phydev->duplex == DUPLEX_HALF) {
 		adapter->flow = FLOW_NONE;
@@ -1456,7 +1455,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
 static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
 {
 	u16 data;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	et131x_mii_read(adapter, MII_BMCR, &data);
 	data &= ~BMCR_PDOWN;
@@ -1469,7 +1468,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
 static void et131x_xcvr_init(struct et131x_adapter *adapter)
 {
 	u16 lcr2;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	/* Set the LED behavior such that LED 1 indicates speed (off =
 	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
@@ -2111,7 +2110,7 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
 /* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
 {
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
 	 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
@@ -2426,7 +2425,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
 	struct sk_buff *skb = tcb->skb;
 	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
 	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 	dma_addr_t dma_addr;
 	struct tx_ring *tx_ring = &adapter->tx_ring;
 
@@ -2791,22 +2790,6 @@ static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
 }
 
-static int et131x_get_settings(struct net_device *netdev,
-			       struct ethtool_cmd *cmd)
-{
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
-	return phy_ethtool_gset(adapter->phydev, cmd);
-}
-
-static int et131x_set_settings(struct net_device *netdev,
-			       struct ethtool_cmd *cmd)
-{
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
-	return phy_ethtool_sset(adapter->phydev, cmd);
-}
-
 static int et131x_get_regs_len(struct net_device *netdev)
 {
 #define ET131X_REGS_LEN 256
@@ -2979,12 +2962,12 @@ static void et131x_get_drvinfo(struct net_device *netdev,
 }
 
 static struct ethtool_ops et131x_ethtool_ops = {
-	.get_settings	= et131x_get_settings,
-	.set_settings	= et131x_set_settings,
 	.get_drvinfo	= et131x_get_drvinfo,
 	.get_regs_len	= et131x_get_regs_len,
 	.get_regs	= et131x_get_regs,
 	.get_link	= ethtool_op_get_link,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 /* et131x_hwaddr_init - set up the MAC Address */
@@ -3098,7 +3081,7 @@ err_out:
 static void et131x_error_timer_handler(unsigned long data)
 {
 	struct et131x_adapter *adapter = (struct et131x_adapter *)data;
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = adapter->netdev->phydev;
 
 	if (et1310_in_phy_coma(adapter)) {
 		/* Bring the device immediately out of coma, to
@@ -3168,7 +3151,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
 static void et131x_adjust_link(struct net_device *netdev)
 {
 	struct et131x_adapter *adapter = netdev_priv(netdev);
-	struct phy_device *phydev = adapter->phydev;
+	struct phy_device *phydev = netdev->phydev;
 
 	if (!phydev)
 		return;
@@ -3287,7 +3270,6 @@ static int et131x_mii_probe(struct net_device *netdev)
 	phydev->advertising = phydev->supported;
 	phydev->autoneg = AUTONEG_ENABLE;
-	adapter->phydev = phydev;
 
 	phy_attached_info(phydev);
 
@@ -3323,7 +3305,7 @@ static void et131x_pci_remove(struct pci_dev *pdev)
 	unregister_netdev(netdev);
 	netif_napi_del(&adapter->napi);
-	phy_disconnect(adapter->phydev);
+	phy_disconnect(netdev->phydev);
 	mdiobus_unregister(adapter->mii_bus);
 	mdiobus_free(adapter->mii_bus);
 
@@ -3338,20 +3320,16 @@ static void et131x_pci_remove(struct pci_dev *pdev)
 
 static void et131x_up(struct net_device *netdev)
 {
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
 	et131x_enable_txrx(netdev);
-	phy_start(adapter->phydev);
+	phy_start(netdev->phydev);
 }
 
 static void et131x_down(struct net_device *netdev)
 {
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
 	/* Save the timestamp for the TX watchdog, prevent a timeout */
 	netif_trans_update(netdev);
 
-	phy_stop(adapter->phydev);
+	phy_stop(netdev->phydev);
 	et131x_disable_txrx(netdev);
 }
 
@@ -3684,12 +3662,10 @@ static int et131x_close(struct net_device *netdev)
 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
 			int cmd)
 {
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
-	if (!adapter->phydev)
+	if (!netdev->phydev)
 		return -EINVAL;
 
-	return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
+	return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
 }
 
 /* et131x_set_packet_filter - Configures the Rx Packet filtering */
@@ -4073,7 +4049,7 @@ out:
 	return rc;
 
 err_phy_disconnect:
-	phy_disconnect(adapter->phydev);
+	phy_disconnect(netdev->phydev);
err_mdio_unregister:
 	mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index de2c4bf5f..6ffdff68b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -77,7 +77,6 @@ struct emac_board_info {
 	int			emacrx_completed_flag;
 
-	struct phy_device	*phy_dev;
 	struct device_node	*phy_node;
 	unsigned int		link;
 	unsigned int		speed;
@@ -115,7 +114,7 @@ static void emac_update_duplex(struct net_device *dev)
 static void emac_handle_link_change(struct net_device *dev)
 {
 	struct emac_board_info *db = netdev_priv(dev);
-	struct phy_device *phydev = db->phy_dev;
+	struct phy_device *phydev = dev->phydev;
 	unsigned long flags;
 	int status_change = 0;
 
@@ -154,21 +153,22 @@ static void emac_handle_link_change(struct net_device *dev)
 static int emac_mdio_probe(struct net_device *dev)
 {
 	struct emac_board_info *db = netdev_priv(dev);
+	struct phy_device *phydev;
 
 	/* to-do: PHY interrupts are currently not supported */
 
 	/* attach the mac to the phy */
-	db->phy_dev = of_phy_connect(db->ndev, db->phy_node,
-				     &emac_handle_link_change, 0,
-				     db->phy_interface);
-	if (!db->phy_dev) {
+	phydev = of_phy_connect(db->ndev, db->phy_node,
+				&emac_handle_link_change, 0,
+				db->phy_interface);
+	if (!phydev) {
 		netdev_err(db->ndev, "could not find the PHY\n");
 		return -ENODEV;
 	}
 
 	/* mask with MAC supported features */
-	db->phy_dev->supported &= PHY_BASIC_FEATURES;
-	db->phy_dev->advertising = db->phy_dev->supported;
+	phydev->supported &= PHY_BASIC_FEATURES;
+	phydev->advertising = phydev->supported;
 
 	db->link = 0;
 	db->speed = 0;
@@ -179,10 +179,7 @@ static int emac_mdio_probe(struct net_device *dev)
 
 static void emac_mdio_remove(struct net_device *dev)
 {
-	struct emac_board_info *db = netdev_priv(dev);
-
-	phy_disconnect(db->phy_dev);
-	db->phy_dev = NULL;
+	phy_disconnect(dev->phydev);
 }
 
 static void emac_reset(struct emac_board_info *db)
@@ -208,8 +205,7 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
 
 static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct emac_board_info *dm = netdev_priv(dev);
-	struct phy_device *phydev = dm->phy_dev;
+	struct phy_device *phydev = dev->phydev;
 
 	if (!netif_running(dev))
 		return -EINVAL;
@@ -229,33 +225,11 @@ static void emac_get_drvinfo(struct net_device *dev,
 	strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
 }
 
-static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct emac_board_info *dm = netdev_priv(dev);
-	struct phy_device *phydev = dm->phy_dev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(phydev, cmd);
-}
-
-static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct emac_board_info *dm = netdev_priv(dev);
-	struct phy_device *phydev = dm->phy_dev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phydev, cmd);
-}
-
 static const struct ethtool_ops emac_ethtool_ops = {
 	.get_drvinfo	= emac_get_drvinfo,
-	.get_settings	= emac_get_settings,
-	.set_settings	= emac_set_settings,
 	.get_link	= ethtool_op_get_link,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 static unsigned int emac_setup(struct net_device *ndev)
@@ -744,7 +718,7 @@ static int emac_open(struct net_device *dev)
 		return ret;
 	}
 
-	phy_start(db->phy_dev);
+	phy_start(dev->phydev);
 
 	netif_start_queue(dev);
 
 	return 0;
@@ -781,7 +755,7 @@ static int emac_stop(struct net_device *ndev)
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
 
-	phy_stop(db->phy_dev);
+	phy_stop(ndev->phydev);
 
 	emac_mdio_remove(ndev);
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 103c30ddd..e0052003d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -473,7 +473,6 @@ struct altera_tse_private {
 	int phy_addr;		/* PHY's MDIO address, -1 for autodetection */
 	phy_interface_t phy_iface;
 	struct mii_bus *mdio;
-	struct phy_device *phydev;
 	int oldspeed;
 	int oldduplex;
 	int oldlink;
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index be72e1e64..7c367713c 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -233,40 +233,18 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 		buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
-static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct altera_tse_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
-
-	if (phydev == NULL)
-		return -ENODEV;
-
-	return phy_ethtool_gset(phydev, cmd);
-}
-
-static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct altera_tse_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
-
-	if (phydev == NULL)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phydev, cmd);
-}
-
 static const struct ethtool_ops tse_ethtool_ops = {
 	.get_drvinfo = tse_get_drvinfo,
 	.get_regs_len = tse_reglen,
 	.get_regs = tse_get_regs,
 	.get_link = ethtool_op_get_link,
-	.get_settings = tse_get_settings,
-	.set_settings = tse_set_settings,
 	.get_strings = tse_gstrings,
 	.get_sset_count = tse_sset_count,
 	.get_ethtool_stats = tse_fill_stats,
 	.get_msglevel = tse_get_msglevel,
 	.set_msglevel = tse_set_msglevel,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index f749e4d38..bda31f308 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -625,7 +625,7 @@ out:
 static void altera_tse_adjust_link(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 	int new_state = 0;
 
 	/* only change config if there is a link */
@@ -815,6 +815,7 @@ static int init_phy(struct net_device *dev)
 		phydev = of_phy_connect(dev, phynode,
 					&altera_tse_adjust_link, 0, priv->phy_iface);
 	}
+	of_node_put(phynode);
 
 	if (!phydev) {
 		netdev_err(dev, "Could not find the PHY\n");
@@ -845,7 +846,6 @@ static int init_phy(struct net_device *dev)
 	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
 		   phydev->mdio.addr, phydev->phy_id, phydev->link);
 
-	priv->phydev = phydev;
 	return 0;
 }
 
@@ -1172,8 +1172,8 @@ static int tse_open(struct net_device *dev)
 	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
-	if (priv->phydev)
-		phy_start(priv->phydev);
+	if (dev->phydev)
+		phy_start(dev->phydev);
 
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
@@ -1205,8 +1205,8 @@ static int tse_shutdown(struct net_device *dev)
 	unsigned long int flags;
 
 	/* Stop the PHY */
-	if (priv->phydev)
-		phy_stop(priv->phydev);
+	if (dev->phydev)
+		phy_stop(dev->phydev);
 
 	netif_stop_queue(dev);
 	napi_disable(&priv->napi);
@@ -1545,10 +1545,9 @@ err_free_netdev:
 static int altera_tse_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
-	struct altera_tse_private *priv = netdev_priv(ndev);
 
-	if (priv->phydev)
-		phy_disconnect(priv->phydev);
+	if (ndev->phydev)
+		phy_disconnect(ndev->phydev);
 
 	platform_set_drvdata(pdev, NULL);
 	altera_tse_mdio_destroy(ndev);
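Besides the phydev cleanup, the altera_tse_main.c hunk above adds an of_node_put() after of_phy_connect(): of_parse_phandle() returns a device-tree node with an elevated reference count, and the caller must drop that reference once the node is no longer needed, whether or not the connect succeeded. A minimal sketch of the balanced pattern (hypothetical function; assumes a DT-based probe and an RGMII PHY):

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_connect_phy(struct net_device *dev,
			       void (*adjust)(struct net_device *))
{
	struct device_node *phynode;
	struct phy_device *phydev;

	/* of_parse_phandle() takes a reference on the returned node... */
	phynode = of_parse_phandle(dev->dev.parent->of_node, "phy-handle", 0);
	if (!phynode)
		return -ENODEV;

	phydev = of_phy_connect(dev, phynode, adjust, 0,
				PHY_INTERFACE_MODE_RGMII);
	/* ...so drop it on every path once it has served its purpose. */
	of_node_put(phynode);

	return phydev ? 0 : -ENODEV;
}
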
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 20760e102..df664187c 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -412,13 +412,13 @@ static void au1000_adjust_link(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
-	struct phy_device *phydev = aup->phy_dev;
+	struct phy_device *phydev = dev->phydev;
 	unsigned long flags;
 	u32 reg;
 
 	int status_change = 0;
 
-	BUG_ON(!aup->phy_dev);
+	BUG_ON(!phydev);
 
 	spin_lock_irqsave(&aup->lock, flags);
 
@@ -579,7 +579,6 @@ static int au1000_mii_probe(struct net_device *dev)
 	aup->old_link = 0;
 	aup->old_speed = 0;
 	aup->old_duplex = -1;
-	aup->phy_dev = phydev;
 
 	phy_attached_info(phydev);
 
@@ -678,29 +677,6 @@ au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
  * ethtool operations
  */
 
-static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct au1000_private *aup = netdev_priv(dev);
-
-	if (aup->phy_dev)
-		return phy_ethtool_gset(aup->phy_dev, cmd);
-
-	return -EINVAL;
-}
-
-static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct au1000_private *aup = netdev_priv(dev);
-
-	if (!capable(CAP_NET_ADMIN))
-		return -EPERM;
-
-	if (aup->phy_dev)
-		return phy_ethtool_sset(aup->phy_dev, cmd);
-
-	return -EINVAL;
-}
-
 static void au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
@@ -725,12 +701,12 @@ static u32 au1000_get_msglevel(struct net_device *dev)
 }
 
 static const struct ethtool_ops au1000_ethtool_ops = {
-	.get_settings = au1000_get_settings,
-	.set_settings = au1000_set_settings,
 	.get_drvinfo = au1000_get_drvinfo,
 	.get_link = ethtool_op_get_link,
 	.get_msglevel = au1000_get_msglevel,
 	.set_msglevel = au1000_set_msglevel,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 
@@ -778,8 +754,8 @@ static int au1000_init(struct net_device *dev)
 #ifndef CONFIG_CPU_LITTLE_ENDIAN
 	control |= MAC_BIG_ENDIAN;
 #endif
-	if (aup->phy_dev) {
-		if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
+	if (dev->phydev) {
+		if (dev->phydev->link && (DUPLEX_FULL == dev->phydev->duplex))
 			control |= MAC_FULL_DUPLEX;
 		else
 			control |= MAC_DISABLE_RX_OWN;
@@ -891,11 +867,10 @@ static int au1000_rx(struct net_device *dev)
 
 static void au1000_update_tx_stats(struct net_device *dev, u32 status)
 {
-	struct au1000_private *aup = netdev_priv(dev);
 	struct net_device_stats *ps = &dev->stats;
 
 	if (status & TX_FRAME_ABORTED) {
-		if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
+		if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) {
 			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
 				/* any other tx errors are only valid
 				 * in half duplex mode
@@ -975,10 +950,10 @@ static int au1000_open(struct net_device *dev)
 		return retval;
 	}
 
-	if (aup->phy_dev) {
+	if (dev->phydev) {
 		/* cause the PHY state machine to schedule a link state check */
-		aup->phy_dev->state = PHY_CHANGELINK;
-		phy_start(aup->phy_dev);
+		dev->phydev->state = PHY_CHANGELINK;
+		phy_start(dev->phydev);
 	}
 
 	netif_start_queue(dev);
@@ -995,8 +970,8 @@ static int au1000_close(struct net_device *dev)
 
 	netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
 
-	if (aup->phy_dev)
-		phy_stop(aup->phy_dev);
+	if (dev->phydev)
+		phy_stop(dev->phydev);
 
 	spin_lock_irqsave(&aup->lock, flags);
 
@@ -1110,15 +1085,13 @@ static void au1000_multicast_list(struct net_device *dev)
 
 static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct au1000_private *aup = netdev_priv(dev);
-
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	if (!aup->phy_dev)
+	if (!dev->phydev)
 		return -EINVAL; /* PHY not controllable */
 
-	return phy_mii_ioctl(aup->phy_dev, rq, cmd);
+	return phy_mii_ioctl(dev->phydev, rq, cmd);
 }
 
 static const struct net_device_ops au1000_netdev_ops = {
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index ca53024f0..4c47c2377 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -106,7 +106,6 @@ struct au1000_private {
 	int old_speed;
 	int old_duplex;
 
-	struct phy_device *phy_dev;
 	struct mii_bus *mii_bus;
 
 	/* PHY configuration */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ebf9224b2..a9b270956 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -154,7 +154,7 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
 		goto err_rx_ring;
 
 	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
-		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
 		channel->pdata = pdata;
 		channel->queue_index = i;
 		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 19e38afbc..300e3b5c5 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -3,6 +3,7 @@ config NET_XGENE
 	depends on HAS_DMA
 	depends on ARCH_XGENE || COMPILE_TEST
 	select PHYLIB
+	select MDIO_XGENE
 	help
 	  This is the Ethernet driver for the on-chip ethernet interface on the
 	  APM X-Gene SoC.
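The one-line xgbe-drv.c hunk above is a format-specifier correction: the channel index is unsigned, so "%u" matches its type where "%d" did not (harmless for small values, but undefined for values above INT_MAX and flagged by -Wformat checkers). A tiny userspace illustration, assuming an unsigned counter as the change implies:

#include <stdio.h>

int main(void)
{
	unsigned int i = 3;
	char name[16];

	/* "%u" is the printf conversion that matches an unsigned int */
	snprintf(name, sizeof(name), "channel-%u", i);
	puts(name);
	return 0;
}
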
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 416d6ebfc..22a7b26ca 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -65,8 +65,15 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 
 		return phy_ethtool_gset(phydev, cmd);
 	} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
-		cmd->supported = SUPPORTED_1000baseT_Full |
-				SUPPORTED_Autoneg | SUPPORTED_MII;
+		if (pdata->mdio_driver) {
+			if (!phydev)
+				return -ENODEV;
+
+			return phy_ethtool_gset(phydev, cmd);
+		}
+
+		cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+				 SUPPORTED_MII;
 		cmd->advertising = cmd->supported;
 		ethtool_cmd_speed_set(cmd, SPEED_1000);
 		cmd->duplex = DUPLEX_FULL;
@@ -92,12 +99,21 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 	struct phy_device *phydev = pdata->phy_dev;
 
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
-		if (phydev == NULL)
+		if (!phydev)
 			return -ENODEV;
 
 		return phy_ethtool_sset(phydev, cmd);
 	}
 
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+		if (pdata->mdio_driver) {
+			if (!phydev)
+				return -ENODEV;
+
+			return phy_ethtool_sset(phydev, cmd);
+		}
+	}
+
 	return -EINVAL;
 }
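The xgene_enet_ethtool.c hunks above follow one rule: when a real MDIO driver manages the PHY (pdata->mdio_driver is set), both the getter and the setter defer to phylib; only the driver-managed SGMII case keeps reporting fixed 1000/Full settings. A condensed sketch of that decision, using the legacy ethtool_cmd API as the 4.8 driver does (names taken from the hunks, structure simplified, not a drop-in replacement):

/* Sketch: delegate to phylib when an external MDIO driver owns the PHY,
 * otherwise report the fixed SGMII link parameters.
 */
static int example_get_settings(struct net_device *ndev,
				struct ethtool_cmd *cmd)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (pdata->mdio_driver)
		return phydev ? phy_ethtool_gset(phydev, cmd) : -ENODEV;

	cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			 SUPPORTED_MII;
	cmd->advertising = cmd->supported;
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	return 0;
}
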
ICM_CONFIG0_REG_0_ADDR, &icm0); xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2); @@ -564,7 +508,21 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata) mc2 |= FULL_DUPLEX2 | PAD_CRC; xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); + xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); + xgene_enet_configure_clock(pdata); + + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); +} + +static void xgene_gmac_init(struct xgene_enet_pdata *pdata) +{ + u32 value; + if (!pdata->mdio_driver) + xgene_gmac_reset(pdata); + + xgene_gmac_set_speed(pdata); xgene_gmac_set_mac_addr(pdata); /* Adjust MDC clock frequency */ @@ -579,15 +537,10 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata) /* Rtype should be copied from FP */ xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0); - xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); - xgene_enet_configure_clock(pdata); /* Rx-Tx traffic resume */ xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); - xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); - xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); - xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value); value &= ~TX_DV_GATE_EN0; value &= ~RX_DV_GATE_EN0; @@ -671,93 +624,155 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p) static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { - u32 val; + struct device *dev = &pdata->pdev->dev; if (!xgene_ring_mgr_init(pdata)) return -ENODEV; - if (!IS_ERR(pdata->clk)) { + if (pdata->mdio_driver) { + xgene_enet_config_ring_if_assoc(pdata); + return 0; + } + + if (dev->of_node) { clk_prepare_enable(pdata->clk); + udelay(5); clk_disable_unprepare(pdata->clk); + udelay(5); clk_prepare_enable(pdata->clk); - xgene_enet_ecc_init(pdata); + udelay(5); + } else { +#ifdef CONFIG_ACPI + if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) { + acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_RST", NULL, NULL); + } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), + "_INI")) { + acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_INI", NULL, NULL); + } +#endif } - xgene_enet_config_ring_if_assoc(pdata); - /* Enable auto-incr for scanning */ - xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val); - val |= SCAN_AUTO_INCR; - MGMT_CLOCK_SEL_SET(&val, 1); - xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); + xgene_enet_ecc_init(pdata); + xgene_enet_config_ring_if_assoc(pdata); return 0; } -static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) +static void xgene_enet_clear(struct xgene_enet_pdata *pdata, + struct xgene_enet_desc_ring *ring) { - if (!IS_ERR(pdata->clk)) - clk_disable_unprepare(pdata->clk); -} + u32 addr, val, data; -static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) -{ - struct xgene_enet_pdata *pdata = bus->priv; - u32 val; + val = xgene_enet_ring_bufnum(ring->id); - val = xgene_mii_phy_read(pdata, mii_id, regnum); - netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n", - mii_id, regnum, val); + if (xgene_enet_is_bufpool(ring->id)) { + addr = ENET_CFGSSQMIFPRESET_ADDR; + data = BIT(val - 0x20); + } else { + addr = ENET_CFGSSQMIWQRESET_ADDR; + data = BIT(val); + } - return val; + xgene_enet_wr_ring_if(pdata, addr, data); } -static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, - u16 val) +static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) 
{ - struct xgene_enet_pdata *pdata = bus->priv; + struct device *dev = &pdata->pdev->dev; + struct xgene_enet_desc_ring *ring; + u32 pb, val; + int i; + + pb = 0; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]->buf_pool; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val - 0x20); + } + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb); + + pb = 0; + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val); + } + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb); - netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n", - mii_id, regnum, val); - return xgene_mii_phy_write(pdata, mii_id, regnum, val); + if (dev->of_node) { + if (!IS_ERR(pdata->clk)) + clk_disable_unprepare(pdata->clk); + } } static void xgene_enet_adjust_link(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); + const struct xgene_mac_ops *mac_ops = pdata->mac_ops; struct phy_device *phydev = pdata->phy_dev; if (phydev->link) { if (pdata->phy_speed != phydev->speed) { pdata->phy_speed = phydev->speed; - xgene_gmac_init(pdata); - xgene_gmac_rx_enable(pdata); - xgene_gmac_tx_enable(pdata); + mac_ops->set_speed(pdata); + mac_ops->rx_enable(pdata); + mac_ops->tx_enable(pdata); phy_print_status(phydev); } } else { - xgene_gmac_rx_disable(pdata); - xgene_gmac_tx_disable(pdata); + mac_ops->rx_disable(pdata); + mac_ops->tx_disable(pdata); pdata->phy_speed = SPEED_UNKNOWN; phy_print_status(phydev); } } -static int xgene_enet_phy_connect(struct net_device *ndev) +#ifdef CONFIG_ACPI +static struct acpi_device *acpi_phy_find_device(struct device *dev) +{ + struct acpi_reference_args args; + struct fwnode_handle *fw_node; + int status; + + fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev)); + status = acpi_node_get_property_reference(fw_node, "phy-handle", 0, + &args); + if (ACPI_FAILURE(status)) { + dev_dbg(dev, "No matching phy in ACPI table\n"); + return NULL; + } + + return args.adev; +} +#endif + +int xgene_enet_phy_connect(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct device_node *phy_np; + struct device_node *np; struct phy_device *phy_dev; struct device *dev = &pdata->pdev->dev; + int i; if (dev->of_node) { - phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); - if (!phy_np) { + for (i = 0 ; i < 2; i++) { + np = of_parse_phandle(dev->of_node, "phy-handle", i); + if (np) + break; + } + + if (!np) { netdev_dbg(ndev, "No phy-handle found in DT\n"); return -ENODEV; } - phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, + phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link, 0, pdata->phy_mode); + of_node_put(np); if (!phy_dev) { netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; @@ -765,6 +780,11 @@ static int xgene_enet_phy_connect(struct net_device *ndev) pdata->phy_dev = phy_dev; } else { +#ifdef CONFIG_ACPI + struct acpi_device *adev = acpi_phy_find_device(dev); + if (adev) + pdata->phy_dev = adev->driver_data; + phy_dev = pdata->phy_dev; if (!phy_dev || @@ -773,6 +793,9 @@ static int xgene_enet_phy_connect(struct net_device *ndev) netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; } +#else + return -ENODEV; +#endif } pdata->phy_speed = SPEED_UNKNOWN; @@ -792,8 +815,8 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, struct phy_device *phy; struct device_node *child_np; struct device_node *mdio_np = NULL; + u32 phy_addr; int ret; - u32 phy_id; if (dev->of_node) { 
for_each_child_of_node(dev->of_node, child_np) { @@ -820,21 +843,17 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, if (ret) return ret; - ret = device_property_read_u32(dev, "phy-channel", &phy_id); + ret = device_property_read_u32(dev, "phy-channel", &phy_addr); if (ret) - ret = device_property_read_u32(dev, "phy-addr", &phy_id); + ret = device_property_read_u32(dev, "phy-addr", &phy_addr); if (ret) return -EINVAL; - phy = get_phy_device(mdio, phy_id, false); - if (IS_ERR(phy)) + phy = xgene_enet_phy_register(mdio, phy_addr); + if (!phy) return -EIO; - ret = phy_device_register(phy); - if (ret) - phy_device_free(phy); - else - pdata->phy_dev = phy; + pdata->phy_dev = phy; return ret; } @@ -850,13 +869,13 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) return -ENOMEM; mdio_bus->name = "APM X-Gene MDIO bus"; - mdio_bus->read = xgene_enet_mdio_read; - mdio_bus->write = xgene_enet_mdio_write; + mdio_bus->read = xgene_mdio_rgmii_read; + mdio_bus->write = xgene_mdio_rgmii_write; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii", ndev->name); - mdio_bus->priv = pdata; - mdio_bus->parent = &ndev->dev; + mdio_bus->priv = (void __force *)pdata->mcx_mac_addr; + mdio_bus->parent = &pdata->pdev->dev; ret = xgene_mdiobus_register(pdata, mdio_bus); if (ret) { @@ -873,6 +892,12 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) return ret; } +void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata) +{ + if (pdata->phy_dev) + phy_disconnect(pdata->phy_dev); +} + void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) { if (pdata->phy_dev) @@ -890,11 +915,13 @@ const struct xgene_mac_ops xgene_gmac_ops = { .tx_enable = xgene_gmac_tx_enable, .rx_disable = xgene_gmac_rx_disable, .tx_disable = xgene_gmac_tx_disable, + .set_speed = xgene_gmac_set_speed, .set_mac_addr = xgene_gmac_set_mac_addr, }; const struct xgene_port_ops xgene_gport_ops = { .reset = xgene_enet_reset, + .clear = xgene_enet_clear, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_gport_shutdown, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 45220be31..179a44dce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -104,6 +104,8 @@ enum xgene_enet_rm { #define RECOMBBUF BIT(27) #define MAC_OFFSET 0x30 +#define OFFSET_4 0x04 +#define OFFSET_8 0x08 #define BLOCK_ETH_CSR_OFFSET 0x2000 #define BLOCK_ETH_CLE_CSR_OFFSET 0x6000 @@ -165,6 +167,8 @@ enum xgene_enet_rm { #define TX_DV_GATE_EN0 BIT(2) #define RX_DV_GATE_EN0 BIT(1) #define RESUME_RX0 BIT(0) +#define ENET_CFGSSQMIFPRESET_ADDR 0x14 +#define ENET_CFGSSQMIWQRESET_ADDR 0x1c #define ENET_CFGSSQMIWQASSOC_ADDR 0xe0 #define ENET_CFGSSQMIFPQASSOC_ADDR 0xdc #define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR 0xf0 @@ -297,11 +301,6 @@ enum xgene_enet_ring_bufnum { RING_BUFNUM_INVALID }; -enum xgene_enet_cmd { - XGENE_ENET_WR_CMD = BIT(31), - XGENE_ENET_RD_CMD = BIT(30) -}; - enum xgene_enet_err_code { HBF_READ_DATA = 3, HBF_LL_READ = 4, @@ -347,6 +346,8 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); bool xgene_ring_mgr_init(struct xgene_enet_pdata *p); +int xgene_enet_phy_connect(struct net_device *ndev); +void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata); extern const struct xgene_mac_ops xgene_gmac_ops; extern const struct xgene_port_ops xgene_gport_ops; 
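The ENET_CFGSSQMIFPRESET/ENET_CFGSSQMIWQRESET definitions and the new clear() port op added above share one convention: buffer-pool rings occupy buffer numbers 0x20 and up, so their reset bit is taken relative to that base, while work-queue rings use the buffer number directly. A minimal standalone sketch of that mapping, assuming only the register offsets copied from the hunk (the helper name, the BIT macro, and the sample bufnum are illustrative, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define ENET_CFGSSQMIFPRESET_ADDR 0x14  /* free-pool reset, from the hunk above */
    #define ENET_CFGSSQMIWQRESET_ADDR 0x1c  /* work-queue reset, from the hunk above */
    #define RING_BUFNUM_BUFPOOL 0x20        /* first buffer-pool bufnum */
    #define BIT(n) (1U << (n))

    /* Mirror xgene_enet_clear(): pick the reset register and bit for a ring. */
    static void ring_reset_word(uint32_t bufnum, uint32_t *addr, uint32_t *data)
    {
            if (bufnum >= RING_BUFNUM_BUFPOOL) {    /* buffer-pool ring */
                    *addr = ENET_CFGSSQMIFPRESET_ADDR;
                    *data = BIT(bufnum - RING_BUFNUM_BUFPOOL);
            } else {                                /* work-queue ring */
                    *addr = ENET_CFGSSQMIWQRESET_ADDR;
                    *data = BIT(bufnum);
            }
    }

    int main(void)
    {
            uint32_t addr, data;

            ring_reset_word(0x22, &addr, &data);
            printf("reg 0x%02x <- 0x%08x\n", addr, data);  /* reg 0x14 <- 0x00000004 */
            return 0;
    }

The same mapping reappears below in the SGMII and XFI variants of xgene_enet_clear(), and the reworked xgene_gport_shutdown() simply ORs the per-ring bits together before a single write to each reset register.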
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d208b172f..d1d6b5eeb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -102,25 +102,13 @@ static u8 xgene_enet_hdr_len(const void *data) static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) { - struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev); - struct xgene_enet_raw_desc16 *raw_desc; - u32 slots = buf_pool->slots - 1; - u32 tail = buf_pool->tail; - u32 userinfo; - int i, len; - - len = pdata->ring_ops->len(buf_pool); - for (i = 0; i < len; i++) { - tail = (tail - 1) & slots; - raw_desc = &buf_pool->raw_desc16[tail]; + int i; - /* Hardware stores descriptor in little endian format */ - userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); - dev_kfree_skb_any(buf_pool->rx_skb[userinfo]); + /* Free up the buffers held by hardware */ + for (i = 0; i < buf_pool->slots; i++) { + if (buf_pool->rx_skb[i]) + dev_kfree_skb_any(buf_pool->rx_skb[i]); } - - pdata->ring_ops->wr_cmd(buf_pool, -len); - buf_pool->tail = tail; } static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) @@ -481,6 +469,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE); skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); skb = buf_pool->rx_skb[skb_index]; + buf_pool->rx_skb[skb_index] = NULL; /* checking for error */ status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) || @@ -619,6 +608,30 @@ static void xgene_enet_timeout(struct net_device *ndev) } } +static void xgene_enet_set_irq_name(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_enet_desc_ring *ring; + int i; + + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (!pdata->cq_cnt) { + snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", + ndev->name); + } else { + snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d", + ndev->name, i); + } + } + + for (i = 0; i < pdata->cq_cnt; i++) { + ring = pdata->tx_ring[i]->cp_ring; + snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d", + ndev->name, i); + } +} + static int xgene_enet_register_irq(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); @@ -626,6 +639,7 @@ static int xgene_enet_register_irq(struct net_device *ndev) struct xgene_enet_desc_ring *ring; int ret = 0, i; + xgene_enet_set_irq_name(ndev); for (i = 0; i < pdata->rxq_cnt; i++) { ring = pdata->rx_ring[i]; irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); @@ -720,20 +734,21 @@ static int xgene_enet_open(struct net_device *ndev) if (ret) return ret; - mac_ops->tx_enable(pdata); - mac_ops->rx_enable(pdata); - xgene_enet_napi_enable(pdata); ret = xgene_enet_register_irq(ndev); if (ret) return ret; - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (pdata->phy_dev) { phy_start(pdata->phy_dev); - else + } else { schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF); + netif_carrier_off(ndev); + } - netif_start_queue(ndev); + mac_ops->tx_enable(pdata); + mac_ops->rx_enable(pdata); + netif_tx_start_all_queues(ndev); return ret; } @@ -744,16 +759,15 @@ static int xgene_enet_close(struct net_device *ndev) const struct xgene_mac_ops *mac_ops = pdata->mac_ops; int i; - netif_stop_queue(ndev); + netif_tx_stop_all_queues(ndev); + mac_ops->tx_disable(pdata); + mac_ops->rx_disable(pdata); - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (pdata->phy_dev) 
phy_stop(pdata->phy_dev); else cancel_delayed_work_sync(&pdata->link_work); - mac_ops->tx_disable(pdata); - mac_ops->rx_disable(pdata); - xgene_enet_free_irq(ndev); xgene_enet_napi_disable(pdata); for (i = 0; i < pdata->rxq_cnt; i++) @@ -761,7 +775,6 @@ static int xgene_enet_close(struct net_device *ndev) return 0; } - static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) { struct xgene_enet_pdata *pdata; @@ -771,7 +784,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) dev = ndev_to_dev(ring->ndev); pdata->ring_ops->clear(ring); - dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); + dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); } static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) @@ -784,6 +797,9 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) ring = pdata->tx_ring[i]; if (ring) { xgene_enet_delete_ring(ring); + pdata->port_ops->clear(pdata, ring); + if (pdata->cq_cnt) + xgene_enet_delete_ring(ring->cp_ring); pdata->tx_ring[i] = NULL; } } @@ -794,6 +810,7 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) buf_pool = ring->buf_pool; xgene_enet_delete_bufpool(buf_pool); xgene_enet_delete_ring(buf_pool); + pdata->port_ops->clear(pdata, buf_pool); xgene_enet_delete_ring(ring); pdata->rx_ring[i] = NULL; } @@ -842,7 +859,7 @@ static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring) if (ring->desc_addr) { pdata->ring_ops->clear(ring); - dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); + dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); } devm_kfree(dev, ring); } @@ -900,9 +917,10 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( struct net_device *ndev, u32 ring_num, enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id) { - struct xgene_enet_desc_ring *ring; struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct device *dev = ndev_to_dev(ndev); + struct xgene_enet_desc_ring *ring; + void *irq_mbox_addr; int size; size = xgene_enet_get_ring_size(dev, cfgsize); @@ -919,8 +937,8 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( ring->cfgsize = cfgsize; ring->id = ring_id; - ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma, - GFP_KERNEL); + ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma, + GFP_KERNEL | __GFP_ZERO); if (!ring->desc_addr) { devm_kfree(dev, ring); return NULL; @@ -928,14 +946,16 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( ring->size = size; if (is_irq_mbox_required(pdata, ring)) { - ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE, - &ring->irq_mbox_dma, GFP_KERNEL); - if (!ring->irq_mbox_addr) { - dma_free_coherent(dev, size, ring->desc_addr, - ring->dma); + irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE, + &ring->irq_mbox_dma, + GFP_KERNEL | __GFP_ZERO); + if (!irq_mbox_addr) { + dmam_free_coherent(dev, size, ring->desc_addr, + ring->dma); devm_kfree(dev, ring); return NULL; } + ring->irq_mbox_addr = irq_mbox_addr; } ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring); @@ -996,6 +1016,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) u8 eth_bufnum = pdata->eth_bufnum; u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; + __le64 *exp_bufs; u16 ring_id; int i, ret, size; @@ -1027,13 +1048,6 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) rx_ring->nbufpool = NUM_BUFPOOL; rx_ring->buf_pool = buf_pool; rx_ring->irq = pdata->irqs[i]; - if 
(!pdata->cq_cnt) { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", - ndev->name); - } else { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d", - ndev->name, i); - } buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, sizeof(struct sk_buff *), GFP_KERNEL); @@ -1060,13 +1074,13 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) } size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; - tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, - &dma_exp_bufs, - GFP_KERNEL); - if (!tx_ring->exp_bufs) { + exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs, + GFP_KERNEL | __GFP_ZERO); + if (!exp_bufs) { ret = -ENOMEM; goto err; } + tx_ring->exp_bufs = exp_bufs; pdata->tx_ring[i] = tx_ring; @@ -1086,8 +1100,6 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i]; cp_ring->index = i; - snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d", - ndev->name, i); } cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, @@ -1283,6 +1295,23 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) return 0; } +static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) +{ + int ret; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) + return 0; + + if (!IS_ENABLED(CONFIG_MDIO_XGENE)) + return 0; + + ret = xgene_enet_phy_connect(pdata->ndev); + if (!ret) + pdata->mdio_driver = true; + + return 0; +} + static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { struct platform_device *pdev; @@ -1368,6 +1397,10 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; + ret = xgene_enet_check_phy_handle(pdata); + if (ret) + return ret; + pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { /* Firmware may have set up the clock already. 
*/ @@ -1447,6 +1480,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); } + pdata->phy_speed = SPEED_UNKNOWN; pdata->mac_ops->init(pdata); return ret; @@ -1556,28 +1590,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) } } -static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata) -{ - struct napi_struct *napi; - int i; - - for (i = 0; i < pdata->rxq_cnt; i++) { - napi = &pdata->rx_ring[i]->napi; - netif_napi_del(napi); - } - - for (i = 0; i < pdata->cq_cnt; i++) { - napi = &pdata->tx_ring[i]->cp_ring->napi; - netif_napi_del(napi); - } -} - static int xgene_enet_probe(struct platform_device *pdev) { struct net_device *ndev; struct xgene_enet_pdata *pdata; struct device *dev = &pdev->dev; - const struct xgene_mac_ops *mac_ops; + void (*link_state)(struct work_struct *); const struct of_device_id *of_id; int ret; @@ -1635,27 +1653,31 @@ static int xgene_enet_probe(struct platform_device *pdev) goto err; } - ret = register_netdev(ndev); - if (ret) { - netdev_err(ndev, "Failed to register netdev\n"); - goto err; - } - ret = xgene_enet_init_hw(pdata); if (ret) goto err_netdev; - mac_ops = pdata->mac_ops; - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { - ret = xgene_enet_mdio_config(pdata); - if (ret) - goto err_netdev; - } else { - INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); + link_state = pdata->mac_ops->link_state; + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + INIT_DELAYED_WORK(&pdata->link_work, link_state); + } else if (!pdata->mdio_driver) { + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + ret = xgene_enet_mdio_config(pdata); + else + INIT_DELAYED_WORK(&pdata->link_work, link_state); } + if (ret) + goto err; xgene_enet_napi_add(pdata); + ret = register_netdev(ndev); + if (ret) { + netdev_err(ndev, "Failed to register netdev\n"); + goto err; + } + return 0; + err_netdev: unregister_netdev(ndev); err: @@ -1673,20 +1695,38 @@ static int xgene_enet_remove(struct platform_device *pdev) mac_ops = pdata->mac_ops; ndev = pdata->ndev; - mac_ops->rx_disable(pdata); - mac_ops->tx_disable(pdata); + rtnl_lock(); + if (netif_running(ndev)) + dev_close(ndev); + rtnl_unlock(); - xgene_enet_napi_del(pdata); - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (pdata->mdio_driver) + xgene_enet_phy_disconnect(pdata); + else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) xgene_enet_mdio_remove(pdata); + unregister_netdev(ndev); - xgene_enet_delete_desc_rings(pdata); pdata->port_ops->shutdown(pdata); + xgene_enet_delete_desc_rings(pdata); free_netdev(ndev); return 0; } +static void xgene_enet_shutdown(struct platform_device *pdev) +{ + struct xgene_enet_pdata *pdata; + + pdata = platform_get_drvdata(pdev); + if (!pdata) + return; + + if (!pdata->ndev) + return; + + xgene_enet_remove(pdev); +} + #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_enet_acpi_match[] = { { "APMC0D05", XGENE_ENET1}, @@ -1721,6 +1761,7 @@ static struct platform_driver xgene_enet_driver = { }, .probe = xgene_enet_probe, .remove = xgene_enet_remove, + .shutdown = xgene_enet_shutdown, }; module_platform_driver(xgene_enet_driver); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 092fbecca..217546e57 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -38,6 +38,7 @@ #include "xgene_enet_hw.h" #include "xgene_enet_cle.h" #include "xgene_enet_ring2.h" 
+#include "../../../phy/mdio-xgene.h" #define XGENE_DRV_VERSION "v1.0" #define XGENE_ENET_MAX_MTU 1536 @@ -140,6 +141,7 @@ struct xgene_mac_ops { void (*rx_enable)(struct xgene_enet_pdata *pdata); void (*tx_disable)(struct xgene_enet_pdata *pdata); void (*rx_disable)(struct xgene_enet_pdata *pdata); + void (*set_speed)(struct xgene_enet_pdata *pdata); void (*set_mac_addr)(struct xgene_enet_pdata *pdata); void (*set_mss)(struct xgene_enet_pdata *pdata); void (*link_state)(struct work_struct *work); @@ -147,6 +149,8 @@ struct xgene_mac_ops { struct xgene_port_ops { int (*reset)(struct xgene_enet_pdata *pdata); + void (*clear)(struct xgene_enet_pdata *pdata, + struct xgene_enet_desc_ring *ring); void (*cle_bypass)(struct xgene_enet_pdata *pdata, u32 dst_ring_num, u16 bufpool_id); void (*shutdown)(struct xgene_enet_pdata *pdata); @@ -211,6 +215,7 @@ struct xgene_enet_pdata { u32 mss; u8 tx_delay; u8 rx_delay; + bool mdio_driver; }; struct xgene_indirect_ctl { @@ -220,34 +225,6 @@ struct xgene_indirect_ctl { void __iomem *cmd_done; }; -/* Set the specified value into a bit-field defined by its starting position - * and length within a single u64. - */ -static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val) -{ - return (val & ((1ULL << len) - 1)) << pos; -} - -#define SET_VAL(field, val) \ - xgene_enet_set_field_value(field ## _POS, field ## _LEN, val) - -#define SET_BIT(field) \ - xgene_enet_set_field_value(field ## _POS, 1, 1) - -/* Get the value from a bit-field defined by its starting position - * and length within the specified u64. - */ -static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) -{ - return (src >> pos) & ((1ULL << len) - 1); -} - -#define GET_VAL(field, src) \ - xgene_enet_get_field_value(field ## _POS, field ## _LEN, src) - -#define GET_BIT(field, src) \ - xgene_enet_get_field_value(field ## _POS, 1, src) - static inline struct device *ndev_to_dev(struct net_device *ndev) { return ndev->dev.parent; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index 78475512b..d12e9cbae 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -28,6 +28,12 @@ static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val) iowrite32(val, p->eth_csr_addr + offset); } +static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset, + u32 val) +{ + iowrite32(val, p->base_addr + offset); +} + static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p, u32 offset, u32 val) { @@ -93,6 +99,11 @@ static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset) return ioread32(p->eth_diag_csr_addr + offset); } +static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset) +{ + return ioread32(p->mcx_mac_csr_addr + offset); +} + static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr) { u32 rd_data; @@ -132,9 +143,17 @@ static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr) static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) { struct net_device *ndev = p->ndev; - u32 data; + u32 data, shutdown; int i = 0; + shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR); + data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); + + if (!shutdown && data == ~0U) { + netdev_dbg(ndev, "+ ecc_init done, skipping\n"); + return 0; + } + xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); do { usleep_range(100, 110); @@ -230,21 
+249,105 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *p) data = xgene_mii_phy_read(p, INT_PHY_ADDR, SGMII_BASE_PAGE_ABILITY_ADDR >> 2); + if (LINK_SPEED(data) == PHY_SPEED_1000) + p->phy_speed = SPEED_1000; + else if (LINK_SPEED(data) == PHY_SPEED_100) + p->phy_speed = SPEED_100; + else + p->phy_speed = SPEED_10; + return data & LINK_UP; } -static void xgene_sgmac_init(struct xgene_enet_pdata *p) +static void xgene_sgmii_configure(struct xgene_enet_pdata *p) { - u32 data, loop = 10; - u32 offset = p->port_id * 4; - u32 enet_spare_cfg_reg, rsif_config_reg; - u32 cfg_bypass_reg, rx_dv_gate_reg; - - xgene_sgmac_reset(p); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, + 0x8000); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); +} - /* Enable auto-negotiation */ - xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000); +static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p) +{ + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, + 0x8000); xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); +} + +static void xgene_sgmii_reset(struct xgene_enet_pdata *p) +{ + u32 value; + + if (p->phy_speed == SPEED_UNKNOWN) + return; + + value = xgene_mii_phy_read(p, INT_PHY_ADDR, + SGMII_BASE_PAGE_ABILITY_ADDR >> 2); + if (!(value & LINK_UP)) + xgene_sgmii_tbi_control_reset(p); +} + +static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p) +{ + u32 icm0_addr, icm2_addr, debug_addr; + u32 icm0, icm2, intf_ctl; + u32 mc2, value; + + xgene_sgmii_reset(p); + + if (p->enet_id == XGENE_ENET1) { + icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8; + icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4; + debug_addr = DEBUG_REG_ADDR; + } else { + icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR; + icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR; + debug_addr = XG_DEBUG_REG_ADDR; + } + + icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr); + icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr); + mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR); + intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR); + + switch (p->phy_speed) { + case SPEED_10: + ENET_INTERFACE_MODE2_SET(&mc2, 1); + intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE); + CFG_MACMODE_SET(&icm0, 0); + CFG_WAITASYNCRD_SET(&icm2, 500); + break; + case SPEED_100: + ENET_INTERFACE_MODE2_SET(&mc2, 1); + intf_ctl &= ~ENET_GHD_MODE; + intf_ctl |= ENET_LHD_MODE; + CFG_MACMODE_SET(&icm0, 1); + CFG_WAITASYNCRD_SET(&icm2, 80); + break; + default: + ENET_INTERFACE_MODE2_SET(&mc2, 2); + intf_ctl &= ~ENET_LHD_MODE; + intf_ctl |= ENET_GHD_MODE; + CFG_MACMODE_SET(&icm0, 2); + CFG_WAITASYNCRD_SET(&icm2, 16); + value = xgene_enet_rd_csr(p, debug_addr); + value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; + xgene_enet_wr_csr(p, debug_addr, value); + break; + } + + mc2 |= FULL_DUPLEX2 | PAD_CRC; + xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2); + xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl); + xgene_enet_wr_mcx_csr(p, icm0_addr, icm0); + xgene_enet_wr_mcx_csr(p, icm2_addr, icm2); +} + +static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p) +{ + u32 data, loop = 10; + + xgene_sgmii_configure(p); while (loop--) { data = xgene_mii_phy_read(p, INT_PHY_ADDR, @@ -255,17 +358,27 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p) } if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS)) netdev_err(p->ndev, "Auto-negotiation failed\n"); +} + +static void 
xgene_sgmac_init(struct xgene_enet_pdata *p) +{ + u32 enet_spare_cfg_reg, rsif_config_reg; + u32 cfg_bypass_reg, rx_dv_gate_reg; + u32 data, offset; - data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR); - ENET_INTERFACE_MODE2_SET(&data, 2); - xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2); - xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE); + if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver)) + xgene_sgmac_reset(p); + + xgene_sgmii_enable_autoneg(p); + xgene_sgmac_set_speed(p); + xgene_sgmac_set_mac_addr(p); if (p->enet_id == XGENE_ENET1) { enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR; rsif_config_reg = RSIF_CONFIG_REG_ADDR; cfg_bypass_reg = CFG_BYPASS_ADDR; - rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR; + offset = p->port_id * OFFSET_4; + rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset; } else { enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR; rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR; @@ -277,8 +390,6 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p) data |= MPA_IDLE_WITH_QMI_EMPTY; xgene_enet_wr_csr(p, enet_spare_cfg_reg, data); - xgene_sgmac_set_mac_addr(p); - /* Adjust MDC clock frequency */ data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR); MGMT_CLOCK_SEL_SET(&data, 7); @@ -292,7 +403,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p) /* Bypass traffic gating */ xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84); xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX); - xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0); + xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0); } static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set) @@ -331,17 +442,43 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) static int xgene_enet_reset(struct xgene_enet_pdata *p) { + struct device *dev = &p->pdev->dev; + if (!xgene_ring_mgr_init(p)) return -ENODEV; - if (!IS_ERR(p->clk)) { - clk_prepare_enable(p->clk); - clk_disable_unprepare(p->clk); - clk_prepare_enable(p->clk); + if (p->mdio_driver && p->enet_id == XGENE_ENET2) { + xgene_enet_config_ring_if_assoc(p); + return 0; } - xgene_enet_ecc_init(p); - xgene_enet_config_ring_if_assoc(p); + if (p->enet_id == XGENE_ENET2) + xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN); + + if (dev->of_node) { + if (!IS_ERR(p->clk)) { + clk_prepare_enable(p->clk); + udelay(5); + clk_disable_unprepare(p->clk); + udelay(5); + clk_prepare_enable(p->clk); + udelay(5); + } + } else { +#ifdef CONFIG_ACPI + if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST")) + acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), + "_RST", NULL, NULL); + else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI")) + acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), + "_INI", NULL, NULL); +#endif + } + + if (!p->port_id) { + xgene_enet_ecc_init(p); + xgene_enet_config_ring_if_assoc(p); + } return 0; } @@ -369,10 +506,53 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data); } +static void xgene_enet_clear(struct xgene_enet_pdata *pdata, + struct xgene_enet_desc_ring *ring) +{ + u32 addr, val, data; + + val = xgene_enet_ring_bufnum(ring->id); + + if (xgene_enet_is_bufpool(ring->id)) { + addr = ENET_CFGSSQMIFPRESET_ADDR; + data = BIT(val - 0x20); + } else { + addr = ENET_CFGSSQMIWQRESET_ADDR; + data = BIT(val); + } + + xgene_enet_wr_ring_if(pdata, addr, data); +} + static void xgene_enet_shutdown(struct xgene_enet_pdata *p) { - if (!IS_ERR(p->clk)) - clk_disable_unprepare(p->clk); + struct device *dev = &p->pdev->dev; + struct 
xgene_enet_desc_ring *ring; + u32 pb, val; + int i; + + pb = 0; + for (i = 0; i < p->rxq_cnt; i++) { + ring = p->rx_ring[i]->buf_pool; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val - 0x20); + } + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb); + + pb = 0; + for (i = 0; i < p->txq_cnt; i++) { + ring = p->tx_ring[i]; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val); + } + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb); + + if (dev->of_node) { + if (!IS_ERR(p->clk)) + clk_disable_unprepare(p->clk); + } } static void xgene_enet_link_state(struct work_struct *work) @@ -386,10 +566,11 @@ static void xgene_enet_link_state(struct work_struct *work) if (link) { if (!netif_carrier_ok(ndev)) { netif_carrier_on(ndev); - xgene_sgmac_init(p); + xgene_sgmac_set_speed(p); xgene_sgmac_rx_enable(p); xgene_sgmac_tx_enable(p); - netdev_info(ndev, "Link is Up - 1Gbps\n"); + netdev_info(ndev, "Link is Up - %dMbps\n", + p->phy_speed); } poll_interval = PHY_POLL_LINK_ON; } else { @@ -412,12 +593,14 @@ const struct xgene_mac_ops xgene_sgmac_ops = { .tx_enable = xgene_sgmac_tx_enable, .rx_disable = xgene_sgmac_rx_disable, .tx_disable = xgene_sgmac_tx_disable, + .set_speed = xgene_sgmac_set_speed, .set_mac_addr = xgene_sgmac_set_mac_addr, .link_state = xgene_enet_link_state }; const struct xgene_port_ops xgene_sgport_ops = { .reset = xgene_enet_reset, + .clear = xgene_enet_clear, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_enet_shutdown }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h index 002df5a67..3d0ba3744 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h @@ -24,6 +24,7 @@ #define PHY_ADDR(src) (((src)<<8) & GENMASK(12, 8)) #define REG_ADDR(src) ((src) & GENMASK(4, 0)) #define PHY_CONTROL(src) ((src) & GENMASK(15, 0)) +#define LINK_SPEED(src) (((src) & GENMASK(11, 10)) >> 10) #define INT_PHY_ADDR 0x1e #define SGMII_TBI_CONTROL_ADDR 0x44 #define SGMII_CONTROL_ADDR 0x00 @@ -34,6 +35,13 @@ #define LINK_UP BIT(15) #define MPA_IDLE_WITH_QMI_EMPTY BIT(12) #define SG_RX_DV_GATE_REG_0_ADDR 0x05fc +#define SGMII_EN 0x1 + +enum xgene_phy_speed { + PHY_SPEED_10, + PHY_SPEED_100, + PHY_SPEED_1000 +}; extern const struct xgene_mac_ops xgene_sgmac_ops; extern const struct xgene_port_ops xgene_sgport_ops; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index ba030dc19..9c6ad0dce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -258,13 +258,29 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata) static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { + struct device *dev = &pdata->pdev->dev; + if (!xgene_ring_mgr_init(pdata)) return -ENODEV; - if (!IS_ERR(pdata->clk)) { + if (dev->of_node) { clk_prepare_enable(pdata->clk); + udelay(5); clk_disable_unprepare(pdata->clk); + udelay(5); clk_prepare_enable(pdata->clk); + udelay(5); + } else { +#ifdef CONFIG_ACPI + if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) { + acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_RST", NULL, NULL); + } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), + "_INI")) { + acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_INI", NULL, NULL); + } +#endif } xgene_enet_ecc_init(pdata); @@ -292,8 +308,51 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, 
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata) { - if (!IS_ERR(pdata->clk)) - clk_disable_unprepare(pdata->clk); + struct device *dev = &pdata->pdev->dev; + struct xgene_enet_desc_ring *ring; + u32 pb, val; + int i; + + pb = 0; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]->buf_pool; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val - 0x20); + } + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb); + + pb = 0; + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val); + } + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb); + + if (dev->of_node) { + if (!IS_ERR(pdata->clk)) + clk_disable_unprepare(pdata->clk); + } +} + +static void xgene_enet_clear(struct xgene_enet_pdata *pdata, + struct xgene_enet_desc_ring *ring) +{ + u32 addr, val, data; + + val = xgene_enet_ring_bufnum(ring->id); + + if (xgene_enet_is_bufpool(ring->id)) { + addr = ENET_CFGSSQMIFPRESET_ADDR; + data = BIT(val - 0x20); + } else { + addr = ENET_CFGSSQMIWQRESET_ADDR; + data = BIT(val); + } + + xgene_enet_wr_ring_if(pdata, addr, data); } static void xgene_enet_link_state(struct work_struct *work) @@ -340,6 +399,7 @@ const struct xgene_mac_ops xgene_xgmac_ops = { const struct xgene_port_ops xgene_xgport_ops = { .reset = xgene_enet_reset, + .clear = xgene_enet_clear, .cle_bypass = xgene_enet_xgcle_bypass, .shutdown = xgene_enet_shutdown, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index 0a2dca8a1..f1ea485f9 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -65,9 +65,12 @@ #define XG_CFG_LINK_AGGR_RESUME_0_ADDR 0x0214 #define XG_LINK_STATUS_ADDR 0x0228 #define XG_TSIF_MSS_REG0_ADDR 0x02a4 +#define XG_DEBUG_REG_ADDR 0x0400 #define XG_ENET_SPARE_CFG_REG_ADDR 0x040c #define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 #define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 +#define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0 +#define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8 extern const struct xgene_mac_ops xgene_xgmac_ops; extern const struct xgene_port_ops xgene_xgport_ops; diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index ca562bc03..e4feb712d 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -134,7 +134,6 @@ struct arc_emac_priv { /* Devices */ struct device *dev; - struct phy_device *phy_dev; struct mii_bus *bus; struct arc_emac_mdio_bus_data bus_data; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index a3a9392a4..b0da9693f 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -47,7 +47,7 @@ static inline int arc_emac_tx_avail(struct arc_emac_priv *priv) static void arc_emac_adjust_link(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); - struct phy_device *phy_dev = priv->phy_dev; + struct phy_device *phy_dev = ndev->phydev; unsigned int reg, state_changed = 0; if (priv->link != phy_dev->link) { @@ -79,46 +79,6 @@ static void arc_emac_adjust_link(struct net_device *ndev) phy_print_status(phy_dev); } -/** - * arc_emac_get_settings - Get PHY settings. - * @ndev: Pointer to net_device structure. - * @cmd: Pointer to ethtool_cmd structure. - * - * This implements ethtool command for getting PHY settings. If PHY could - * not be found, the function returns -ENODEV. 
This function calls the - * relevant PHY ethtool API to get the PHY settings. - * Issue "ethtool ethX" under linux prompt to execute this function. - */ -static int arc_emac_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct arc_emac_priv *priv = netdev_priv(ndev); - - return phy_ethtool_gset(priv->phy_dev, cmd); -} - -/** - * arc_emac_set_settings - Set PHY settings as passed in the argument. - * @ndev: Pointer to net_device structure. - * @cmd: Pointer to ethtool_cmd structure. - * - * This implements ethtool command for setting various PHY settings. If PHY - * could not be found, the function returns -ENODEV. This function calls the - * relevant PHY ethtool API to set the PHY. - * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this - * function. - */ -static int arc_emac_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct arc_emac_priv *priv = netdev_priv(ndev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - return phy_ethtool_sset(priv->phy_dev, cmd); -} - /** * arc_emac_get_drvinfo - Get EMAC driver information. * @ndev: Pointer to net_device structure. @@ -137,10 +97,10 @@ static void arc_emac_get_drvinfo(struct net_device *ndev, } static const struct ethtool_ops arc_emac_ethtool_ops = { - .get_settings = arc_emac_get_settings, - .set_settings = arc_emac_set_settings, .get_drvinfo = arc_emac_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; #define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK) @@ -403,7 +363,7 @@ static void arc_emac_poll_controller(struct net_device *dev) static int arc_emac_open(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); - struct phy_device *phy_dev = priv->phy_dev; + struct phy_device *phy_dev = ndev->phydev; int i; phy_dev->autoneg = AUTONEG_ENABLE; @@ -474,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev) /* Enable EMAC */ arc_reg_or(priv, R_CTRL, EN_MASK); - phy_start_aneg(priv->phy_dev); + phy_start_aneg(ndev->phydev); netif_start_queue(ndev); @@ -772,6 +732,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) struct device *dev = ndev->dev.parent; struct resource res_regs; struct device_node *phy_node; + struct phy_device *phydev = NULL; struct arc_emac_priv *priv; const char *mac_addr; unsigned int id, clock_frequency, irq; @@ -788,14 +749,16 @@ int arc_emac_probe(struct net_device *ndev, int interface) err = of_address_to_resource(dev->of_node, 0, &res_regs); if (err) { dev_err(dev, "failed to retrieve registers base from device tree\n"); - return -ENODEV; + err = -ENODEV; + goto out_put_node; } /* Get IRQ from device tree */ irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { dev_err(dev, "failed to retrieve value from device tree\n"); - return -ENODEV; + err = -ENODEV; + goto out_put_node; } ndev->netdev_ops = &arc_emac_netdev_ops; @@ -808,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface) priv->dev = dev; priv->regs = devm_ioremap_resource(dev, &res_regs); - if (IS_ERR(priv->regs)) - return PTR_ERR(priv->regs); + if (IS_ERR(priv->regs)) { + err = PTR_ERR(priv->regs); + goto out_put_node; + } dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); @@ -817,7 +782,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) err = clk_prepare_enable(priv->clk); if (err) { dev_err(dev, "failed to enable clock\n"); - return err; + goto out_put_node; } clock_frequency = 
clk_get_rate(priv->clk); @@ -826,7 +791,8 @@ int arc_emac_probe(struct net_device *ndev, int interface) if (of_property_read_u32(dev->of_node, "clock-frequency", &clock_frequency)) { dev_err(dev, "failed to retrieve from device tree\n"); - return -EINVAL; + err = -EINVAL; + goto out_put_node; } } @@ -887,16 +853,16 @@ int arc_emac_probe(struct net_device *ndev, int interface) goto out_clken; } - priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, - interface); - if (!priv->phy_dev) { + phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, + interface); + if (!phydev) { dev_err(dev, "of_phy_connect() failed\n"); err = -ENODEV; goto out_mdio; } dev_info(dev, "connected to %s phy with id 0x%x\n", - priv->phy_dev->drv->name, priv->phy_dev->phy_id); + phydev->drv->name, phydev->phy_id); netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); @@ -906,17 +872,20 @@ int arc_emac_probe(struct net_device *ndev, int interface) goto out_netif_api; } + of_node_put(phy_node); return 0; out_netif_api: netif_napi_del(&priv->napi); - phy_disconnect(priv->phy_dev); - priv->phy_dev = NULL; + phy_disconnect(phydev); out_mdio: arc_mdio_remove(priv); out_clken: if (priv->clk) clk_disable_unprepare(priv->clk); +out_put_node: + of_node_put(phy_node); + return err; } EXPORT_SYMBOL_GPL(arc_emac_probe); @@ -925,8 +894,7 @@ int arc_emac_remove(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); - phy_disconnect(priv->phy_dev); - priv->phy_dev = NULL; + phy_disconnect(ndev->phydev); arc_mdio_remove(priv); unregister_netdev(ndev); netif_napi_del(&priv->napi); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index e708e360a..4eb17daef 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1251,7 +1251,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct alx_priv *alx; struct alx_hw *hw; bool phy_configured; - int bars, err; + int err; err = pci_enable_device_mem(pdev); if (err) @@ -1271,11 +1271,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - bars = pci_select_bars(pdev, IORESOURCE_MEM); - err = pci_request_selected_regions(pdev, bars, alx_drv_name); + err = pci_request_mem_regions(pdev, alx_drv_name); if (err) { dev_err(&pdev->dev, - "pci_request_selected_regions failed(bars:%d)\n", bars); + "pci_request_mem_regions failed\n"); goto out_pci_disable; } @@ -1401,7 +1400,7 @@ out_unmap: out_free_netdev: free_netdev(netdev); out_pci_release: - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(pdev); out_pci_disable: pci_disable_device(pdev); return err; @@ -1420,8 +1419,7 @@ static void alx_remove(struct pci_dev *pdev) unregister_netdev(alx->dev); iounmap(hw->hw_addr); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@ -1547,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = { .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, diff --git a/drivers/net/ethernet/atheros/alx/reg.h 
b/drivers/net/ethernet/atheros/alx/reg.h index 0959e6824..1fc2d8522 100644 --- a/drivers/net/ethernet/atheros/alx/reg.h +++ b/drivers/net/ethernet/atheros/alx/reg.h @@ -38,6 +38,7 @@ #define ALX_DEV_ID_AR8161 0x1091 #define ALX_DEV_ID_E2200 0xe091 #define ALX_DEV_ID_E2400 0xe0a1 +#define ALX_DEV_ID_E2500 0xe0b1 #define ALX_DEV_ID_AR8162 0x1090 #define ALX_DEV_ID_AR8171 0x10A1 #define ALX_DEV_ID_AR8172 0x10A0 diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 1a3555d03..b047fd607 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -632,7 +632,7 @@ static void nb8800_mac_config(struct net_device *dev) static void nb8800_pause_config(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; u32 rxcr; if (priv->pause_aneg) { @@ -665,7 +665,7 @@ static void nb8800_pause_config(struct net_device *dev) static void nb8800_link_reconfigure(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; int change = 0; if (phydev->link) { @@ -691,7 +691,7 @@ static void nb8800_link_reconfigure(struct net_device *dev) } if (change) - phy_print_status(priv->phydev); + phy_print_status(phydev); } static void nb8800_update_mac_addr(struct net_device *dev) @@ -936,9 +936,10 @@ static int nb8800_dma_stop(struct net_device *dev) static void nb8800_pause_adv(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; u32 adv = 0; - if (!priv->phydev) + if (!phydev) return; if (priv->pause_rx) @@ -946,13 +947,14 @@ static void nb8800_pause_adv(struct net_device *dev) if (priv->pause_tx) adv ^= ADVERTISED_Asym_Pause; - priv->phydev->supported |= adv; - priv->phydev->advertising |= adv; + phydev->supported |= adv; + phydev->advertising |= adv; } static int nb8800_open(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev; int err; /* clear any pending interrupts */ @@ -970,10 +972,10 @@ static int nb8800_open(struct net_device *dev) nb8800_mac_rx(dev, true); nb8800_mac_tx(dev, true); - priv->phydev = of_phy_connect(dev, priv->phy_node, - nb8800_link_reconfigure, 0, - priv->phy_mode); - if (!priv->phydev) + phydev = of_phy_connect(dev, priv->phy_node, + nb8800_link_reconfigure, 0, + priv->phy_mode); + if (!phydev) goto err_free_irq; nb8800_pause_adv(dev); @@ -983,7 +985,7 @@ static int nb8800_open(struct net_device *dev) netif_start_queue(dev); nb8800_start_rx(dev); - phy_start(priv->phydev); + phy_start(phydev); return 0; @@ -998,8 +1000,9 @@ err_free_dma: static int nb8800_stop(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; - phy_stop(priv->phydev); + phy_stop(phydev); netif_stop_queue(dev); napi_disable(&priv->napi); @@ -1008,8 +1011,7 @@ static int nb8800_stop(struct net_device *dev) nb8800_mac_rx(dev, false); nb8800_mac_tx(dev, false); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + phy_disconnect(phydev); free_irq(dev->irq, dev); @@ -1020,9 +1022,7 @@ static int nb8800_stop(struct net_device *dev) static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct nb8800_priv *priv = netdev_priv(dev); - - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static const struct net_device_ops 
nb8800_netdev_ops = { @@ -1036,34 +1036,14 @@ static const struct net_device_ops nb8800_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct nb8800_priv *priv = netdev_priv(dev); - - if (!priv->phydev) - return -ENODEV; - - return phy_ethtool_gset(priv->phydev, cmd); -} - -static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct nb8800_priv *priv = netdev_priv(dev); - - if (!priv->phydev) - return -ENODEV; - - return phy_ethtool_sset(priv->phydev, cmd); -} - static int nb8800_nway_reset(struct net_device *dev) { - struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; - if (!priv->phydev) + if (!phydev) return -ENODEV; - return genphy_restart_aneg(priv->phydev); + return genphy_restart_aneg(phydev); } static void nb8800_get_pauseparam(struct net_device *dev, @@ -1080,6 +1060,7 @@ static int nb8800_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pp) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; priv->pause_aneg = pp->autoneg; priv->pause_rx = pp->rx_pause; @@ -1089,8 +1070,8 @@ static int nb8800_set_pauseparam(struct net_device *dev, if (!priv->pause_aneg) nb8800_pause_config(dev); - else if (priv->phydev) - phy_start_aneg(priv->phydev); + else if (phydev) + phy_start_aneg(phydev); return 0; } @@ -1183,8 +1164,6 @@ static void nb8800_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops nb8800_ethtool_ops = { - .get_settings = nb8800_get_settings, - .set_settings = nb8800_set_settings, .nway_reset = nb8800_nway_reset, .get_link = ethtool_op_get_link, .get_pauseparam = nb8800_get_pauseparam, @@ -1192,6 +1171,8 @@ static const struct ethtool_ops nb8800_ethtool_ops = { .get_sset_count = nb8800_get_sset_count, .get_strings = nb8800_get_strings, .get_ethtool_stats = nb8800_get_ethtool_stats, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int nb8800_hw_init(struct net_device *dev) @@ -1438,7 +1419,7 @@ static int nb8800_probe(struct platform_device *pdev) if (ops && ops->reset) { ret = ops->reset(dev); if (ret) - goto err_free_dev; + goto err_disable_clk; } bus = devm_mdiobus_alloc(&pdev->dev); @@ -1523,6 +1504,7 @@ static int nb8800_probe(struct platform_device *pdev) err_free_dma: nb8800_dma_free(dev); err_free_bus: + of_node_put(priv->phy_node); mdiobus_unregister(bus); err_disable_clk: clk_disable_unprepare(priv->clk); @@ -1538,6 +1520,7 @@ static int nb8800_remove(struct platform_device *pdev) struct nb8800_priv *priv = netdev_priv(ndev); unregister_netdev(ndev); + of_node_put(priv->phy_node); mdiobus_unregister(priv->mii_bus); diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h index e5adbc2aa..6ec4a956e 100644 --- a/drivers/net/ethernet/aurora/nb8800.h +++ b/drivers/net/ethernet/aurora/nb8800.h @@ -284,7 +284,6 @@ struct nb8800_priv { struct mii_bus *mii_bus; struct device_node *phy_node; - struct phy_device *phydev; /* PHY connection type from DT */ int phy_mode; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 18042c246..bd8c80c0b 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -139,31 +139,19 @@ config BNX2X_SRIOV Virtualization support in the 578xx and 57712 products. 
This allows for virtual function acceleration in virtual environments. -config BNX2X_VXLAN - bool "Virtual eXtensible Local Area Network support" - default n - depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m) - ---help--- - This enables hardward offload support for VXLAN protocol over the - NetXtremeII series adapters. - Say Y here if you want to enable hardware offload support for - Virtual eXtensible Local Area Network (VXLAN) in the driver. - -config BNX2X_GENEVE - bool "Generic Network Virtualization Encapsulation (GENEVE) support" - depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m) - ---help--- - This allows one to create GENEVE virtual interfaces that provide - Layer 2 Networks over Layer 3 Networks. GENEVE is often used - to tunnel virtual network infrastructure in virtualized environments. - Say Y here if you want to enable hardware offload support for - Generic Network Virtualization Encapsulation (GENEVE) in the driver. - config BGMAC - tristate "BCMA bus GBit core support" + tristate + help + This enables the integrated ethernet controller support for many + Broadcom (mostly iProc) SoCs. An appropriate bus interface driver + needs to be enabled to select this. + +config BGMAC_BCMA + tristate "Broadcom iProc GBit BCMA support" depends on BCMA && BCMA_HOST_SOC depends on HAS_DMA depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST + select BGMAC select PHYLIB select FIXED_PHY ---help--- @@ -172,6 +160,19 @@ config BGMAC In case of using this driver on BCM4706 it's also requires to enable BCMA_DRIVER_GMAC_CMN to make it work. +config BGMAC_PLATFORM + tristate "Broadcom iProc GBit platform support" + depends on HAS_DMA + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on OF + select BGMAC + select PHYLIB + select FIXED_PHY + default ARCH_BCM_IPROC + ---help--- + Say Y here if you want to use the Broadcom iProc Gigabit Ethernet + controller through the generic platform interface + config SYSTEMPORT tristate "Broadcom SYSTEMPORT internal MAC support" depends on OF @@ -186,7 +187,6 @@ config SYSTEMPORT config BNXT tristate "Broadcom NetXtreme-C/E support" depends on PCI - depends on VXLAN || VXLAN=n select FW_LOADER select LIBCRC32C ---help--- diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index 00584d78b..79f2372c6 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -11,5 +11,7 @@ obj-$(CONFIG_BNX2X) += bnx2x/ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BGMAC) += bgmac.o +obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o +obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o obj-$(CONFIG_BNXT) += bnxt/ diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 87c6b5bdd..6c8bc5fad 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1859,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev) } else { /* run platform code to initialize PHY device */ - if (pd->mii_config && + if (pd && pd->mii_config && pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii)) { dev_err(&pdev->dev, "unable to configure mdio bus\n"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index bfa26a259..b2d30863c 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -96,28 +96,6 @@ 
static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, } /* Ethtool operations */ -static int bcm_sysport_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct bcm_sysport_priv *priv = netdev_priv(dev); - - if (!netif_running(dev)) - return -EINVAL; - - return phy_ethtool_sset(priv->phydev, cmd); -} - -static int bcm_sysport_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct bcm_sysport_priv *priv = netdev_priv(dev); - - if (!netif_running(dev)) - return -EINVAL; - - return phy_ethtool_gset(priv->phydev, cmd); -} - static int bcm_sysport_set_rx_csum(struct net_device *dev, netdev_features_t wanted) { @@ -1127,7 +1105,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev) static void bcm_sysport_adj_link(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; unsigned int changed = 0; u32 cmd_bits = 0, reg; @@ -1182,7 +1160,7 @@ static void bcm_sysport_adj_link(struct net_device *dev) umac_writel(priv, reg, UMAC_CMD); } - phy_print_status(priv->phydev); + phy_print_status(phydev); } static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, @@ -1525,7 +1503,7 @@ static void bcm_sysport_netif_start(struct net_device *dev) /* Enable RX interrupt and TX ring full interrupt */ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); - phy_start(priv->phydev); + phy_start(dev->phydev); /* Enable TX interrupts for the 32 TXQs */ intrl2_1_mask_clear(priv, 0xffffffff); @@ -1546,6 +1524,7 @@ static void rbuf_init(struct bcm_sysport_priv *priv) static int bcm_sysport_open(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); + struct phy_device *phydev; unsigned int i; int ret; @@ -1570,9 +1549,9 @@ static int bcm_sysport_open(struct net_device *dev) /* Read CRC forward */ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); - priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, - 0, priv->phy_interface); - if (!priv->phydev) { + phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, + 0, priv->phy_interface); + if (!phydev) { netdev_err(dev, "could not attach to PHY\n"); return -ENODEV; } @@ -1650,7 +1629,7 @@ out_free_tx_ring: out_free_irq0: free_irq(priv->irq0, dev); out_phy_disconnect: - phy_disconnect(priv->phydev); + phy_disconnect(phydev); return ret; } @@ -1661,7 +1640,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) /* stop all software from updating hardware */ netif_tx_stop_all_queues(dev); napi_disable(&priv->napi); - phy_stop(priv->phydev); + phy_stop(dev->phydev); /* mask all interrupts */ intrl2_0_mask_set(priv, 0xffffffff); @@ -1708,14 +1687,12 @@ static int bcm_sysport_stop(struct net_device *dev) free_irq(priv->irq1, dev); /* Disconnect from PHY */ - phy_disconnect(priv->phydev); + phy_disconnect(dev->phydev); return 0; } static struct ethtool_ops bcm_sysport_ethtool_ops = { - .get_settings = bcm_sysport_get_settings, - .set_settings = bcm_sysport_set_settings, .get_drvinfo = bcm_sysport_get_drvinfo, .get_msglevel = bcm_sysport_get_msglvl, .set_msglevel = bcm_sysport_set_msglvl, @@ -1727,6 +1704,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = { .set_wol = bcm_sysport_set_wol, .get_coalesce = bcm_sysport_get_coalesce, .set_coalesce = bcm_sysport_set_coalesce, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct 
net_device_ops bcm_sysport_netdev_ops = { @@ -1929,7 +1908,7 @@ static int bcm_sysport_suspend(struct device *d) bcm_sysport_netif_stop(dev); - phy_suspend(priv->phydev); + phy_suspend(dev->phydev); netif_device_detach(dev); @@ -2055,7 +2034,7 @@ static int bcm_sysport_resume(struct device *d) goto out_free_rx_ring; } - phy_resume(priv->phydev); + phy_resume(dev->phydev); bcm_sysport_netif_start(dev); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f28bf545d..1c82e3da6 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -670,7 +670,6 @@ struct bcm_sysport_priv { /* PHY device */ struct device_node *phy_dn; - struct phy_device *phydev; phy_interface_t phy_interface; int old_pause; int old_link; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c new file mode 100644 index 000000000..7c19c8e2b --- /dev/null +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -0,0 +1,266 @@ +/* + * Driver for (BCM4706)? GBit MAC core on BCMA bus. + * + * Copyright (C) 2012 Rafał Miłecki + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bcma/bcma.h> +#include <linux/brcmphy.h> +#include "bgmac.h" + +struct bcma_mdio { + struct bcma_device *core; + u8 phyaddr; +}; + +static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, + u32 value, int timeout) +{ + u32 val; + int i; + + for (i = 0; i < timeout / 10; i++) { + val = bcma_read32(core, reg); + if ((val & mask) == value) + return true; + udelay(10); + } + dev_err(&core->dev, "Timeout waiting for reg 0x%X\n", reg); + return false; +} + +/************************************************** + * PHY ops + **************************************************/ + +static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) +{ + struct bcma_device *core; + u16 phy_access_addr; + u16 phy_ctl_addr; + u32 tmp; + + BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK); + BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK); + BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT); + BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK); + BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT); + BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE); + BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START); + BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK); + BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK); + BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); + BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); + + if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bcma_mdio->core->bus->drv_gmac_cmn.core; + phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; + phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; + } else { + core = bcma_mdio->core; + phy_access_addr = BGMAC_PHY_ACCESS; + phy_ctl_addr = BGMAC_PHY_CNTL; + } + + tmp = bcma_read32(core, phy_ctl_addr); + tmp &= ~BGMAC_PC_EPA_MASK; + tmp |= phyaddr; + bcma_write32(core, phy_ctl_addr, tmp); + + tmp = BGMAC_PA_START; + tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; + tmp |= reg << BGMAC_PA_REG_SHIFT; + bcma_write32(core, phy_access_addr, tmp); + + if (!bcma_mdio_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, + 1000)) { + dev_err(&core->dev, "Reading PHY %d register 0x%X failed\n", + phyaddr, reg); + return 0xffff; + } + + return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
+} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ +static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, + u16 value) +{ + struct bcma_device *core; + u16 phy_access_addr; + u16 phy_ctl_addr; + u32 tmp; + + if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bcma_mdio->core->bus->drv_gmac_cmn.core; + phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; + phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; + } else { + core = bcma_mdio->core; + phy_access_addr = BGMAC_PHY_ACCESS; + phy_ctl_addr = BGMAC_PHY_CNTL; + } + + tmp = bcma_read32(core, phy_ctl_addr); + tmp &= ~BGMAC_PC_EPA_MASK; + tmp |= phyaddr; + bcma_write32(core, phy_ctl_addr, tmp); + + bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); + if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) + dev_warn(&core->dev, "Error setting MDIO int\n"); + + tmp = BGMAC_PA_START; + tmp |= BGMAC_PA_WRITE; + tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; + tmp |= reg << BGMAC_PA_REG_SHIFT; + tmp |= value; + bcma_write32(core, phy_access_addr, tmp); + + if (!bcma_mdio_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, + 1000)) { + dev_err(&core->dev, "Writing to PHY %d register 0x%X failed\n", + phyaddr, reg); + return -ETIMEDOUT; + } + + return 0; +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ +static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio) +{ + struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo; + u8 i; + + if (ci->id == BCMA_CHIP_ID_BCM5356) { + for (i = 0; i < 5; i++) { + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b); + bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + } + } + if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || + (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { + struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc; + + bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); + bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); + for (i = 0; i < 5; i++) { + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296); + bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073); + bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073); + bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6); + bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + } + } +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ +static int bcma_mdio_phy_reset(struct mii_bus *bus) +{ + struct bcma_mdio *bcma_mdio = bus->priv; + u8 phyaddr = bcma_mdio->phyaddr; + + if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS) + return 0; + + bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET); + udelay(100); + if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET) + dev_err(&bcma_mdio->core->dev, "PHY reset failed\n"); + bcma_mdio_phy_init(bcma_mdio); + + return 0; +} + +/************************************************** + * MII + **************************************************/ + +static int bcma_mdio_mii_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return bcma_mdio_phy_read(bus->priv, mii_id, regnum); +} + +static int 
bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+			       u16 value)
+{
+	return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
+{
+	struct bcma_mdio *bcma_mdio;
+	struct mii_bus *mii_bus;
+	int err;
+
+	bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL);
+	if (!bcma_mdio)
+		return ERR_PTR(-ENOMEM);
+
+	mii_bus = mdiobus_alloc();
+	if (!mii_bus) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	mii_bus->name = "bcma_mdio mii bus";
+	sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num,
+		core->core_unit);
+	mii_bus->priv = bcma_mdio;
+	mii_bus->read = bcma_mdio_mii_read;
+	mii_bus->write = bcma_mdio_mii_write;
+	mii_bus->reset = bcma_mdio_phy_reset;
+	mii_bus->parent = &core->dev;
+	mii_bus->phy_mask = ~(1 << phyaddr);
+
+	bcma_mdio->core = core;
+	bcma_mdio->phyaddr = phyaddr;
+
+	err = mdiobus_register(mii_bus);
+	if (err) {
+		dev_err(&core->dev, "Registration of mii bus failed\n");
+		goto err_free_bus;
+	}
+
+	return mii_bus;
+
+err_free_bus:
+	mdiobus_free(mii_bus);
+err:
+	kfree(bcma_mdio);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(bcma_mdio_mii_register);
+
+void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
+{
+	struct bcma_mdio *bcma_mdio;
+
+	if (!mii_bus)
+		return;
+
+	bcma_mdio = mii_bus->priv;
+
+	mdiobus_unregister(mii_bus);
+	mdiobus_free(mii_bus);
+	kfree(bcma_mdio);
+}
+EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
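A short usage sketch (editorial, not part of this patch): the BCMA glue in bgmac-bcma.c below consumes this exported pair by registering the bus at probe time, stashing it in struct bgmac, and unregistering it on the error path and on remove:

	struct mii_bus *mii_bus;

	mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
	if (IS_ERR(mii_bus))
		return PTR_ERR(mii_bus);
	bgmac->mii_bus = mii_bus;

	/* ... device lifetime ... */

	bcma_mdio_mii_unregister(bgmac->mii_bus);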
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
new file mode 100644
index 000000000..625235db6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -0,0 +1,315 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
+#include <linux/etherdevice.h>
+#include "bgmac.h"
+
+static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
+{
+	switch (core->bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4707:
+	case BCMA_CHIP_ID_BCM47094:
+	case BCMA_CHIP_ID_BCM53018:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**************************************************
+ * BCMA bus ops
+ **************************************************/
+
+static u32 bcma_bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+	return bcma_read32(bgmac->bcma.core, offset);
+}
+
+static void bcma_bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+	bcma_write32(bgmac->bcma.core, offset, value);
+}
+
+static u32 bcma_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
+{
+	return bcma_aread32(bgmac->bcma.core, offset);
+}
+
+static void bcma_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+	return bcma_awrite32(bgmac->bcma.core, offset, value);
+}
+
+static bool bcma_bgmac_clk_enabled(struct bgmac *bgmac)
+{
+	return bcma_core_is_enabled(bgmac->bcma.core);
+}
+
+static void bcma_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
+{
+	bcma_core_enable(bgmac->bcma.core, flags);
+}
+
+static void bcma_bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset,
+				       u32 mask, u32 set)
+{
+	struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
+
+	bcma_chipco_chipctl_maskset(cc, offset, mask, set);
+}
+
+static u32 bcma_bgmac_get_bus_clock(struct bgmac *bgmac)
+{
+	struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
+
+	return bcma_pmu_get_bus_clock(cc);
+}
+
+static void bcma_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset, u32 mask,
+				     u32 set)
+{
+	bcma_maskset32(bgmac->bcma.cmn, offset, mask, set);
+}
+
+static const struct bcma_device_id bgmac_bcma_tbl[] = {
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT,
+		  BCMA_ANY_REV, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV,
+		  BCMA_ANY_CLASS),
+	{},
+};
+MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
+static int bgmac_probe(struct bcma_device *core)
+{
+	struct ssb_sprom *sprom = &core->bus->sprom;
+	struct mii_bus *mii_bus;
+	struct bgmac *bgmac;
+	u8 *mac;
+	int err;
+
+	bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL);
+	if (!bgmac)
+		return -ENOMEM;
+
+	bgmac->bcma.core = core;
+	bgmac->dev = &core->dev;
+	bgmac->dma_dev = core->dma_dev;
+	bgmac->irq = core->irq;
+
+	bcma_set_drvdata(core, bgmac);
+
+	switch (core->core_unit) {
+	case 0:
+		mac = sprom->et0mac;
+		break;
+	case 1:
+		mac = sprom->et1mac;
+		break;
+	case 2:
+		mac = sprom->et2mac;
+		break;
+	default:
+		dev_err(bgmac->dev, "Unsupported core_unit %d\n",
+			core->core_unit);
+		err = -ENOTSUPP;
+		goto err;
+	}
+
+	ether_addr_copy(bgmac->mac_addr, mac);
+
+	/* On BCM4706 we need common core to access PHY */
+	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+	    !core->bus->drv_gmac_cmn.core) {
+		dev_err(bgmac->dev, "GMAC CMN core not found (required for BCM4706)\n");
+		err = -ENODEV;
+		goto err;
+	}
+	bgmac->bcma.cmn = core->bus->drv_gmac_cmn.core;
+
+	switch (core->core_unit) {
+	case 0:
+		bgmac->phyaddr = sprom->et0phyaddr;
+		break;
+	case 1:
+		bgmac->phyaddr = sprom->et1phyaddr;
+		break;
+	case 2:
+		bgmac->phyaddr = sprom->et2phyaddr;
+		break;
+	}
+	bgmac->phyaddr &= BGMAC_PHY_MASK;
+	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
+		dev_err(bgmac->dev, "No PHY found\n");
+		err = -ENODEV;
+		goto err;
+	}
+	dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr,
+
bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); + + if (!bgmac_is_bcm4707_family(core)) { + mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); + if (IS_ERR(mii_bus)) { + err = PTR_ERR(mii_bus); + goto err; + } + + bgmac->mii_bus = mii_bus; + } + + if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) { + dev_err(bgmac->dev, "PCI setup not implemented\n"); + err = -ENOTSUPP; + goto err1; + } + + bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo & + BGMAC_BFL_ENETROBO); + if (bgmac->has_robosw) + dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n"); + + if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) + dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n"); + + /* Feature Flags */ + switch (core->bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM5357: + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; + if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) { + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; + } + if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358) + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII; + break; + case BCMA_CHIP_ID_BCM53572: + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; + if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) { + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + } + break; + case BCMA_CHIP_ID_BCM4749: + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; + if (core->bus->chipinfo.pkg == 10) { + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + } + break; + case BCMA_CHIP_ID_BCM4716: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + /* fallthrough */ + case BCMA_CHIP_ID_BCM47162: + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + break; + /* bcm4707_family */ + case BCMA_CHIP_ID_BCM4707: + case BCMA_CHIP_ID_BCM47094: + case BCMA_CHIP_ID_BCM53018: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; + bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; + break; + default: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + } + + if (!bgmac_is_bcm4707_family(core) && core->id.rev > 2) + bgmac->feature_flags |= BGMAC_FEAT_MISC_PLL_REQ; + + if (core->id.id == BCMA_CORE_4706_MAC_GBIT) { + bgmac->feature_flags |= BGMAC_FEAT_CMN_PHY_CTL; + bgmac->feature_flags |= BGMAC_FEAT_NO_CLR_MIB; + } + + if (core->id.rev >= 4) { + bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4; + bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP; + bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP; + } + + bgmac->read = bcma_bgmac_read; + bgmac->write = bcma_bgmac_write; + bgmac->idm_read = bcma_bgmac_idm_read; + bgmac->idm_write = bcma_bgmac_idm_write; + bgmac->clk_enabled = bcma_bgmac_clk_enabled; + bgmac->clk_enable = bcma_bgmac_clk_enable; + bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset; + bgmac->get_bus_clock = bcma_bgmac_get_bus_clock; + bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32; + + err = 
bgmac_enet_probe(bgmac);
+	if (err)
+		goto err1;
+
+	return 0;
+
+err1:
+	bcma_mdio_mii_unregister(bgmac->mii_bus);
+err:
+	kfree(bgmac);
+	bcma_set_drvdata(core, NULL);
+
+	return err;
+}
+
+static void bgmac_remove(struct bcma_device *core)
+{
+	struct bgmac *bgmac = bcma_get_drvdata(core);
+
+	bcma_mdio_mii_unregister(bgmac->mii_bus);
+	bgmac_enet_remove(bgmac);
+	bcma_set_drvdata(core, NULL);
+	kfree(bgmac);
+}
+
+static struct bcma_driver bgmac_bcma_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= bgmac_bcma_tbl,
+	.probe		= bgmac_probe,
+	.remove		= bgmac_remove,
+};
+
+static int __init bgmac_init(void)
+{
+	int err;
+
+	err = bcma_driver_register(&bgmac_bcma_driver);
+	if (err)
+		return err;
+	pr_info("Broadcom 47xx GBit MAC driver loaded\n");
+
+	return 0;
+}
+
+static void __exit bgmac_exit(void)
+{
+	bcma_driver_unregister(&bgmac_bcma_driver);
+}
+
+module_init(bgmac_init)
+module_exit(bgmac_exit)
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
new file mode 100644
index 000000000..be52f270c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
+#include <linux/etherdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include "bgmac.h"
+
+static u32 platform_bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+	return readl(bgmac->plat.base + offset);
+}
+
+static void platform_bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+	writel(value, bgmac->plat.base + offset);
+}
+
+static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
+{
+	return readl(bgmac->plat.idm_base + offset);
+}
+
+static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+	return writel(value, bgmac->plat.idm_base + offset);
+}
+
+static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
+{
+	if ((bgmac_idm_read(bgmac, BCMA_IOCTL) &
+	     (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK)
+		return false;
+	if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
+		return false;
+	return true;
+}
+
+static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
+{
+	bgmac_idm_write(bgmac, BCMA_IOCTL,
+			(BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
+	bgmac_idm_read(bgmac, BCMA_IOCTL);
+
+	bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0);
+	bgmac_idm_read(bgmac, BCMA_RESET_CTL);
+	udelay(1);
+
+	bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
+	bgmac_idm_read(bgmac, BCMA_IOCTL);
+	udelay(1);
+}
+
+static void platform_bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset,
+					   u32 mask, u32 set)
+{
+	/* This shouldn't be encountered */
+	WARN_ON(1);
+}
+
+static u32 platform_bgmac_get_bus_clock(struct bgmac *bgmac)
+{
+	/* This shouldn't be encountered */
+	WARN_ON(1);
+
+	return 0;
+}
+
+static void platform_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset,
+					 u32 mask, u32 set)
+{
+	/* This shouldn't be encountered */
+	WARN_ON(1);
+}
+
+static int bgmac_probe(struct platform_device *pdev)
+{
+
struct device_node *np = pdev->dev.of_node; + struct bgmac *bgmac; + struct resource *regs; + const u8 *mac_addr; + + bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL); + if (!bgmac) + return -ENOMEM; + + platform_set_drvdata(pdev, bgmac); + + /* Set the features of the 4707 family */ + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; + bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; + bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4; + bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP; + bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP; + + bgmac->dev = &pdev->dev; + bgmac->dma_dev = &pdev->dev; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + ether_addr_copy(bgmac->mac_addr, mac_addr); + else + dev_warn(&pdev->dev, "MAC address not present in device tree\n"); + + bgmac->irq = platform_get_irq(pdev, 0); + if (bgmac->irq < 0) { + dev_err(&pdev->dev, "Unable to obtain IRQ\n"); + return bgmac->irq; + } + + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base"); + if (!regs) { + dev_err(&pdev->dev, "Unable to obtain base resource\n"); + return -EINVAL; + } + + bgmac->plat.base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.base)) + return PTR_ERR(bgmac->plat.base); + + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); + if (!regs) { + dev_err(&pdev->dev, "Unable to obtain idm resource\n"); + return -EINVAL; + } + + bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.idm_base)) + return PTR_ERR(bgmac->plat.idm_base); + + bgmac->read = platform_bgmac_read; + bgmac->write = platform_bgmac_write; + bgmac->idm_read = platform_bgmac_idm_read; + bgmac->idm_write = platform_bgmac_idm_write; + bgmac->clk_enabled = platform_bgmac_clk_enabled; + bgmac->clk_enable = platform_bgmac_clk_enable; + bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset; + bgmac->get_bus_clock = platform_bgmac_get_bus_clock; + bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32; + + return bgmac_enet_probe(bgmac); +} + +static int bgmac_remove(struct platform_device *pdev) +{ + struct bgmac *bgmac = platform_get_drvdata(pdev); + + bgmac_enet_remove(bgmac); + + return 0; +} + +static const struct of_device_id bgmac_of_enet_match[] = { + {.compatible = "brcm,amac",}, + {.compatible = "brcm,nsp-amac",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, bgmac_of_enet_match); + +static struct platform_driver bgmac_enet_driver = { + .driver = { + .name = "bgmac-enet", + .of_match_table = bgmac_of_enet_match, + }, + .probe = bgmac_probe, + .remove = bgmac_remove, +}; + +module_platform_driver(bgmac_enet_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 25bbae592..c4751ece7 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -6,51 +6,27 @@ * Licensed under the GNU/GPL. See COPYING for details. 
*/ -#include "bgmac.h" -#include -#include -#include +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include #include -#include -#include -#include -#include -#include #include +#include "bgmac.h" -static const struct bcma_device_id bgmac_bcma_tbl[] = { - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), - {}, -}; -MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); - -static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac) -{ - switch (bgmac->core->bus->chipinfo.id) { - case BCMA_CHIP_ID_BCM4707: - case BCMA_CHIP_ID_BCM47094: - case BCMA_CHIP_ID_BCM53018: - return true; - default: - return false; - } -} - -static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, +static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask, u32 value, int timeout) { u32 val; int i; for (i = 0; i < timeout / 10; i++) { - val = bcma_read32(core, reg); + val = bgmac_read(bgmac, reg); if ((val & mask) == value) return true; udelay(10); } - pr_err("Timeout waiting for reg 0x%X\n", reg); + dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg); return false; } @@ -84,22 +60,22 @@ static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) udelay(10); } if (i) - bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", - ring->mmio_base, val); + dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", + ring->mmio_base, val); /* Remove SUSPEND bit */ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0); - if (!bgmac_wait_value(bgmac->core, + if (!bgmac_wait_value(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS, BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED, 10000)) { - bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", - ring->mmio_base); + dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", + ring->mmio_base); udelay(300); val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED) - bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n", + ring->mmio_base); } } @@ -109,7 +85,7 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac, u32 ctl; ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL); - if (bgmac->core->id.rev >= 4) { + if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) { ctl &= ~BGMAC_DMA_TX_BL_MASK; ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT; @@ -152,7 +128,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct sk_buff *skb) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct net_device *net_dev = bgmac->net_dev; int index = ring->end % BGMAC_TX_RING_SLOTS; struct bgmac_slot_info *slot = &ring->slots[index]; @@ -161,7 +137,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, int i; if (skb->len > BGMAC_DESC_CTL1_LEN) { - bgmac_err(bgmac, "Too long skb (%d)\n", skb->len); + netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len); goto err_drop; } @@ -174,7 +150,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, * even when ring->end overflows */ if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) { - bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n"); + netdev_err(bgmac->net_dev, "TX ring is full, queue 
should be stopped!\n"); netif_stop_queue(net_dev); return NETDEV_TX_BUSY; } @@ -241,18 +217,20 @@ err_dma: } err_dma_head: - bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", - ring->mmio_base); + netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n", + ring->mmio_base); err_drop: dev_kfree_skb(skb); + net_dev->stats.tx_dropped++; + net_dev->stats.tx_errors++; return NETDEV_TX_OK; } /* Free transmitted packets */ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; int empty_slot; bool freed = false; unsigned bytes_compl = 0, pkts_compl = 0; @@ -285,6 +263,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) DMA_TO_DEVICE); if (slot->skb) { + bgmac->net_dev->stats.tx_bytes += slot->skb->len; + bgmac->net_dev->stats.tx_packets++; bytes_compl += slot->skb->len; pkts_compl++; @@ -313,12 +293,12 @@ static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) return; bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0); - if (!bgmac_wait_value(bgmac->core, + if (!bgmac_wait_value(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS, BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED, 10000)) - bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n", + ring->mmio_base); } static void bgmac_dma_rx_enable(struct bgmac *bgmac, @@ -327,7 +307,7 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, u32 ctl; ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); - if (bgmac->core->id.rev >= 4) { + if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { ctl &= ~BGMAC_DMA_RX_BL_MASK; ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; @@ -348,7 +328,7 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, struct bgmac_slot_info *slot) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; dma_addr_t dma_addr; struct bgmac_rx_header *rx; void *buf; @@ -367,7 +347,7 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET, BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dma_addr)) { - bgmac_err(bgmac, "DMA mapping error\n"); + netdev_err(bgmac->net_dev, "DMA mapping error\n"); put_page(virt_to_head_page(buf)); return -ENOMEM; } @@ -437,7 +417,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, end_slot /= sizeof(struct bgmac_dma_desc); while (ring->start != end_slot) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_slot_info *slot = &ring->slots[ring->start]; struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; struct sk_buff *skb; @@ -462,16 +442,19 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, /* Check for poison and drop or pass the packet */ if (len == 0xdead && flags == 0xbeef) { - bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", - ring->start); + netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n", + ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; } if (len > BGMAC_RX_ALLOC_SIZE) { - bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", - ring->start); + netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n", + 
ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_length_errors++; + bgmac->net_dev->stats.rx_errors++; break; } @@ -480,8 +463,9 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); if (unlikely(!skb)) { - bgmac_err(bgmac, "build_skb failed\n"); + netdev_err(bgmac->net_dev, "build_skb failed\n"); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; } skb_put(skb, BGMAC_RX_FRAME_OFFSET + @@ -491,6 +475,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, bgmac->net_dev); + bgmac->net_dev->stats.rx_bytes += len; + bgmac->net_dev->stats.rx_packets++; napi_gro_receive(&bgmac->napi, skb); handled++; } while (0); @@ -534,7 +520,7 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac, static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_dma_desc *dma_desc = ring->cpu_base; struct bgmac_slot_info *slot; int i; @@ -560,7 +546,7 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, static void bgmac_dma_rx_ring_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_slot_info *slot; int i; @@ -581,7 +567,7 @@ static void bgmac_dma_ring_desc_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring, int num_slots) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; int size; if (!ring->cpu_base) @@ -619,7 +605,7 @@ static void bgmac_dma_free(struct bgmac *bgmac) static int bgmac_dma_alloc(struct bgmac *bgmac) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_dma_ring *ring; static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1, BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, }; @@ -630,8 +616,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base)); - if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) { - bgmac_err(bgmac, "Core does not report 64-bit DMA\n"); + if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) { + dev_err(bgmac->dev, "Core does not report 64-bit DMA\n"); return -ENOTSUPP; } @@ -645,8 +631,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) &ring->dma_base, GFP_KERNEL); if (!ring->cpu_base) { - bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", + ring->mmio_base); goto err_dma_free; } @@ -670,8 +656,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) &ring->dma_base, GFP_KERNEL); if (!ring->cpu_base) { - bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", + ring->mmio_base); err = -ENOMEM; goto err_dma_free; } @@ -746,150 +732,6 @@ error: return err; } -/************************************************** - * PHY ops - **************************************************/ - -static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg) -{ - struct bcma_device *core; - u16 phy_access_addr; - u16 phy_ctl_addr; - u32 tmp; - - BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK); - BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != 
BCMA_GMAC_CMN_PA_ADDR_MASK); - BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT); - BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK); - BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT); - BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE); - BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START); - BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK); - BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK); - BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); - BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); - - if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bgmac->core->bus->drv_gmac_cmn.core; - phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; - phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; - } else { - core = bgmac->core; - phy_access_addr = BGMAC_PHY_ACCESS; - phy_ctl_addr = BGMAC_PHY_CNTL; - } - - tmp = bcma_read32(core, phy_ctl_addr); - tmp &= ~BGMAC_PC_EPA_MASK; - tmp |= phyaddr; - bcma_write32(core, phy_ctl_addr, tmp); - - tmp = BGMAC_PA_START; - tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; - tmp |= reg << BGMAC_PA_REG_SHIFT; - bcma_write32(core, phy_access_addr, tmp); - - if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { - bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n", - phyaddr, reg); - return 0xffff; - } - - return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK; -} - -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ -static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) -{ - struct bcma_device *core; - u16 phy_access_addr; - u16 phy_ctl_addr; - u32 tmp; - - if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bgmac->core->bus->drv_gmac_cmn.core; - phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; - phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; - } else { - core = bgmac->core; - phy_access_addr = BGMAC_PHY_ACCESS; - phy_ctl_addr = BGMAC_PHY_CNTL; - } - - tmp = bcma_read32(core, phy_ctl_addr); - tmp &= ~BGMAC_PC_EPA_MASK; - tmp |= phyaddr; - bcma_write32(core, phy_ctl_addr, tmp); - - bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO); - if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) - bgmac_warn(bgmac, "Error setting MDIO int\n"); - - tmp = BGMAC_PA_START; - tmp |= BGMAC_PA_WRITE; - tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; - tmp |= reg << BGMAC_PA_REG_SHIFT; - tmp |= value; - bcma_write32(core, phy_access_addr, tmp); - - if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { - bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n", - phyaddr, reg); - return -ETIMEDOUT; - } - - return 0; -} - -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ -static void bgmac_phy_init(struct bgmac *bgmac) -{ - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; - struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; - u8 i; - - if (ci->id == BCMA_CHIP_ID_BCM5356) { - for (i = 0; i < 5; i++) { - bgmac_phy_write(bgmac, i, 0x1f, 0x008b); - bgmac_phy_write(bgmac, i, 0x15, 0x0100); - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x12, 0x2aaa); - bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - } - } - if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { - bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); - bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); - for (i = 0; i < 5; i++) { - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x16, 0x5284); - 
bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - bgmac_phy_write(bgmac, i, 0x17, 0x0010); - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x16, 0x5296); - bgmac_phy_write(bgmac, i, 0x17, 0x1073); - bgmac_phy_write(bgmac, i, 0x17, 0x9073); - bgmac_phy_write(bgmac, i, 0x16, 0x52b6); - bgmac_phy_write(bgmac, i, 0x17, 0x9273); - bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - } - } -} - -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ -static void bgmac_phy_reset(struct bgmac *bgmac) -{ - if (bgmac->phyaddr == BGMAC_PHY_NOREGS) - return; - - bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET); - udelay(100); - if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET) - bgmac_err(bgmac, "PHY reset failed\n"); - bgmac_phy_init(bgmac); -} /************************************************** * Chip ops @@ -903,14 +745,20 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set, { u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); u32 new_val = (cmdcfg & mask) | set; + u32 cmdcfg_sr; + + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0; - bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev)); + bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr); udelay(2); if (new_val != cmdcfg || force) bgmac_write(bgmac, BGMAC_CMDCFG, new_val); - bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev)); + bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr); udelay(2); } @@ -939,7 +787,7 @@ static void bgmac_chip_stats_update(struct bgmac *bgmac) { int i; - if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) { + if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) { for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++) bgmac->mib_tx_regs[i] = bgmac_read(bgmac, @@ -958,7 +806,7 @@ static void bgmac_clear_mib(struct bgmac *bgmac) { int i; - if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) + if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB) return; bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR); @@ -988,7 +836,8 @@ static void bgmac_mac_speed(struct bgmac *bgmac) set |= BGMAC_CMDCFG_ES_2500; break; default: - bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed); + dev_err(bgmac->dev, "Unsupported speed: %d\n", + bgmac->mac_speed); } if (bgmac->mac_duplex == DUPLEX_HALF) @@ -999,17 +848,16 @@ static void bgmac_mac_speed(struct bgmac *bgmac) static void bgmac_miiconfig(struct bgmac *bgmac) { - struct bcma_device *core = bgmac->core; - u8 imode; - - if (bgmac_is_bcm4707_family(bgmac)) { - bcma_awrite32(core, BCMA_IOCTL, - bcma_aread32(core, BCMA_IOCTL) | 0x40 | - BGMAC_BCMA_IOCTL_SW_CLKEN); + if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) { + bgmac_idm_write(bgmac, BCMA_IOCTL, + bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 | + BGMAC_BCMA_IOCTL_SW_CLKEN); bgmac->mac_speed = SPEED_2500; bgmac->mac_duplex = DUPLEX_FULL; bgmac_mac_speed(bgmac); } else { + u8 imode; + imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; if (imode == 0 || imode == 1) { @@ -1023,14 +871,11 @@ static void bgmac_miiconfig(struct bgmac *bgmac) /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */ static void bgmac_chip_reset(struct bgmac *bgmac) { - struct bcma_device *core = bgmac->core; - struct bcma_bus *bus = core->bus; - struct bcma_chipinfo *ci = &bus->chipinfo; - u32 flags; + u32 cmdcfg_sr; u32 iost; int i; - if (bcma_core_is_enabled(core)) { + if (bgmac_clk_enabled(bgmac)) { if (!bgmac->stats_grabbed) { /* bgmac_chip_stats_update(bgmac); */ 
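/*
 * Editorial sketch, not part of this patch: the feature-flag pattern that
 * replaces the old chip-ID and core-revision checks throughout this file.
 * The bus glue translates bus-specific knowledge into flags once at probe
 * time, and the bus-agnostic core only ever tests the flags:
 */

/* Probe side: bgmac-bcma.c above sets this for core rev >= 4,
 * bgmac-platform.c sets it unconditionally for the BCM4707 family.
 */
if (core->id.rev >= 4)
	bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;

/* Consumer side: bgmac_cmdcfg_maskset() above picks the reset bit
 * without knowing which bus it runs on.
 */
cmdcfg_sr = (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) ?
	    BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0;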
bgmac->stats_grabbed = true; @@ -1048,38 +893,32 @@ static void bgmac_chip_reset(struct bgmac *bgmac) /* TODO: Clear software multicast filter list */ } - iost = bcma_aread32(core, BCMA_IOST); - if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) + iost = bgmac_idm_read(bgmac, BCMA_IOST); + if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) iost &= ~BGMAC_BCMA_IOST_ATTACHED; /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */ - if (ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM47094) { - flags = 0; + if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) { + u32 flags = 0; if (iost & BGMAC_BCMA_IOST_ATTACHED) { flags = BGMAC_BCMA_IOCTL_SW_CLKEN; if (!bgmac->has_robosw) flags |= BGMAC_BCMA_IOCTL_SW_RESET; } - bcma_core_enable(core, flags); + bgmac_clk_enable(bgmac, flags); } /* Request Misc PLL for corerev > 2 */ - if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) { + if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) { bgmac_set(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); - bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, + bgmac_wait_value(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, 1000); } - if (ci->id == BCMA_CHIP_ID_BCM5357 || - ci->id == BCMA_CHIP_ID_BCM4749 || - ci->id == BCMA_CHIP_ID_BCM53572) { - struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; + if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) { u8 et_swtype = 0; u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY | BGMAC_CHIPCTL_1_IF_TYPE_MII; @@ -1087,35 +926,37 @@ static void bgmac_chip_reset(struct bgmac *bgmac) if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) { if (kstrtou8(buf, 0, &et_swtype)) - bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", - buf); + dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n", + buf); et_swtype &= 0x0f; et_swtype <<= 4; sw_type = et_swtype; - } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) { + } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) { sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; - } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) { + } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) { sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII | BGMAC_CHIPCTL_1_SW_TYPE_RGMII; } - bcma_chipco_chipctl_maskset(cc, 1, - ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | - BGMAC_CHIPCTL_1_SW_TYPE_MASK), - sw_type); + bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | + BGMAC_CHIPCTL_1_SW_TYPE_MASK), + sw_type); } if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) - bcma_awrite32(core, BCMA_IOCTL, - bcma_aread32(core, BCMA_IOCTL) & - ~BGMAC_BCMA_IOCTL_SW_RESET); + bgmac_idm_write(bgmac, BCMA_IOCTL, + bgmac_idm_read(bgmac, BCMA_IOCTL) & + ~BGMAC_BCMA_IOCTL_SW_RESET); /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to * be keps until taking MAC out of the reset. 
*/ + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0; + bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE | @@ -1133,19 +974,20 @@ static void bgmac_chip_reset(struct bgmac *bgmac) BGMAC_CMDCFG_PROM | BGMAC_CMDCFG_NLC | BGMAC_CMDCFG_CFE | - BGMAC_CMDCFG_SR(core->id.rev), + cmdcfg_sr, false); bgmac->mac_speed = SPEED_UNKNOWN; bgmac->mac_duplex = DUPLEX_UNKNOWN; bgmac_clear_mib(bgmac); - if (core->id.id == BCMA_CORE_4706_MAC_GBIT) - bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0, - BCMA_GMAC_CMN_PC_MTE); + if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL) + bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0, + BCMA_GMAC_CMN_PC_MTE); else bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE); bgmac_miiconfig(bgmac); - bgmac_phy_init(bgmac); + if (bgmac->mii_bus) + bgmac->mii_bus->reset(bgmac->mii_bus); netdev_reset_queue(bgmac->net_dev); } @@ -1164,50 +1006,51 @@ static void bgmac_chip_intrs_off(struct bgmac *bgmac) /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */ static void bgmac_enable(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; + u32 cmdcfg_sr; u32 cmdcfg; u32 mode; - u32 rxq_ctl; - u32 fl_ctl; - u16 bp_clk; - u8 mdp; + + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0; cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE), - BGMAC_CMDCFG_SR(bgmac->core->id.rev), true); + cmdcfg_sr, true); udelay(2); cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE; bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg); mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; - if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0) + if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); - if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2) - bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0, - BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); - - switch (ci->id) { - case BCMA_CHIP_ID_BCM5357: - case BCMA_CHIP_ID_BCM4749: - case BCMA_CHIP_ID_BCM53572: - case BCMA_CHIP_ID_BCM4716: - case BCMA_CHIP_ID_BCM47162: - fl_ctl = 0x03cb04cb; - if (ci->id == BCMA_CHIP_ID_BCM5357 || - ci->id == BCMA_CHIP_ID_BCM4749 || - ci->id == BCMA_CHIP_ID_BCM53572) + if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) + bgmac_cco_ctl_maskset(bgmac, 1, ~0, + BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); + + if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 | + BGMAC_FEAT_FLW_CTRL2)) { + u32 fl_ctl; + + if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1) fl_ctl = 0x2300e1; + else + fl_ctl = 0x03cb04cb; + bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl); bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff); - break; } - if (!bgmac_is_bcm4707_family(bgmac)) { + if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) { + u32 rxq_ctl; + u16 bp_clk; + u8 mdp; + rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; - bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / - 1000000; + bp_clk = bgmac_get_bus_clock(bgmac) / 1000000; mdp = (bp_clk * 128 / 1000) - 3; rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT); bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl); @@ -1251,7 +1094,7 @@ static irqreturn_t bgmac_interrupt(int irq, void *dev_id) int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX); if (int_status) - bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status); + dev_err(bgmac->dev, "Unknown IRQs: 
0x%08X\n", int_status); /* Disable new interrupts until handling existing ones */ bgmac_chip_intrs_off(bgmac); @@ -1302,16 +1145,16 @@ static int bgmac_open(struct net_device *net_dev) /* Specs say about reclaiming rings here, but we do that in DMA init */ bgmac_chip_init(bgmac); - err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED, + err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED, KBUILD_MODNAME, net_dev); if (err < 0) { - bgmac_err(bgmac, "IRQ request error: %d!\n", err); + dev_err(bgmac->dev, "IRQ request error: %d!\n", err); bgmac_dma_cleanup(bgmac); return err; } napi_enable(&bgmac->napi); - phy_start(bgmac->phy_dev); + phy_start(net_dev->phydev); netif_start_queue(net_dev); @@ -1324,11 +1167,11 @@ static int bgmac_stop(struct net_device *net_dev) netif_carrier_off(net_dev); - phy_stop(bgmac->phy_dev); + phy_stop(net_dev->phydev); napi_disable(&bgmac->napi); bgmac_chip_intrs_off(bgmac); - free_irq(bgmac->core->irq, net_dev); + free_irq(bgmac->irq, net_dev); bgmac_chip_reset(bgmac); bgmac_dma_cleanup(bgmac); @@ -1362,12 +1205,10 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { - struct bgmac *bgmac = netdev_priv(net_dev); - if (!netif_running(net_dev)) return -EINVAL; - return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd); + return phy_mii_ioctl(net_dev->phydev, ifr, cmd); } static const struct net_device_ops bgmac_netdev_ops = { @@ -1384,54 +1225,151 @@ static const struct net_device_ops bgmac_netdev_ops = { * ethtool_ops **************************************************/ -static int bgmac_get_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) +struct bgmac_stat { + u8 size; + u32 offset; + const char *name; +}; + +static struct bgmac_stat bgmac_get_strings_stats[] = { + { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" }, + { 4, BGMAC_TX_GOOD_PKTS, "tx_good" }, + { 8, BGMAC_TX_OCTETS, "tx_octets" }, + { 4, BGMAC_TX_PKTS, "tx_pkts" }, + { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" }, + { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" }, + { 4, BGMAC_TX_LEN_64, "tx_64" }, + { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" }, + { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" }, + { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" }, + { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" }, + { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" }, + { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" }, + { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" }, + { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" }, + { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" }, + { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" }, + { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" }, + { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" }, + { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" }, + { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" }, + { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" }, + { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" }, + { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" }, + { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" }, + { 4, BGMAC_TX_DEFERED, "tx_defered" }, + { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" }, + { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" }, + { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" }, + { 4, BGMAC_TX_Q0_PKTS, "tx_q0" }, + { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" }, + { 4, BGMAC_TX_Q1_PKTS, "tx_q1" }, + { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" }, + { 4, BGMAC_TX_Q2_PKTS, "tx_q2" }, + { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" }, + { 4, BGMAC_TX_Q3_PKTS, "tx_q3" }, + { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" }, + { 8, 
BGMAC_RX_GOOD_OCTETS, "rx_good_octets" }, + { 4, BGMAC_RX_GOOD_PKTS, "rx_good" }, + { 8, BGMAC_RX_OCTETS, "rx_octets" }, + { 4, BGMAC_RX_PKTS, "rx_pkts" }, + { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" }, + { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" }, + { 4, BGMAC_RX_LEN_64, "rx_64" }, + { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" }, + { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" }, + { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" }, + { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" }, + { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" }, + { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" }, + { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" }, + { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" }, + { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" }, + { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" }, + { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" }, + { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" }, + { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" }, + { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" }, + { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" }, + { 4, BGMAC_RX_CRC_ERRS, "rx_crc" }, + { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" }, + { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" }, + { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" }, + { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" }, + { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" }, + { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" }, +}; + +#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats) + +static int bgmac_get_sset_count(struct net_device *dev, int string_set) { - struct bgmac *bgmac = netdev_priv(net_dev); + switch (string_set) { + case ETH_SS_STATS: + return BGMAC_STATS_LEN; + } - return phy_ethtool_gset(bgmac->phy_dev, cmd); + return -EOPNOTSUPP; } -static int bgmac_set_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) +static void bgmac_get_strings(struct net_device *dev, u32 stringset, + u8 *data) { - struct bgmac *bgmac = netdev_priv(net_dev); + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < BGMAC_STATS_LEN; i++) + strlcpy(data + i * ETH_GSTRING_LEN, + bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN); +} + +static void bgmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *ss, uint64_t *data) +{ + struct bgmac *bgmac = netdev_priv(dev); + const struct bgmac_stat *s; + unsigned int i; + u64 val; + + if (!netif_running(dev)) + return; - return phy_ethtool_sset(bgmac->phy_dev, cmd); + for (i = 0; i < BGMAC_STATS_LEN; i++) { + s = &bgmac_get_strings_stats[i]; + val = 0; + if (s->size == 8) + val = (u64)bgmac_read(bgmac, s->offset + 4) << 32; + val |= bgmac_read(bgmac, s->offset); + data[i] = val; + } } static void bgmac_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info)); + strlcpy(info->bus_info, "AXI", sizeof(info->bus_info)); } static const struct ethtool_ops bgmac_ethtool_ops = { - .get_settings = bgmac_get_settings, - .set_settings = bgmac_set_settings, + .get_strings = bgmac_get_strings, + .get_sset_count = bgmac_get_sset_count, + .get_ethtool_stats = bgmac_get_ethtool_stats, .get_drvinfo = bgmac_get_drvinfo, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /************************************************** * MII **************************************************/ -static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum) -{ - return bgmac_phy_read(bus->priv, mii_id, regnum); -} - -static int bgmac_mii_write(struct mii_bus *bus, 
int mii_id, int regnum,
-			   u16 value)
-{
-	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
-}
-
 static void bgmac_adjust_link(struct net_device *net_dev)
 {
 	struct bgmac *bgmac = netdev_priv(net_dev);
-	struct phy_device *phy_dev = bgmac->phy_dev;
+	struct phy_device *phy_dev = net_dev->phydev;
 	bool update = false;
 
 	if (phy_dev->link) {
@@ -1452,7 +1390,7 @@ static void bgmac_adjust_link(struct net_device *net_dev)
 	}
 }
 
-static int bgmac_fixed_phy_register(struct bgmac *bgmac)
+static int bgmac_phy_connect_direct(struct bgmac *bgmac)
 {
 	struct fixed_phy_status fphy_status = {
 		.link = 1,
@@ -1464,196 +1402,76 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
 	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
 	if (!phy_dev || IS_ERR(phy_dev)) {
-		bgmac_err(bgmac, "Failed to register fixed PHY device\n");
+		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
 		return -ENODEV;
 	}
 
 	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
 				 PHY_INTERFACE_MODE_MII);
 	if (err) {
-		bgmac_err(bgmac, "Connecting PHY failed\n");
+		dev_err(bgmac->dev, "Connecting PHY failed\n");
 		return err;
 	}
 
-	bgmac->phy_dev = phy_dev;
-
 	return err;
 }
 
-static int bgmac_mii_register(struct bgmac *bgmac)
+static int bgmac_phy_connect(struct bgmac *bgmac)
 {
-	struct mii_bus *mii_bus;
 	struct phy_device *phy_dev;
 	char bus_id[MII_BUS_ID_SIZE + 3];
-	int err = 0;
-
-	if (bgmac_is_bcm4707_family(bgmac))
-		return bgmac_fixed_phy_register(bgmac);
-
-	mii_bus = mdiobus_alloc();
-	if (!mii_bus)
-		return -ENOMEM;
-
-	mii_bus->name = "bgmac mii bus";
-	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
-		bgmac->core->core_unit);
-	mii_bus->priv = bgmac;
-	mii_bus->read = bgmac_mii_read;
-	mii_bus->write = bgmac_mii_write;
-	mii_bus->parent = &bgmac->core->dev;
-	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
-
-	err = mdiobus_register(mii_bus);
-	if (err) {
-		bgmac_err(bgmac, "Registration of mii bus failed\n");
-		goto err_free_bus;
-	}
-
-	bgmac->mii_bus = mii_bus;
 
 	/* Connect to the PHY */
-	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
 		 bgmac->phyaddr);
 	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
 			      PHY_INTERFACE_MODE_MII);
 	if (IS_ERR(phy_dev)) {
-		bgmac_err(bgmac, "PHY connection failed\n");
-		err = PTR_ERR(phy_dev);
-		goto err_unregister_bus;
+		dev_err(bgmac->dev, "PHY connection failed\n");
+		return PTR_ERR(phy_dev);
 	}
 
-	bgmac->phy_dev = phy_dev;
-
-	return err;
-
-err_unregister_bus:
-	mdiobus_unregister(mii_bus);
-err_free_bus:
-	mdiobus_free(mii_bus);
-	return err;
-}
-
-static void bgmac_mii_unregister(struct bgmac *bgmac)
-{
-	struct mii_bus *mii_bus = bgmac->mii_bus;
-
-	mdiobus_unregister(mii_bus);
-	mdiobus_free(mii_bus);
+	return 0;
 }
 
-/**************************************************
- * BCMA bus ops
- **************************************************/
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
-static int bgmac_probe(struct bcma_device *core)
+int bgmac_enet_probe(struct bgmac *info)
 {
 	struct net_device *net_dev;
 	struct bgmac *bgmac;
-	struct ssb_sprom *sprom = &core->bus->sprom;
-	u8 *mac;
 	int err;
 
-	switch (core->core_unit) {
-	case 0:
-		mac = sprom->et0mac;
-		break;
-	case 1:
-		mac = sprom->et1mac;
-		break;
-	case 2:
-		mac = sprom->et2mac;
-		break;
-	default:
-		pr_err("Unsupported core_unit %d\n", core->core_unit);
-		return -ENOTSUPP;
-	}
-
-	if (!is_valid_ether_addr(mac)) {
-		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
-		eth_random_addr(mac);
-		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
-	}
-
-	/* This (reset &) enable is not preset in specs or reference driver but
-	 * Broadcom does it in arch PCI code when enabling fake PCI device.
-	 */
-	bcma_core_enable(core, 0);
-
 	/* Allocation and references */
 	net_dev = alloc_etherdev(sizeof(*bgmac));
 	if (!net_dev)
 		return -ENOMEM;
+
 	net_dev->netdev_ops = &bgmac_netdev_ops;
-	net_dev->irq = core->irq;
 	net_dev->ethtool_ops = &bgmac_ethtool_ops;
 	bgmac = netdev_priv(net_dev);
+	memcpy(bgmac, info, sizeof(*bgmac));
 	bgmac->net_dev = net_dev;
-	bgmac->core = core;
-	bcma_set_drvdata(core, bgmac);
-
-	/* Defaults */
-	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
-
-	/* On BCM4706 we need common core to access PHY */
-	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
-	    !core->bus->drv_gmac_cmn.core) {
-		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
-		err = -ENODEV;
-		goto err_netdev_free;
-	}
-	bgmac->cmn = core->bus->drv_gmac_cmn.core;
-
-	switch (core->core_unit) {
-	case 0:
-		bgmac->phyaddr = sprom->et0phyaddr;
-		break;
-	case 1:
-		bgmac->phyaddr = sprom->et1phyaddr;
-		break;
-	case 2:
-		bgmac->phyaddr = sprom->et2phyaddr;
-		break;
-	}
-	bgmac->phyaddr &= BGMAC_PHY_MASK;
-	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
-		bgmac_err(bgmac, "No PHY found\n");
-		err = -ENODEV;
-		goto err_netdev_free;
+	net_dev->irq = bgmac->irq;
+	SET_NETDEV_DEV(net_dev, bgmac->dev);
+
+	if (!is_valid_ether_addr(bgmac->mac_addr)) {
+		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
+			bgmac->mac_addr);
+		eth_random_addr(bgmac->mac_addr);
+		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
+			 bgmac->mac_addr);
 	}
-	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
-		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+	ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);
 
-	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
-		bgmac_err(bgmac, "PCI setup not implemented\n");
-		err = -ENOTSUPP;
-		goto err_netdev_free;
-	}
+	/* This (reset &) enable is not present in specs or reference driver but
+	 * Broadcom does it in arch PCI code when enabling fake PCI device.
	 */
+	bgmac_clk_enable(bgmac, 0);
 
 	bgmac_chip_reset(bgmac);
 
-	/* For Northstar, we have to take all GMAC core out of reset */
-	if (bgmac_is_bcm4707_family(bgmac)) {
-		struct bcma_device *ns_core;
-		int ns_gmac;
-
-		/* Northstar has 4 GMAC cores */
-		for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
-			/* As Northstar requirement, we have to reset all GMACs
-			 * before accessing one. bgmac_chip_reset() call
-			 * bcma_core_enable() for this core. Then the other
-			 * three GMACs didn't reset. We do it here.
-			 */
-			ns_core = bcma_find_core_unit(core->bus,
-						      BCMA_CORE_MAC_GBIT,
-						      ns_gmac);
-			if (ns_core && !bcma_core_is_enabled(ns_core))
-				bcma_core_enable(ns_core, 0);
-		}
-	}
-
 	err = bgmac_dma_alloc(bgmac);
 	if (err) {
-		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
+		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
 		goto err_netdev_free;
 	}
 
@@ -1661,22 +1479,14 @@ static int bgmac_probe(struct bcma_device *core)
 	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
 		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
 
-	/* TODO: reset the external phy. Specs are needed */
-	bgmac_phy_reset(bgmac);
-
-	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
-			       BGMAC_BFL_ENETROBO);
-	if (bgmac->has_robosw)
-		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
-
-	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
-		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
-
 	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
 
-	err = bgmac_mii_register(bgmac);
+	if (!bgmac->mii_bus)
+		err = bgmac_phy_connect_direct(bgmac);
+	else
+		err = bgmac_phy_connect(bgmac);
 	if (err) {
-		bgmac_err(bgmac, "Cannot register MDIO\n");
+		dev_err(bgmac->dev, "Cannot connect to phy\n");
 		goto err_dma_free;
 	}
 
@@ -1686,64 +1496,34 @@ static int bgmac_probe(struct bcma_device *core)
 	err = register_netdev(bgmac->net_dev);
 	if (err) {
-		bgmac_err(bgmac, "Cannot register net device\n");
-		goto err_mii_unregister;
+		dev_err(bgmac->dev, "Cannot register net device\n");
+		goto err_phy_disconnect;
 	}
 
 	netif_carrier_off(net_dev);
 
 	return 0;
 
-err_mii_unregister:
-	bgmac_mii_unregister(bgmac);
+err_phy_disconnect:
+	phy_disconnect(net_dev->phydev);
err_dma_free:
 	bgmac_dma_free(bgmac);
-
 err_netdev_free:
-	bcma_set_drvdata(core, NULL);
 	free_netdev(net_dev);
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(bgmac_enet_probe);
 
-static void bgmac_remove(struct bcma_device *core)
+void bgmac_enet_remove(struct bgmac *bgmac)
 {
-	struct bgmac *bgmac = bcma_get_drvdata(core);
-
 	unregister_netdev(bgmac->net_dev);
-	bgmac_mii_unregister(bgmac);
+	phy_disconnect(bgmac->net_dev->phydev);
 	netif_napi_del(&bgmac->napi);
 	bgmac_dma_free(bgmac);
-	bcma_set_drvdata(core, NULL);
 	free_netdev(bgmac->net_dev);
 }
-
-static struct bcma_driver bgmac_bcma_driver = {
-	.name		= KBUILD_MODNAME,
-	.id_table	= bgmac_bcma_tbl,
-	.probe		= bgmac_probe,
-	.remove		= bgmac_remove,
-};
-
-static int __init bgmac_init(void)
-{
-	int err;
-
-	err = bcma_driver_register(&bgmac_bcma_driver);
-	if (err)
-		return err;
-	pr_info("Broadcom 47xx GBit MAC driver loaded\n");
-
-	return 0;
-}
-
-static void __exit bgmac_exit(void)
-{
-	bcma_driver_unregister(&bgmac_bcma_driver);
-}
-
-module_init(bgmac_init)
-module_exit(bgmac_exit)
+EXPORT_SYMBOL_GPL(bgmac_enet_remove);
 
 MODULE_AUTHOR("Rafał Miłecki");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 9a03c142b..24a250267 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -1,19 +1,6 @@
 #ifndef _BGMAC_H
 #define _BGMAC_H
 
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
-
-#define bgmac_err(bgmac, fmt, ...) \
-	dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_warn(bgmac, fmt, ...) \
-	dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_info(bgmac, fmt, ...) \
-	dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_dbg(bgmac, fmt, ...)
\ - dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) - -#include -#include #include #define BGMAC_DEV_CTL 0x000 @@ -123,7 +110,7 @@ #define BGMAC_TX_LEN_1024_TO_1522 0x334 #define BGMAC_TX_LEN_1523_TO_2047 0x338 #define BGMAC_TX_LEN_2048_TO_4095 0x33c -#define BGMAC_TX_LEN_4095_TO_8191 0x340 +#define BGMAC_TX_LEN_4096_TO_8191 0x340 #define BGMAC_TX_LEN_8192_TO_MAX 0x344 #define BGMAC_TX_JABBER_PKTS 0x348 /* Error */ #define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */ @@ -166,7 +153,7 @@ #define BGMAC_RX_LEN_1024_TO_1522 0x3e4 #define BGMAC_RX_LEN_1523_TO_2047 0x3e8 #define BGMAC_RX_LEN_2048_TO_4095 0x3ec -#define BGMAC_RX_LEN_4095_TO_8191 0x3f0 +#define BGMAC_RX_LEN_4096_TO_8191 0x3f0 #define BGMAC_RX_LEN_8192_TO_MAX 0x3f4 #define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */ #define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */ @@ -201,7 +188,6 @@ #define BGMAC_CMDCFG_HD_SHIFT 10 #define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */ #define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */ -#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) #define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ #define BGMAC_CMDCFG_AE 0x00400000 #define BGMAC_CMDCFG_CFE 0x00800000 @@ -387,6 +373,24 @@ #define ETHER_MAX_LEN 1518 +/* Feature Flags */ +#define BGMAC_FEAT_TX_MASK_SETUP BIT(0) +#define BGMAC_FEAT_RX_MASK_SETUP BIT(1) +#define BGMAC_FEAT_IOST_ATTACHED BIT(2) +#define BGMAC_FEAT_NO_RESET BIT(3) +#define BGMAC_FEAT_MISC_PLL_REQ BIT(4) +#define BGMAC_FEAT_SW_TYPE_PHY BIT(5) +#define BGMAC_FEAT_SW_TYPE_EPHYRMII BIT(6) +#define BGMAC_FEAT_SW_TYPE_RGMII BIT(7) +#define BGMAC_FEAT_CMN_PHY_CTL BIT(8) +#define BGMAC_FEAT_FLW_CTRL1 BIT(9) +#define BGMAC_FEAT_FLW_CTRL2 BIT(10) +#define BGMAC_FEAT_SET_RXQ_CLK BIT(11) +#define BGMAC_FEAT_CLKCTLST BIT(12) +#define BGMAC_FEAT_NO_CLR_MIB BIT(13) +#define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14) +#define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15) + struct bgmac_slot_info { union { struct sk_buff *skb; @@ -436,12 +440,26 @@ struct bgmac_rx_header { }; struct bgmac { - struct bcma_device *core; - struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */ + union { + struct { + void *base; + void *idm_base; + } plat; + struct { + struct bcma_device *core; + /* Reference to CMN core for BCM4706 */ + struct bcma_device *cmn; + } bcma; + }; + + struct device *dev; + struct device *dma_dev; + unsigned char mac_addr[ETH_ALEN]; + u32 feature_flags; + struct net_device *net_dev; struct napi_struct napi; struct mii_bus *mii_bus; - struct phy_device *phy_dev; /* DMA */ struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; @@ -453,6 +471,7 @@ struct bgmac { u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS]; /* Int */ + int irq; u32 int_mask; /* Current MAC state */ @@ -463,16 +482,71 @@ struct bgmac { bool has_robosw; bool loopback; + + u32 (*read)(struct bgmac *bgmac, u16 offset); + void (*write)(struct bgmac *bgmac, u16 offset, u32 value); + u32 (*idm_read)(struct bgmac *bgmac, u16 offset); + void (*idm_write)(struct bgmac *bgmac, u16 offset, u32 value); + bool (*clk_enabled)(struct bgmac *bgmac); + void (*clk_enable)(struct bgmac *bgmac, u32 flags); + void (*cco_ctl_maskset)(struct bgmac *bgmac, u32 offset, u32 mask, + u32 set); + u32 (*get_bus_clock)(struct bgmac *bgmac); + void (*cmn_maskset32)(struct bgmac *bgmac, u16 offset, u32 mask, + u32 set); }; +int bgmac_enet_probe(struct bgmac *info); +void bgmac_enet_remove(struct bgmac *bgmac); + +struct mii_bus *bcma_mdio_mii_register(struct 
bcma_device *core, u8 phyaddr); +void bcma_mdio_mii_unregister(struct mii_bus *mii_bus); + static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset) { - return bcma_read32(bgmac->core, offset); + return bgmac->read(bgmac, offset); } static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value) { - bcma_write32(bgmac->core, offset, value); + bgmac->write(bgmac, offset, value); +} + +static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset) +{ + return bgmac->idm_read(bgmac, offset); +} + +static inline void bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) +{ + bgmac->idm_write(bgmac, offset, value); +} + +static inline bool bgmac_clk_enabled(struct bgmac *bgmac) +{ + return bgmac->clk_enabled(bgmac); +} + +static inline void bgmac_clk_enable(struct bgmac *bgmac, u32 flags) +{ + bgmac->clk_enable(bgmac, flags); +} + +static inline void bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset, + u32 mask, u32 set) +{ + bgmac->cco_ctl_maskset(bgmac, offset, mask, set); +} + +static inline u32 bgmac_get_bus_clock(struct bgmac *bgmac) +{ + return bgmac->get_bus_clock(bgmac); +} + +static inline void bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset, + u32 mask, u32 set) +{ + bgmac->cmn_maskset32(bgmac, offset, mask, set); } static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, @@ -490,5 +564,4 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set) { bgmac_maskset(bgmac, offset, ~0, set); } - #endif /* _BGMAC_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 8e1231521..8c2220b59 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -6352,10 +6352,6 @@ bnx2_open(struct net_device *dev) struct bnx2 *bp = netdev_priv(dev); int rc; - rc = bnx2_request_firmware(bp); - if (rc < 0) - goto out; - netif_carrier_off(dev); bnx2_disable_int(bp); @@ -6424,7 +6420,6 @@ open_err: bnx2_free_irq(bp); bnx2_free_mem(bp); bnx2_del_napi(bp); - bnx2_release_firmware(bp); goto out; } @@ -8571,6 +8566,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); + rc = bnx2_request_firmware(bp); + if (rc < 0) + goto error; + + + bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | @@ -8603,6 +8604,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; error: + bnx2_release_firmware(bp); pci_iounmap(pdev, bp->regview); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 9089404bb..e447c8ef4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -59,9 +59,6 @@ #include #include #include -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) -#include -#endif #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_init_ops.h" @@ -769,6 +766,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) (bp->common.bc_ver & 0xff00) >> 8, (bp->common.bc_ver & 0xff)); + if (pci_channel_offline(bp->pdev)) { + BNX2X_ERR("Cannot dump MCP info while in PCI error\n"); + return; + } + val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); @@ -9412,10 +9414,16 @@ unload_error: /* Release IRQs */ bnx2x_free_irq(bp); - /* Reset the chip */ - rc = 
bnx2x_reset_hw(bp, reset_code);
-	if (rc)
-		BNX2X_ERR("HW_RESET failed\n");
+	/* Reset the chip, unless PCI function is offline. If we reach this
+	 * point following PCI error handling, it means the device is really
+	 * in a bad state and we're about to remove it, so resetting the chip
+	 * is not a good idea.
+	 */
+	if (!pci_channel_offline(bp->pdev)) {
+		rc = bnx2x_reset_hw(bp, reset_code);
+		if (rc)
+			BNX2X_ERR("HW_RESET failed\n");
+	}
 
 	/* Report UNLOAD_DONE to MCP */
 	bnx2x_send_unload_done(bp, keep_link);
@@ -10070,7 +10078,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
 	}
 }
 
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
 static int bnx2x_udp_port_update(struct bnx2x *bp)
 {
 	struct bnx2x_func_switch_update_params *switch_update_params;
@@ -10171,47 +10178,42 @@ static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
 	DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n", type, port);
 }
-#endif
-
-#ifdef CONFIG_BNX2X_VXLAN
-static void bnx2x_add_vxlan_port(struct net_device *netdev,
-				 sa_family_t sa_family, __be16 port)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	u16 t_port = ntohs(port);
-
-	__bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-
-static void bnx2x_del_vxlan_port(struct net_device *netdev,
-				 sa_family_t sa_family, __be16 port)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	u16 t_port = ntohs(port);
-
-	__bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-static void bnx2x_add_geneve_port(struct net_device *netdev,
-				  sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_add(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct bnx2x *bp = netdev_priv(netdev);
-	u16 t_port = ntohs(port);
+	u16 t_port = ntohs(ti->port);
 
-	__bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		__bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		__bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+		break;
+	default:
+		break;
+	}
 }
 
-static void bnx2x_del_geneve_port(struct net_device *netdev,
-				  sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_del(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct bnx2x *bp = netdev_priv(netdev);
-	u16 t_port = ntohs(port);
+	u16 t_port = ntohs(ti->port);
 
-	__bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		__bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		__bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+		break;
+	default:
+		break;
+	}
 }
-#endif
 
 static int bnx2x_close(struct net_device *dev);
 
@@ -10319,7 +10321,6 @@ sp_rtnl_not_reset:
 			       &bp->sp_rtnl_state))
 		bnx2x_update_mng_version(bp);
 
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
 	if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
 			       &bp->sp_rtnl_state)) {
 		if (bnx2x_udp_port_update(bp)) {
@@ -10329,20 +10330,14 @@ sp_rtnl_not_reset:
 				     BNX2X_UDP_PORT_MAX);
 		} else {
 			/* Since we don't store additional port information,
-			 * if no port is configured for any feature ask for
+			 * if no ports are configured for any feature ask for
 			 * information about currently configured ports.
*/ -#ifdef CONFIG_BNX2X_VXLAN - if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) - vxlan_get_rx_port(bp->dev); -#endif -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) - if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) - geneve_get_rx_port(bp->dev); -#endif + if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count && + !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) + udp_tunnel_get_rx_info(bp->dev); } } -#endif /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) @@ -12545,14 +12540,8 @@ static int bnx2x_open(struct net_device *dev) if (rc) return rc; -#ifdef CONFIG_BNX2X_VXLAN if (IS_PF(bp)) - vxlan_get_rx_port(dev); -#endif -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) - if (IS_PF(bp)) - geneve_get_rx_port(dev); -#endif + udp_tunnel_get_rx_info(dev); return 0; } @@ -13039,14 +13028,8 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, .ndo_features_check = bnx2x_features_check, -#ifdef CONFIG_BNX2X_VXLAN - .ndo_add_vxlan_port = bnx2x_add_vxlan_port, - .ndo_del_vxlan_port = bnx2x_del_vxlan_port, -#endif -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) - .ndo_add_geneve_port = bnx2x_add_geneve_port, - .ndo_del_geneve_port = bnx2x_del_geneve_port, -#endif + .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add, + .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del, }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e655b76e8..228c964e7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -37,9 +37,7 @@ #include #include #include -#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) -#include -#endif +#include #ifdef CONFIG_NET_RX_BUSY_POLL #include #endif @@ -75,12 +73,32 @@ enum board_idx { BCM57301, BCM57302, BCM57304, + BCM57417_NPAR, + BCM58700, + BCM57311, + BCM57312, BCM57402, BCM57404, BCM57406, + BCM57402_NPAR, + BCM57407, + BCM57412, + BCM57414, + BCM57416, + BCM57417, + BCM57412_NPAR, BCM57314, + BCM57417_SFP, + BCM57416_SFP, + BCM57404_NPAR, + BCM57406_NPAR, + BCM57407_SFP, + BCM57414_NPAR, + BCM57416_NPAR, BCM57304_VF, BCM57404_VF, + BCM57414_VF, + BCM57314_VF, }; /* indexed by enum above */ @@ -90,25 +108,65 @@ static const struct { { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" }, { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" }, + { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" }, + { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" }, { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" }, + { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, + { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb 
Ethernet" }, + { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, + { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" }, + { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" }, + { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, + { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" }, + { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, + { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, + { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, + { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, + { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, + { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, + { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, + { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, + { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, + { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, + { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF }, { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF }, + { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF }, + { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF }, #endif { 0 } }; @@ -125,12 +183,14 @@ static const u16 bnxt_async_events_arr[] = { HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, }; static bool bnxt_vf_pciid(enum board_idx idx) { - return (idx == BCM57304_VF || idx == BCM57404_VF); + return (idx == BCM57304_VF || idx == BCM57404_VF || + idx == BCM57314_VF || idx == BCM57414_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -920,6 +980,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, } tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); + tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); rxr->rx_prod = NEXT_RX(prod); 
cons = NEXT_RX(cons);
@@ -938,32 +999,102 @@ static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
 	bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
 }
 
+static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
+					   int payload_off, int tcp_ts,
+					   struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+	struct tcphdr *th;
+	int len, nw_off;
+	u16 outer_ip_off, inner_ip_off, inner_mac_off;
+	u32 hdr_info = tpa_info->hdr_info;
+	bool loopback = false;
+
+	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+	/* If the packet is an internal loopback packet, the offsets will
+	 * have an extra 4 bytes.
+	 */
+	if (inner_mac_off == 4) {
+		loopback = true;
+	} else if (inner_mac_off > 4) {
+		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
+					    ETH_HLEN - 2));
+
+		/* We only support inner IPv4/IPv6.  If we don't see the
+		 * correct protocol ID, it must be a loopback packet where
+		 * the offsets are off by 4.
+		 */
+		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
+			loopback = true;
+	}
+	if (loopback) {
+		/* internal loopback packet, subtract all offsets by 4 */
+		inner_ip_off -= 4;
+		inner_mac_off -= 4;
+		outer_ip_off -= 4;
+	}
+
+	nw_off = inner_ip_off - ETH_HLEN;
+	skb_set_network_header(skb, nw_off);
+	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
+		struct ipv6hdr *iph = ipv6_hdr(skb);
+
+		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+		len = skb->len - skb_transport_offset(skb);
+		th = tcp_hdr(skb);
+		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+	} else {
+		struct iphdr *iph = ip_hdr(skb);
+
+		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+		len = skb->len - skb_transport_offset(skb);
+		th = tcp_hdr(skb);
+		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+	}
+
+	if (inner_mac_off) { /* tunnel */
+		struct udphdr *uh = NULL;
+		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+					    ETH_HLEN - 2));
+
+		if (proto == htons(ETH_P_IP)) {
+			struct iphdr *iph = (struct iphdr *)skb->data;
+
+			if (iph->protocol == IPPROTO_UDP)
+				uh = (struct udphdr *)(iph + 1);
+		} else {
+			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+			if (iph->nexthdr == IPPROTO_UDP)
+				uh = (struct udphdr *)(iph + 1);
+		}
+		if (uh) {
+			if (uh->check)
+				skb_shinfo(skb)->gso_type |=
+					SKB_GSO_UDP_TUNNEL_CSUM;
+			else
+				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+		}
+	}
+#endif
+	return skb;
+}
+
 #define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
 #define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
 
-static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
-					   struct rx_tpa_end_cmp *tpa_end,
-					   struct rx_tpa_end_cmp_ext *tpa_end1,
+static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
+					   int payload_off, int tcp_ts,
 					   struct sk_buff *skb)
 {
 #ifdef CONFIG_INET
 	struct tcphdr *th;
-	int payload_off, tcp_opt_len = 0;
-	int len, nw_off;
-	u16 segs;
-
-	segs = TPA_END_TPA_SEGS(tpa_end);
-	if (segs == 1)
-		return skb;
+	int len, nw_off, tcp_opt_len = 0;
 
-	NAPI_GRO_CB(skb)->count = segs;
-	skb_shinfo(skb)->gso_size =
-		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
-	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
-	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
-		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
-		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
-	if (TPA_END_GRO_TS(tpa_end))
+	if (tcp_ts)
 		tcp_opt_len = 12;
 
 	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
@@
-1020,6 +1151,32 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, return skb; } +static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, + struct bnxt_tpa_info *tpa_info, + struct rx_tpa_end_cmp *tpa_end, + struct rx_tpa_end_cmp_ext *tpa_end1, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + int payload_off; + u16 segs; + + segs = TPA_END_TPA_SEGS(tpa_end); + if (segs == 1) + return skb; + + NAPI_GRO_CB(skb)->count = segs; + skb_shinfo(skb)->gso_size = + le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); + skb_shinfo(skb)->gso_type = tpa_info->gso_type; + payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; + skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); +#endif + return skb; +} + static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, @@ -1130,7 +1287,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (TPA_END_GRO(tpa_end)) - skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb); + skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); return skb; } @@ -1358,6 +1515,11 @@ static int bnxt_async_event_process(struct bnxt *bp, set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); break; } + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: + if (BNXT_PF(bp)) + goto async_event_process_exit; + set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); + break; default: netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", event_id); @@ -1536,6 +1698,76 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) return rx_pkts; } +static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct tx_cmp *txcmp; + struct rx_cmp_ext *rxcmp1; + u32 cp_cons, tmp_raw_cons; + u32 raw_cons = cpr->cp_raw_cons; + u32 rx_pkts = 0; + bool agg_event = false; + + while (1) { + int rc; + + cp_cons = RING_CMP(raw_cons); + txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) + break; + + if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { + tmp_raw_cons = NEXT_RAW_CMP(raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + break; + + /* force an error to recycle the buffer */ + rxcmp1->rx_cmp_cfa_code_errors_v2 |= + cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); + + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); + if (likely(rc == -EIO)) + rx_pkts++; + else if (rc == -EBUSY) /* partial completion */ + break; + } else if (unlikely(TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_DONE)) { + bnxt_hwrm_handler(bp, txcmp); + } else { + netdev_err(bp->dev, + "Invalid completion received on special ring\n"); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + + if (rx_pkts == budget) + break; + } + + cpr->cp_raw_cons = raw_cons; + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + + if (agg_event) { + writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); + writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); + } + + if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { + napi_complete(napi); + 
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + } + return rx_pkts; +} + static int bnxt_poll(struct napi_struct *napi, int budget) { struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); @@ -2208,6 +2440,9 @@ static int bnxt_alloc_vnics(struct bnxt *bp) num_vnics += bp->rx_nr_rings; #endif + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + num_vnics++; + bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), GFP_KERNEL); if (!bp->vnic_info) @@ -2225,7 +2460,8 @@ static void bnxt_init_vnics(struct bnxt *bp) struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; vnic->fw_vnic_id = INVALID_HW_RING_ID; - vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; if (bp->vnic_info[i].rss_hash_key) { @@ -2262,7 +2498,7 @@ static void bnxt_set_tpa_flags(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_TPA; if (bp->dev->features & NETIF_F_LRO) bp->flags |= BNXT_FLAG_LRO; - if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0)) + if (bp->dev->features & NETIF_F_GRO) bp->flags |= BNXT_FLAG_GRO; } @@ -2529,7 +2765,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } - if (BNXT_PF(bp)) { + if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { bp->hw_port_stats_size = sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024; @@ -3031,7 +3267,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); - req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0]; + req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); @@ -3068,8 +3304,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); - req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX | - CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); + req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + req.flags |= + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); req.enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | @@ -3176,7 +3414,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input req = {0}; - if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID) + if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); @@ -3188,10 +3426,14 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) req.hash_type = cpu_to_le32(vnic->hash_type); - if (vnic->flags & BNXT_VNIC_RSS_FLAG) - max_rings = bp->rx_nr_rings; - else + if (vnic->flags & BNXT_VNIC_RSS_FLAG) { + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + max_rings = bp->rx_nr_rings - 1; + else + max_rings = bp->rx_nr_rings; + } else { max_rings = 1; + } /* Fill the RSS indirection table with ring group ids */ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { @@ -3204,7 +3446,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } - 
req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); + req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } @@ -3227,32 +3469,35 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id) +static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, + u16 ctx_idx) { struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); req.rss_cos_lb_ctx_id = - cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx); + cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; } static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) { - int i; + int i, j; for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; - if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID) - bnxt_hwrm_vnic_ctx_free_one(bp, i); + for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { + if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, i, j); + } } bp->rsscos_nr_ctxs = 0; } -static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) +static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { int rc; struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; @@ -3265,7 +3510,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); mutex_unlock(&bp->hwrm_cmd_lock); @@ -3277,17 +3522,34 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) unsigned int ring = 0, grp_idx; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input req = {0}; + u16 def_vlan = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + + req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); /* Only RSS support for now TBD: COS & LB */ - req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP | - VNIC_CFG_REQ_ENABLES_RSS_RULE); - req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); - req.cos_rule = cpu_to_le16(0xffff); + if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { + req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); + } else { + req.rss_rule = cpu_to_le16(0xffff); + } + + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && + (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { + req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); + req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); + } else { + req.cos_rule = cpu_to_le16(0xffff); + } + if (vnic->flags & BNXT_VNIC_RSS_FLAG) ring = 0; else if (vnic->flags & BNXT_VNIC_RFS_FLAG) ring = vnic_id - 1; + else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) + ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); @@ -3297,7 +3559,11 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); - if 
(bp->flags & BNXT_FLAG_STRIP_VLAN) +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp)) + def_vlan = bp->vf.vlan; +#endif + if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -3351,7 +3617,8 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, bp->grp_info[grp_idx].fw_grp_id; } - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; if (vnic_id == 0) req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); @@ -3784,6 +4051,9 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (!bp->bnapi) return 0; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); @@ -3812,9 +4082,12 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) struct hwrm_stat_ctx_alloc_input req = {0}; struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); - req.update_period_ms = cpu_to_le32(1000); + req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { @@ -3836,6 +4109,39 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) return 0; } +static int bnxt_hwrm_func_qcfg(struct bnxt *bp) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto func_qcfg_exit; + +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp)) { + struct bnxt_vf_info *vf = &bp->vf; + + vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; + } +#endif + switch (resp->port_partition_type) { + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: + bp->port_partition_type = resp->port_partition_type; + break; + } + +func_qcfg_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; @@ -3855,6 +4161,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->fw_fid = le16_to_cpu(resp->fid); pf->port_id = le16_to_cpu(resp->port_id); + bp->dev->dev_port = pf->port_id; memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); @@ -3990,6 +4297,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) if (resp->hwrm_intf_maj >= 1) bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); + bp->chip_num = le16_to_cpu(resp->chip_num); + if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && + !resp->chip_metal) + bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -4078,7 +4390,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) int rc; /* allocate context for vnic */ - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", vnic_id, rc); @@ -4086,6 +4398,16 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) } 
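(Editorial aside, illustration only: for NS2 Nitro A0 this hunk starts allocating a second firmware context per VNIC, slot 0 for the RSS rule and slot 1 for the COS rule, with INVALID_HW_RING_ID marking an empty slot. A minimal stand-alone sketch of that bookkeeping pattern follows; the stub counter is a placeholder for the real HWRM_VNIC_RSS_COS_LB_CTX_ALLOC request and is not driver code.)

    /* Illustration only: mirrors the two-slot context array
     * (fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC]) this patch adds.
     */
    #include <stdint.h>

    #define INVALID_HW_RING_ID      ((uint16_t)-1)
    #define BNXT_MAX_CTX_PER_VNIC   2

    struct vnic_ctx {
            uint16_t fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
    };

    /* placeholder for the firmware context-alloc call */
    static uint16_t hwrm_ctx_alloc_stub(void)
    {
            static uint16_t next_id;
            return next_id++;
    }

    static void vnic_ctx_setup(struct vnic_ctx *vnic, int is_nitro_a0)
    {
            int i;

            /* start with every slot marked free, as bnxt_init_vnics() does */
            for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
                    vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;

            vnic->fw_rss_cos_lb_ctx[0] = hwrm_ctx_alloc_stub(); /* RSS rule */
            if (is_nitro_a0) /* A0 silicon also needs a COS rule context */
                    vnic->fw_rss_cos_lb_ctx[1] = hwrm_ctx_alloc_stub();
    }

(Teardown walks the same array and skips slots still set to INVALID_HW_RING_ID, which is what the reworked bnxt_hwrm_vnic_ctx_free() in this patch does.)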
bp->rsscos_nr_ctxs++; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", + vnic_id, rc); + goto vnic_setup_err; + } + bp->rsscos_nr_ctxs++; + } + /* configure default vnic, ring grp */ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); if (rc) { @@ -4143,6 +4465,36 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) #endif } +/* Allow PF and VF with default VLAN to be in promiscuous mode */ +static bool bnxt_promisc_ok(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp) && !bp->vf.vlan) + return false; +#endif + return true; +} + +static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) +{ + unsigned int rc = 0; + + rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); + if (rc) { + netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", + rc); + return rc; + } + + rc = bnxt_hwrm_vnic_cfg(bp, 1); + if (rc) { + netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", + rc); + return rc; + } + return rc; +} + static int bnxt_cfg_rx_mode(struct bnxt *); static bool bnxt_mc_list_updated(struct bnxt *, u32 *); @@ -4150,6 +4502,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) { struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; int rc = 0; + unsigned int rx_nr_rings = bp->rx_nr_rings; if (irq_re_init) { rc = bnxt_hwrm_stat_ctx_alloc(bp); @@ -4172,8 +4525,11 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) goto err_out; } + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + rx_nr_rings--; + /* default vnic 0 */ - rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings); + rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); goto err_out; @@ -4208,7 +4564,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; - if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) + if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; if (bp->dev->flags & IFF_ALLMULTI) { @@ -4228,7 +4584,19 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rc = bnxt_hwrm_set_coal(bp); if (rc) netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", - rc); + rc); + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + rc = bnxt_setup_nitroa0_vnic(bp); + if (rc) + netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", + rc); + } + + if (BNXT_VF(bp)) { + bnxt_hwrm_func_qcfg(bp); + netdev_update_features(bp->dev); + } return 0; @@ -4532,14 +4900,23 @@ static void bnxt_del_napi(struct bnxt *bp) static void bnxt_init_napi(struct bnxt *bp) { int i; + unsigned int cp_nr_rings = bp->cp_nr_rings; struct bnxt_napi *bnapi; if (bp->flags & BNXT_FLAG_USING_MSIX) { - for (i = 0; i < bp->cp_nr_rings; i++) { + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + cp_nr_rings--; + for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); } + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + bnapi = bp->bnapi[cp_nr_rings]; + netif_napi_add(bp->dev, &bnapi->napi, + bnxt_poll_nitroa0, 64); + napi_hash_add(&bnapi->napi); + } } else { bnapi = bp->bnapi[0]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); @@ -4580,9 +4957,7 @@ static void bnxt_tx_disable(struct bnxt *bp) for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(bp->dev, i); - __netif_tx_lock(txq, smp_processor_id()); txr->dev_state = BNXT_DEV_STATE_CLOSING; - 
__netif_tx_unlock(txq); } } /* Stop all TX queues */ @@ -4644,6 +5019,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) int rc = 0; struct hwrm_port_phy_qcaps_input req = {0}; struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_link_info *link_info = &bp->link_info; if (bp->hwrm_spec_code < 0x10201) return 0; @@ -4666,6 +5042,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; } + link_info->support_auto_speeds = + le16_to_cpu(resp->supported_speeds_auto_mode); hwrm_phy_qcaps_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -4923,7 +5301,7 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) { struct hwrm_port_phy_cfg_input req = {0}; - if (BNXT_VF(bp)) + if (!BNXT_SINGLE_PF(bp)) return 0; if (pci_num_vf(bp->pdev)) @@ -5073,15 +5451,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) netdev_warn(bp->dev, "failed to update phy settings\n"); } - if (irq_re_init) { -#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) - vxlan_get_rx_port(bp->dev); -#endif - if (!bnxt_hwrm_tunnel_dst_port_alloc( - bp, htons(0x17c1), - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE)) - bp->nge_port_cnt = 1; - } + if (irq_re_init) + udp_tunnel_get_rx_info(bp->dev); set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); @@ -5122,12 +5493,19 @@ static int bnxt_open(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); int rc = 0; - rc = bnxt_hwrm_func_reset(bp); - if (rc) { - netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", - rc); - rc = -1; - return rc; + if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) { + rc = bnxt_hwrm_func_reset(bp); + if (rc) { + netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", + rc); + rc = -EBUSY; + return rc; + } + /* Do func_reset during the 1st PF open only to prevent killing + * the VFs when the PF is brought down and up. 
+ */ + if (BNXT_PF(bp)) + set_bit(BNXT_STATE_FN_RST_DONE, &bp->state); } return __bnxt_open_nic(bp, true, true); } @@ -5347,8 +5725,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); - /* Only allow PF to be in promiscuous mode */ - if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp)) + if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; uc_update = bnxt_uc_list_updated(bp); @@ -5440,8 +5817,12 @@ static bool bnxt_rfs_capable(struct bnxt *bp) return false; vnics = 1 + bp->rx_nr_rings; - if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) + if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) { + netdev_warn(bp->dev, + "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", + min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1)); return false; + } return true; #else @@ -5454,7 +5835,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, { struct bnxt *bp = netdev_priv(dev); - if (!bnxt_rfs_capable(bp)) + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) features &= ~NETIF_F_NTUPLE; /* Both CTAG and STAG VLAN accelaration on the RX side have to be @@ -5469,7 +5850,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; } - +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp)) { + if (bp->vf.vlan) { + features &= ~(NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX); + } + } +#endif return features; } @@ -5483,7 +5871,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) bool update_tpa = false; flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; - if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0)) + if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) flags |= BNXT_FLAG_GRO; if (features & NETIF_F_LRO) flags |= BNXT_FLAG_LRO; @@ -5585,9 +5973,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) } } -static void bnxt_reset_task(struct bnxt *bp) +static void bnxt_reset_task(struct bnxt *bp, bool silent) { - bnxt_dbg_dump_states(bp); + if (!silent) + bnxt_dbg_dump_states(bp); if (netif_running(bp->dev)) { bnxt_close_nic(bp, false, false); bnxt_open_nic(bp, false, false); @@ -5638,6 +6027,23 @@ bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } +/* Only called from bnxt_sp_task() */ +static void bnxt_reset(struct bnxt *bp, bool silent) +{ + /* bnxt_reset_task() calls bnxt_close_nic() which waits + * for BNXT_STATE_IN_SP_TASK to clear. + * If there is a parallel dev_close(), bnxt_close() may be holding + * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we + * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). + */ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_reset_task(bp, silent); + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_unlock(); +} + static void bnxt_cfg_ntp_filters(struct bnxt *); static void bnxt_sp_task(struct work_struct *work) @@ -5674,16 +6080,20 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); } - if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { - /* bnxt_reset_task() calls bnxt_close_nic() which waits - * for BNXT_STATE_IN_SP_TASK to clear. 
- */ - clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_lock(); - bnxt_reset_task(bp); - set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_unlock(); + if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_tunnel_dst_port_alloc( + bp, bp->nge_port, + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); + } + if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) + bnxt_reset(bp, false); + + if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) + bnxt_reset(bp, true); if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) bnxt_get_port_module_status(bp); @@ -5774,6 +6184,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->tx_coal_ticks_irq = 2; bp->tx_coal_bufs_irq = 2; + bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; + init_timer(&bp->timer); bp->timer.data = (unsigned long)bp; bp->timer.function = bnxt_timer; @@ -5839,7 +6251,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) { struct bnxt *bp = netdev_priv(dev); - if (new_mtu < 60 || new_mtu > 9000) + if (new_mtu < 60 || new_mtu > 9500) return -EINVAL; if (netif_running(dev)) @@ -5918,7 +6330,8 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, keys1->ports.ports == keys2->ports.ports && keys1->basic.ip_proto == keys2->basic.ip_proto && keys1->basic.n_proto == keys2->basic.n_proto && - ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr)) + ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && + ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) return true; return false; @@ -5931,12 +6344,28 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, struct bnxt_ntuple_filter *fltr, *new_fltr; struct flow_keys *fkeys; struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); - int rc = 0, idx, bit_id; + int rc = 0, idx, bit_id, l2_idx = 0; struct hlist_head *head; if (skb->encapsulation) return -EPROTONOSUPPORT; + if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + int off = 0, j; + + netif_addr_lock_bh(dev); + for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { + if (ether_addr_equal(eth->h_dest, + vnic->uc_list + off)) { + l2_idx = j + 1; + break; + } + } + netif_addr_unlock_bh(dev); + if (!l2_idx) + return -EINVAL; + } new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); if (!new_fltr) return -ENOMEM; @@ -5954,6 +6383,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, goto err_free; } + memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; @@ -5979,6 +6409,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, new_fltr->sw_id = (u16)bit_id; new_fltr->flow_id = flow_id; + new_fltr->l2_fltr_idx = l2_idx; new_fltr->rxq = rxq_index; hlist_add_head_rcu(&new_fltr->hash, head); bp->ntp_fltr_count++; @@ -6048,47 +6479,83 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) #endif /* CONFIG_RFS_ACCEL */ -static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) +static void bnxt_udp_tunnel_add(struct net_device *dev, + struct udp_tunnel_info *ti) { struct bnxt *bp = netdev_priv(dev); - if (!netif_running(dev)) + if (ti->sa_family != AF_INET6 && 
ti->sa_family != AF_INET) return; - if (sa_family != AF_INET6 && sa_family != AF_INET) + if (!netif_running(dev)) return; - if (bp->vxlan_port_cnt && bp->vxlan_port != port) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) + return; - bp->vxlan_port_cnt++; - if (bp->vxlan_port_cnt == 1) { - bp->vxlan_port = port; - set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bp->vxlan_port_cnt++; + if (bp->vxlan_port_cnt == 1) { + bp->vxlan_port = ti->port; + set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (bp->nge_port_cnt && bp->nge_port != ti->port) + return; + + bp->nge_port_cnt++; + if (bp->nge_port_cnt == 1) { + bp->nge_port = ti->port; + set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); + } + break; + default: + return; } + + schedule_work(&bp->sp_task); } -static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) +static void bnxt_udp_tunnel_del(struct net_device *dev, + struct udp_tunnel_info *ti) { struct bnxt *bp = netdev_priv(dev); - if (!netif_running(dev)) + if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) return; - if (sa_family != AF_INET6 && sa_family != AF_INET) + if (!netif_running(dev)) return; - if (bp->vxlan_port_cnt && bp->vxlan_port == port) { + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) + return; bp->vxlan_port_cnt--; - if (bp->vxlan_port_cnt == 0) { - set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); - } + if (bp->vxlan_port_cnt != 0) + return; + + set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!bp->nge_port_cnt || bp->nge_port != ti->port) + return; + bp->nge_port_cnt--; + + if (bp->nge_port_cnt != 0) + return; + + set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); + break; + default: + return; } + + schedule_work(&bp->sp_task); } static const struct net_device_ops bnxt_netdev_ops = { @@ -6119,8 +6586,8 @@ static const struct net_device_ops bnxt_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = bnxt_rx_flow_steer, #endif - .ndo_add_vxlan_port = bnxt_add_vxlan_port, - .ndo_del_vxlan_port = bnxt_del_vxlan_port, + .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, + .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = bnxt_busy_poll, #endif @@ -6169,6 +6636,12 @@ static int bnxt_probe_phy(struct bnxt *bp) return rc; } + /* Older firmware does not have supported_auto_speeds, so assume + * that all supported speeds can be autonegotiated. + */ + if (link_info->auto_link_speeds && !link_info->support_auto_speeds) + link_info->support_auto_speeds = link_info->support_speeds; + /*initialize the ethool setting copy with NVM settings */ if (BNXT_AUTO_MODE(link_info->auto_mode)) { link_info->autoneg = BNXT_AUTONEG_SPEED; @@ -6224,7 +6697,10 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); max_ring_grps = bp->pf.max_hw_ring_grps; } - + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { + *max_cp -= 1; + *max_rx -= 2; + } if (bp->flags & BNXT_FLAG_AGG_RINGS) *max_rx >>= 1; *max_rx = min_t(int, *max_rx, max_ring_grps); @@ -6260,6 +6736,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp) bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; bp->num_stat_ctxs = bp->cp_nr_rings; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + bp->rx_nr_rings++; + bp->cp_nr_rings++; + } return rc; } @@ -6286,6 +6766,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct bnxt *bp; int rc, max_irqs; + if (pdev->device == 0x16cd && pci_is_bridge(pdev)) + return -ENODEV; + if (version_printed++ == 0) pr_info("%s", version); @@ -6312,13 +6795,25 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); + rc = bnxt_alloc_hwrm_resources(bp); + if (rc) + goto init_err; + + mutex_init(&bp->hwrm_cmd_lock); + rc = bnxt_hwrm_ver_get(bp); + if (rc) + goto init_err; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | - NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO; + NETIF_F_RXCSUM | NETIF_F_GRO; + + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + dev->hw_features |= NETIF_F_LRO; dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | @@ -6337,12 +6832,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); #endif - rc = bnxt_alloc_hwrm_resources(bp); - if (rc) - goto init_err; - - mutex_init(&bp->hwrm_cmd_lock); - bnxt_hwrm_ver_get(bp); + bp->gro_func = bnxt_gro_func_5730x; + if (BNXT_CHIP_NUM_57X1X(bp->chip_num)) + bp->gro_func = bnxt_gro_func_5731x; rc = bnxt_hwrm_func_drv_rgtr(bp); if (rc) @@ -6365,6 +6857,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err; } + bnxt_hwrm_func_qcfg(bp); + bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); if (BNXT_PF(bp)) @@ -6375,7 +6869,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #endif bnxt_set_dflt_rings(bp); - if (BNXT_PF(bp)) { + if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { bp->flags |= BNXT_FLAG_RFS; @@ -6424,6 +6918,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(netdev); netdev_info(netdev, "PCI I/O error detected\n"); @@ -6438,6 +6933,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) bnxt_close(netdev); + /* So that func_reset will be done during slot_reset */ + clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state); pci_disable_device(pdev); rtnl_unlock(); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2824d65b2..23e04a614 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -11,10 +11,10 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.2.0" +#define DRV_MODULE_VERSION "1.3.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 0 +#define DRV_VER_MIN 3 #define DRV_VER_UPD 0 struct tx_bd { @@ -298,13 +298,14 @@ struct rx_tpa_start_cmp_ext { #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8) __le32 rx_tpa_start_cmp_metadata; 
__le32 rx_tpa_start_cmp_cfa_code_v2; #define RX_TPA_START_CMP_V2 (0x1 << 0) #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 - __le32 rx_tpa_start_cmp_unused5; + __le32 rx_tpa_start_cmp_hdr_info; }; struct rx_tpa_end_cmp { @@ -358,7 +359,8 @@ struct rx_tpa_end_cmp { RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO) #define TPA_END_GRO_TS(rx_tpa_end) \ - ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS)) + (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \ + cpu_to_le32(RX_TPA_END_GRO_TS))) struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_dup_acks; @@ -584,6 +586,19 @@ struct bnxt_tpa_info { u32 metadata; enum pkt_hash_types hash_type; u32 rss_hash; + u32 hdr_info; + +#define BNXT_TPA_L4_SIZE(hdr_info) \ + (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) + +#define BNXT_TPA_INNER_L3_OFF(hdr_info) \ + (((hdr_info) >> 18) & 0x1ff) + +#define BNXT_TPA_INNER_L2_OFF(hdr_info) \ + (((hdr_info) >> 9) & 0x1ff) + +#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ + ((hdr_info) & 0x1ff) }; struct bnxt_rx_ring_info { @@ -680,7 +695,8 @@ struct bnxt_ring_grp_info { struct bnxt_vnic_info { u16 fw_vnic_id; /* returned by Chimp during alloc */ - u16 fw_rss_cos_lb_ctx; +#define BNXT_MAX_CTX_PER_VNIC 2 + u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC]; u16 fw_l2_ctx_id; #define BNXT_MAX_UC_ADDRS 4 __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS]; @@ -739,8 +755,8 @@ struct bnxt_vf_info { struct bnxt_pf_info { #define BNXT_FIRST_PF_FID 1 #define BNXT_FIRST_VF_FID 128 - u32 fw_fid; - u8 port_id; + u16 fw_fid; + u16 port_id; u8 mac_addr[ETH_ALEN]; u16 max_rsscos_ctxs; u16 max_cp_rings; @@ -769,10 +785,12 @@ struct bnxt_pf_info { struct bnxt_ntuple_filter { struct hlist_node hash; + u8 dst_mac_addr[ETH_ALEN]; u8 src_mac_addr[ETH_ALEN]; struct flow_keys fkeys; __le64 filter_id; u16 sw_id; + u8 l2_fltr_idx; u16 rxq; u32 flow_id; unsigned long state; @@ -835,6 +853,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB + u16 support_auto_speeds; u16 lp_auto_link_speeds; u16 force_link_speed; u32 preemphasis; @@ -873,6 +892,45 @@ struct bnxt { void __iomem *bar2; u32 reg_base; + u16 chip_num; +#define CHIP_NUM_57301 0x16c8 +#define CHIP_NUM_57302 0x16c9 +#define CHIP_NUM_57304 0x16ca +#define CHIP_NUM_58700 0x16cd +#define CHIP_NUM_57402 0x16d0 +#define CHIP_NUM_57404 0x16d1 +#define CHIP_NUM_57406 0x16d2 + +#define CHIP_NUM_57311 0x16ce +#define CHIP_NUM_57312 0x16cf +#define CHIP_NUM_57314 0x16df +#define CHIP_NUM_57412 0x16d6 +#define CHIP_NUM_57414 0x16d7 +#define CHIP_NUM_57416 0x16d8 +#define CHIP_NUM_57417 0x16d9 + +#define BNXT_CHIP_NUM_5730X(chip_num) \ + ((chip_num) >= CHIP_NUM_57301 && \ + (chip_num) <= CHIP_NUM_57304) + +#define BNXT_CHIP_NUM_5740X(chip_num) \ + ((chip_num) >= CHIP_NUM_57402 && \ + (chip_num) <= CHIP_NUM_57406) + +#define BNXT_CHIP_NUM_5731X(chip_num) \ + ((chip_num) == CHIP_NUM_57311 || \ + (chip_num) == CHIP_NUM_57312 || \ + (chip_num) == CHIP_NUM_57314) + +#define BNXT_CHIP_NUM_5741X(chip_num) \ + ((chip_num) >= CHIP_NUM_57412 && \ + (chip_num) <= CHIP_NUM_57417) + +#define BNXT_CHIP_NUM_57X0X(chip_num) \ + (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num)) + +#define BNXT_CHIP_NUM_57X1X(chip_num) \ + (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num)) struct net_device *dev; struct pci_dev *pdev; @@ -900,6 +958,7 @@ struct bnxt { 
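
As an aside on the rx_tpa_start_cmp_hdr_info field introduced above: it packs four header offsets into one 32-bit word, and the BNXT_TPA_* helpers in the bnxt_tpa_info hunk unpack them (L4 size in bits 31:27 with 0 meaning 32, two 9-bit inner offsets, and a 9-bit outer L3 offset). A self-contained check of that layout; the sample value is made up:

    #include <stdio.h>
    #include <stdint.h>

    /* same shifts/masks as the BNXT_TPA_* macros above */
    #define TPA_L4_SIZE(h)      (((h) & 0xf8000000) ? ((h) >> 27) : 32)
    #define TPA_INNER_L3_OFF(h) (((h) >> 18) & 0x1ff)
    #define TPA_INNER_L2_OFF(h) (((h) >> 9) & 0x1ff)
    #define TPA_OUTER_L3_OFF(h) ((h) & 0x1ff)

    int main(void)
    {
        /* made-up sample: L4 size 20, inner L3 at 50, inner L2 at 36, outer L3 at 14 */
        uint32_t h = (20u << 27) | (50u << 18) | (36u << 9) | 14u;

        printf("l4=%u il3=%u il2=%u ol3=%u\n",
               TPA_L4_SIZE(h), TPA_INNER_L3_OFF(h),
               TPA_INNER_L2_OFF(h), TPA_OUTER_L3_OFF(h));
        return 0;    /* prints: l4=20 il3=50 il2=36 ol3=14 */
    }
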
#define BNXT_FLAG_SHARED_RINGS 0x200 #define BNXT_FLAG_PORT_STATS 0x400 #define BNXT_FLAG_EEE_CAP 0x1000 + #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -907,12 +966,18 @@ struct bnxt { #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) +#define BNXT_NPAR(bp) ((bp)->port_partition_type) +#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp)) +#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) struct bnxt_napi **bnapi; struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; + struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int, + struct sk_buff *); + u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ u32 rx_ring_size; @@ -959,6 +1024,7 @@ struct bnxt { unsigned long state; #define BNXT_STATE_OPEN 0 #define BNXT_STATE_IN_SP_TASK 1 +#define BNXT_STATE_FN_RST_DONE 2 struct bnxt_irq *irq_tbl; u8 mac_addr[ETH_ALEN]; @@ -991,8 +1057,10 @@ struct bnxt { __be16 vxlan_port; u8 vxlan_port_cnt; __le16 vxlan_fw_dst_port_id; + __be16 nge_port; u8 nge_port_cnt; __le16 nge_fw_dst_port_id; + u8 port_partition_type; u16 rx_coal_ticks; u16 rx_coal_ticks_irq; @@ -1005,6 +1073,11 @@ struct bnxt { #define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) + u32 stats_coal_ticks; +#define BNXT_DEF_STATS_COAL_TICKS 1000000 +#define BNXT_MIN_STATS_COAL_TICKS 250000 +#define BNXT_MAX_STATS_COAL_TICKS 1000000 + struct work_struct sp_task; unsigned long sp_event; #define BNXT_RX_MASK_SP_EVENT 0 @@ -1018,6 +1091,9 @@ struct bnxt { #define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 #define BNXT_PERIODIC_STATS_SP_EVENT 9 #define BNXT_HWRM_PORT_MODULE_SP_EVENT 10 +#define BNXT_RESET_TASK_SILENT_SP_EVENT 11 +#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12 +#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1b0ae4a72..b83e17403 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -56,6 +56,8 @@ static int bnxt_get_coalesce(struct net_device *dev, coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq; + coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; + return 0; } @@ -63,6 +65,7 @@ static int bnxt_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct bnxt *bp = netdev_priv(dev); + bool update_stats = false; int rc = 0; bp->rx_coal_ticks = coal->rx_coalesce_usecs; @@ -76,8 +79,26 @@ static int bnxt_set_coalesce(struct net_device *dev, bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq; - if (netif_running(dev)) - rc = bnxt_hwrm_set_coal(bp); + if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { + u32 stats_ticks = coal->stats_block_coalesce_usecs; + + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); + stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); + bp->stats_coal_ticks = stats_ticks; + update_stats = true; + } + + if (netif_running(dev)) { + if (update_stats) { + rc = bnxt_close_nic(bp, true, false); + if (!rc) + rc = bnxt_open_nic(bp, true, false); + } else { + rc = bnxt_hwrm_set_coal(bp); + } + } return rc; } @@ -341,9 +362,13 @@ static void bnxt_get_channels(struct net_device *dev, channel->max_other = 0; if (bp->flags & 
BNXT_FLAG_SHARED_RINGS) { channel->combined_count = bp->rx_nr_rings; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + channel->combined_count--; } else { - channel->rx_count = bp->rx_nr_rings; - channel->tx_count = bp->tx_nr_rings_per_tc; + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) { + channel->rx_count = bp->rx_nr_rings; + channel->tx_count = bp->tx_nr_rings_per_tc; + } } } @@ -366,6 +391,10 @@ static int bnxt_set_channels(struct net_device *dev, (channel->rx_count || channel->tx_count)) return -EINVAL; + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count || + channel->tx_count)) + return -EINVAL; + if (channel->combined_count) sh = true; @@ -628,7 +657,66 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) return speed_mask; } -static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) +#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ +{ \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 100baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 1000baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 10000baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 25000baseCR_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 40000baseCR4_Full);\ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 50000baseCR2_Full);\ + if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + Pause); \ + if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ + ethtool_link_ksettings_add_link_mode( \ + lk_ksettings, name, Asym_Pause);\ + } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + Asym_Pause); \ + } \ +} + +#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ +{ \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100baseT_Full) || \ + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100baseT_Half)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 1000baseT_Full) || \ + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 1000baseT_Half)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 10000baseT_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 25000baseCR_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 40000baseCR4_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 50000baseCR2_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ +} + +static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->auto_link_speeds; u8 fw_pause = 0; @@ -636,10 +724,11 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) fw_pause = link_info->auto_pause_setting; - return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); + 
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); } -static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) +static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->lp_auto_link_speeds; u8 fw_pause = 0; @@ -647,16 +736,24 @@ static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) fw_pause = link_info->lp_pause; - return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, + lp_advertising); } -static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) +static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->support_speeds; - u32 supported; - supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); - return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); + + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Asym_Pause); + + if (link_info->support_auto_speeds) + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Autoneg); } u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) @@ -683,65 +780,62 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) } } -static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnxt_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *lk_ksettings) { struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - u16 ethtool_speed; + struct ethtool_link_settings *base = &lk_ksettings->base; + u32 ethtool_speed; - cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); - - if (link_info->auto_link_speeds) - cmd->supported |= SUPPORTED_Autoneg; + ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); + ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); if (link_info->autoneg) { - cmd->advertising = - bnxt_fw_to_ethtool_advertised_spds(link_info); - cmd->advertising |= ADVERTISED_Autoneg; - cmd->autoneg = AUTONEG_ENABLE; + bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); + ethtool_link_ksettings_add_link_mode(lk_ksettings, + advertising, Autoneg); + base->autoneg = AUTONEG_ENABLE; if (link_info->phy_link_status == BNXT_LINK_LINK) - cmd->lp_advertising = - bnxt_fw_to_ethtool_lp_adv(link_info); + bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); if (!netif_carrier_ok(dev)) - cmd->duplex = DUPLEX_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + base->duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + base->duplex = DUPLEX_HALF; } else { - cmd->autoneg = AUTONEG_DISABLE; - cmd->advertising = 0; + base->autoneg = AUTONEG_DISABLE; ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); - cmd->duplex = DUPLEX_HALF; + base->duplex = DUPLEX_HALF; if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + base->duplex = DUPLEX_FULL; } - ethtool_cmd_speed_set(cmd, ethtool_speed); + base->speed = ethtool_speed; - cmd->port = PORT_NONE; + base->port = PORT_NONE; if (link_info->media_type == 
PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { - cmd->port = PORT_TP; - cmd->supported |= SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; + base->port = PORT_TP; + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + TP); + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, + TP); } else { - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, + FIBRE); if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) - cmd->port = PORT_DA; + base->port = PORT_DA; else if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) - cmd->port = PORT_FIBRE; + base->port = PORT_FIBRE; } - - if (link_info->transceiver == - PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) - cmd->transceiver = XCVR_INTERNAL; - else - cmd->transceiver = XCVR_EXTERNAL; - cmd->phy_address = link_info->phy_addr; + base->phy_address = link_info->phy_addr; return 0; } @@ -815,37 +909,25 @@ u16 bnxt_get_fw_auto_link_speeds(u32 advertising) return fw_speed_mask; } -static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnxt_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *lk_ksettings) { - int rc = 0; struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; + const struct ethtool_link_settings *base = &lk_ksettings->base; u32 speed, fw_advertising = 0; bool set_pause = false; + int rc = 0; - if (BNXT_VF(bp)) - return rc; - - if (cmd->autoneg == AUTONEG_ENABLE) { - u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info); + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; - if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | - ADVERTISED_TP | ADVERTISED_FIBRE)) { - netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", - cmd->advertising); - rc = -EINVAL; - goto set_setting_exit; - } - fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising); - if (fw_advertising & ~link_info->support_speeds) { - netdev_err(dev, "Advertising parameters are not supported! 
(adv: 0x%x)\n", - cmd->advertising); - rc = -EINVAL; - goto set_setting_exit; - } + if (base->autoneg == AUTONEG_ENABLE) { + BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, + advertising); link_info->autoneg |= BNXT_AUTONEG_SPEED; if (!fw_advertising) - link_info->advertising = link_info->support_speeds; + link_info->advertising = link_info->support_auto_speeds; else link_info->advertising = fw_advertising; /* any change to autoneg will cause link change, therefore the @@ -863,16 +945,12 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) rc = -EINVAL; goto set_setting_exit; } - /* TODO: currently don't support half duplex */ - if (cmd->duplex == DUPLEX_HALF) { + if (base->duplex == DUPLEX_HALF) { netdev_err(dev, "HALF DUPLEX is not supported!\n"); rc = -EINVAL; goto set_setting_exit; } - /* If received a request for an unknown duplex, assume full*/ - if (cmd->duplex == DUPLEX_UNKNOWN) - cmd->duplex = DUPLEX_FULL; - speed = ethtool_cmd_speed(cmd); + speed = base->speed; fw_speed = bnxt_get_fw_speed(dev, speed); if (!fw_speed) { rc = -EINVAL; @@ -911,8 +989,8 @@ static int bnxt_set_pauseparam(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - if (BNXT_VF(bp)) - return rc; + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; if (epause->autoneg) { if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) @@ -1010,6 +1088,8 @@ static int bnxt_firmware_reset(struct net_device *dev, case BNX_DIR_TYPE_APE_FW: case BNX_DIR_TYPE_APE_PATCH: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + /* Self-reset APE upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; break; case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: @@ -1043,9 +1123,27 @@ static int bnxt_flash_firmware(struct net_device *dev, case BNX_DIR_TYPE_BOOTCODE_2: code_type = CODE_BOOT; break; + case BNX_DIR_TYPE_CHIMP_PATCH: + code_type = CODE_CHIMP_PATCH; + break; case BNX_DIR_TYPE_APE_FW: code_type = CODE_MCTP_PASSTHRU; break; + case BNX_DIR_TYPE_APE_PATCH: + code_type = CODE_APE_PATCH; + break; + case BNX_DIR_TYPE_KONG_FW: + code_type = CODE_KONG_FW; + break; + case BNX_DIR_TYPE_KONG_PATCH: + code_type = CODE_KONG_PATCH; + break; + case BNX_DIR_TYPE_BONO_FW: + code_type = CODE_BONO_FW; + break; + case BNX_DIR_TYPE_BONO_PATCH: + code_type = CODE_BONO_PATCH; + break; default: netdev_err(dev, "Unsupported directory entry type: %u\n", dir_type); @@ -1100,6 +1198,8 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) case BNX_DIR_TYPE_APE_PATCH: case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: return true; } @@ -1137,7 +1237,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, const struct firmware *fw; int rc; - if (bnxt_dir_type_is_executable(dir_type) == false) + if (dir_type != BNX_DIR_TYPE_UPDATE && + bnxt_dir_type_is_executable(dir_type) == false) return -EINVAL; rc = request_firmware(&fw, filename, &dev->dev); @@ -1433,8 +1534,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); int rc = 0; - if (BNXT_VF(bp)) - return 0; + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; if (!(bp->flags & BNXT_FLAG_EEE_CAP)) return -EOPNOTSUPP; @@ -1618,8 +1719,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev, } const struct ethtool_ops bnxt_ethtool_ops = { - .get_settings = bnxt_get_settings, - .set_settings = 
bnxt_set_settings, + .get_link_ksettings = bnxt_get_link_ksettings, + .set_link_ksettings = bnxt_set_link_ksettings, .get_pauseparam = bnxt_get_pauseparam, .set_pauseparam = bnxt_set_pauseparam, .get_drvinfo = bnxt_get_drvinfo, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h index 461675caa..82bf44ab8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h @@ -70,6 +70,7 @@ enum SUPPORTED_CODE { CODE_KONG_PATCH, /* 18 - KONG Patch firmware */ CODE_BONO_FW, /* 19 - BONO firmware */ CODE_BONO_PATCH, /* 20 - BONO Patch firmware */ + CODE_CHIMP_PATCH, /* 21 - ChiMP Patch firmware */ MAX_CODE_TYPE, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 05e3c49a7..517567f6d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -105,6 +105,7 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) @@ -484,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error { #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL }; -/* HW Resource Manager Specification 1.2.2 */ +/* HW Resource Manager Specification 1.3.0 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 2 -#define HWRM_VERSION_UPDATE 2 +#define HWRM_VERSION_MINOR 3 +#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_STR "1.2.2" +#define HWRM_VERSION_STR "1.3.0" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
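
A note on the HWRM 1.3.0 bump above: the driver folds the three version fields into one comparable integer (bnxt stores it as bp->hwrm_spec_code), which is what makes simple threshold gates like the one in the bnxt_sriov.c hunk further down possible. A minimal sketch, assuming the usual one-byte-per-field 0xMMmmuu packing (consistent with the 0x10201 constant meaning 1.2.1):

    #include <stdio.h>
    #include <stdint.h>

    /* assumed packing of the spec code: 0x00MMmmuu, one byte per field */
    static uint32_t hwrm_spec(uint8_t maj, uint8_t min, uint8_t upd)
    {
        return ((uint32_t)maj << 16) | ((uint32_t)min << 8) | upd;
    }

    int main(void)
    {
        uint32_t fw = hwrm_spec(1, 3, 0);    /* the 1.3.0 bump above */

        /* same shape as the gate in the SR-IOV hunk below:
         * VF VLAN configuration wants spec >= 1.2.1 (0x10201)
         */
        printf("vf vlan: %s\n", fw < 0x10201 ? "unsupported" : "ok");
        return 0;    /* prints: vf vlan: ok */
    }
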
@@ -611,6 +612,9 @@ struct cmd_nums { #define HWRM_FWD_RESP (0xd2UL) #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL) #define HWRM_TEMP_MONITOR_QUERY (0xe0UL) + #define HWRM_WOL_FILTER_ALLOC (0xf0UL) + #define HWRM_WOL_FILTER_FREE (0xf1UL) + #define HWRM_WOL_FILTER_QCFG (0xf2UL) #define HWRM_DBG_READ_DIRECT (0xff10UL) #define HWRM_DBG_READ_INDIRECT (0xff11UL) #define HWRM_DBG_WRITE_DIRECT (0xff12UL) @@ -1020,6 +1024,10 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1066,8 +1074,9 @@ struct hwrm_func_qcfg_output { __le16 fid; __le16 port_id; __le16 vlan; - u8 unused_0; - u8 unused_1; + __le16 flags; + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1086,23 +1095,23 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0) #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0) #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0) - u8 unused_2; + u8 unused_0; __le16 dflt_vnic_id; - u8 unused_3; - u8 unused_4; + u8 unused_1; + u8 unused_2; __le32 min_bw; __le32 max_bw; u8 evb_mode; #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0) #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0) #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0) - u8 unused_5; - __le16 unused_6; + u8 unused_3; + __le16 unused_4; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; + u8 unused_5; + u8 unused_6; u8 unused_7; - u8 unused_8; - u8 unused_9; u8 valid; }; @@ -1410,8 +1419,8 @@ struct hwrm_func_buf_rgtr_input { #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0) #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0) #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0) - #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0) - #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0) #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0) __le16 req_buf_len; __le16 resp_buf_len; @@ -1499,6 +1508,12 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL __le32 enables; #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL @@ -1815,13 +1830,22 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 
24) - __le32 unused_1; + __le16 fec_cfg; + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 unused_1; + u8 unused_2; char phy_vendor_name[16]; char phy_vendor_partnumber[16]; - __le32 unused_2; - u8 unused_3; + __le32 unused_3; u8 unused_4; u8 unused_5; + u8 unused_6; u8 valid; }; @@ -1842,6 +1866,8 @@ struct hwrm_port_mac_cfg_input { #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL __le32 enables; #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL @@ -2127,6 +2153,7 @@ struct hwrm_port_phy_i2c_read_output { u8 valid; }; +/* hwrm_queue_qportcfg */ /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { __le16 req_type; @@ -2382,7 +2409,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id0_pri_lvl; u8 queue_id0_bw_weight; u8 queue_id1; @@ -2392,7 +2419,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id1_pri_lvl; u8 queue_id1_bw_weight; u8 queue_id2; @@ -2402,7 +2429,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id2_pri_lvl; u8 queue_id2_bw_weight; u8 queue_id3; @@ -2412,7 +2439,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id3_pri_lvl; u8 queue_id3_bw_weight; u8 queue_id4; @@ -2422,7 +2449,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 
(0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id4_pri_lvl; u8 queue_id4_bw_weight; u8 queue_id5; @@ -2432,7 +2459,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id5_pri_lvl; u8 queue_id5_bw_weight; u8 queue_id6; @@ -2442,7 +2469,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id6_pri_lvl; u8 queue_id6_bw_weight; u8 queue_id7; @@ -2452,7 +2479,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id7_pri_lvl; u8 queue_id7_bw_weight; u8 unused_1[5]; @@ -3150,7 +3177,7 @@ struct hwrm_cfa_l2_filter_cfg_output { }; /* hwrm_cfa_l2_set_rx_mask */ -/* Input (40 bytes) */ +/* Input (56 bytes) */ struct hwrm_cfa_l2_set_rx_mask_input { __le16 req_type; __le16 cmpl_ring; @@ -3165,9 +3192,15 @@ struct hwrm_cfa_l2_set_rx_mask_input { #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL __le64 mc_tbl_addr; __le32 num_mc_entries; __le32 unused_0; + __le64 vlan_tag_tbl_addr; + __le32 num_vlan_tags; + __le32 unused_1; }; /* Output (16 bytes) */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 40a7b0e09..73f224955 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -13,6 +13,7 @@ enum bnxt_nvm_directory_type { BNX_DIR_TYPE_UNUSED = 0, BNX_DIR_TYPE_PKG_LOG = 1, + BNX_DIR_TYPE_UPDATE = 2, BNX_DIR_TYPE_CHIMP_PATCH = 3, BNX_DIR_TYPE_BOOTCODE = 4, BNX_DIR_TYPE_VPD = 5, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 363884dd9..50d2007a2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -143,6 +143,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos) u16 vlan_tag; int rc; + if (bp->hwrm_spec_code < 0x10201) + return -ENOTSUPP; + rc = bnxt_vf_ndo_prep(bp, vf_id); if (rc) return rc; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 6ad2b4a51..6c4652b52 100644 
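
On coalescing parameter validation: the bnxt_set_coalesce() hunk earlier clamps stats_block_coalesce_usecs into [250000, 1000000] and rounds down to a multiple of the minimum, while the tg3 hunk that follows tightens its checks to reject zero usecs outright. The arithmetic of the bnxt variant, restated as a self-contained sketch (clamp_u32/rounddown_u32 are local stand-ins for the kernel's clamp_t and rounddown helpers):

    #include <stdio.h>
    #include <stdint.h>

    #define MIN_STATS_TICKS 250000u     /* BNXT_MIN_STATS_COAL_TICKS */
    #define MAX_STATS_TICKS 1000000u    /* BNXT_MAX_STATS_COAL_TICKS */

    static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    static uint32_t rounddown_u32(uint32_t v, uint32_t mult)
    {
        return v - (v % mult);
    }

    int main(void)
    {
        uint32_t requested = 600000;    /* arbitrary user value */
        uint32_t ticks = rounddown_u32(clamp_u32(requested, MIN_STATS_TICKS,
                                                 MAX_STATS_TICKS),
                                       MIN_STATS_TICKS);

        printf("%u -> %u\n", requested, ticks);   /* 600000 -> 500000 */
        return 0;
    }
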
--- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12543,10 +12543,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, info->data = TG3_RSS_MAX_NUM_QS; } - /* The first interrupt vector only - * handles link interrupts. - */ - info->data -= 1; return 0; default: @@ -14005,7 +14001,9 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) } if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (!ec->rx_coalesce_usecs) || (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || + (!ec->tx_coalesce_usecs) || (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || @@ -14016,16 +14014,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) return -EINVAL; - /* No rx interrupts will be generated if both are zero */ - if ((ec->rx_coalesce_usecs == 0) && - (ec->rx_max_coalesced_frames == 0)) - return -EINVAL; - - /* No tx interrupts will be generated if both are zero */ - if ((ec->tx_coalesce_usecs == 0) && - (ec->tx_max_coalesced_frames == 0)) - return -EINVAL; - /* Only copy relevant parameters, ignore all others. */ tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; @@ -18125,14 +18113,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We needn't recover from permanent error */ - if (state == pci_channel_io_frozen) - tp->pcierr_recovery = true; - /* We probably don't have netdev yet */ if (!netdev || !netif_running(netdev)) goto done; + /* We needn't recover from permanent error */ + if (state == pci_channel_io_frozen) + tp->pcierr_recovery = true; + tg3_phy_stop(tp); tg3_netif_stop(tp); @@ -18229,7 +18217,7 @@ static void tg3_io_resume(struct pci_dev *pdev) rtnl_lock(); - if (!netif_running(netdev)) + if (!netdev || !netif_running(netdev)) goto done; tg3_full_lock(tp, 0); diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 8fc246ea1..05c1c1dd7 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -312,7 +312,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, struct bnad_debug_info *regrd_debug = file->private_data; struct bnad *bnad = (struct bnad *)regrd_debug->i_private; struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; - int addr, len, rc, i; + int rc, i; + u32 addr, len; u32 *regbuf; void __iomem *rb, *reg_addr; unsigned long flags; @@ -372,7 +373,8 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf, struct bnad_debug_info *debug = file->private_data; struct bnad *bnad = (struct bnad *)debug->i_private; struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; - int addr, val, rc; + int rc; + u32 addr, val; void __iomem *reg_addr; unsigned long flags; void *kern_buf; diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index d95098223..47b1bbb95 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -31,15 +31,10 @@ #define BNAD_NUM_TXF_COUNTERS 12 #define BNAD_NUM_RXF_COUNTERS 10 #define BNAD_NUM_CQ_COUNTERS (3 + 5) -#define BNAD_NUM_RXQ_COUNTERS 6 +#define BNAD_NUM_RXQ_COUNTERS 7 #define BNAD_NUM_TXQ_COUNTERS 5 -#define BNAD_ETHTOOL_STATS_NUM \ - (sizeof(struct 
rtnl_link_stats64) / sizeof(u64) + \ - sizeof(struct bnad_drv_stats) / sizeof(u64) + \ - offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64)) - -static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { +static const char *bnad_net_stats_strings[] = { "rx_packets", "tx_packets", "rx_bytes", @@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "tx_dropped", "multicast", "collisions", - "rx_length_errors", - "rx_over_errors", "rx_crc_errors", "rx_frame_errors", - "rx_fifo_errors", - "rx_missed_errors", - - "tx_aborted_errors", - "tx_carrier_errors", "tx_fifo_errors", - "tx_heartbeat_errors", - "tx_window_errors", - - "rx_compressed", - "tx_compressed", "netif_queue_stop", "netif_queue_wakeup", @@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "fc_tx_fid_parity_errors", }; +#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings) + static int bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { @@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_allocbuf_failed", q_num); string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_mapbuf_failed", q_num); + string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_producer_index", q_num); string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_consumer_index", q_num); @@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) sprintf(string, "rxq%d_allocbuf_failed", q_num); string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_mapbuf_failed", + q_num); + string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_producer_index", q_num); string += ETH_GSTRING_LEN; @@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { struct bnad *bnad = netdev_priv(netdev); - int i, j, bi; + int i, j, bi = 0; unsigned long flags; - struct rtnl_link_stats64 *net_stats64; + struct rtnl_link_stats64 net_stats64; u64 *stats64; u32 bmap; @@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, * under the same lock */ spin_lock_irqsave(&bnad->bna_lock, flags); - bi = 0; - memset(buf, 0, stats->n_stats * sizeof(u64)); - - net_stats64 = (struct rtnl_link_stats64 *)buf; - bnad_netdev_qstats_fill(bnad, net_stats64); - bnad_netdev_hwstats_fill(bnad, net_stats64); - bi = sizeof(*net_stats64) / sizeof(u64); + memset(&net_stats64, 0, sizeof(net_stats64)); + bnad_netdev_qstats_fill(bnad, &net_stats64); + bnad_netdev_hwstats_fill(bnad, &net_stats64); + + buf[bi++] = net_stats64.rx_packets; + buf[bi++] = net_stats64.tx_packets; + buf[bi++] = net_stats64.rx_bytes; + buf[bi++] = net_stats64.tx_bytes; + buf[bi++] = net_stats64.rx_errors; + buf[bi++] = net_stats64.tx_errors; + buf[bi++] = net_stats64.rx_dropped; + buf[bi++] = net_stats64.tx_dropped; + buf[bi++] = net_stats64.multicast; + buf[bi++] = net_stats64.collisions; + buf[bi++] = net_stats64.rx_length_errors; + buf[bi++] = net_stats64.rx_crc_errors; + buf[bi++] = net_stats64.rx_frame_errors; + buf[bi++] = net_stats64.tx_fifo_errors; /* Get netif_queue_stopped from stack */ bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index cb07d95e3..d954a97b0 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -304,7 +304,7 @@ static void macb_set_tx_clk(struct clk *clk, int 
speed, struct net_device *dev) static void macb_handle_link_change(struct net_device *dev) { struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int status_change = 0; @@ -414,7 +414,6 @@ static int macb_mii_probe(struct net_device *dev) bp->link = 0; bp->speed = 0; bp->duplex = -1; - bp->phy_dev = phydev; return 0; } @@ -1324,6 +1323,24 @@ dma_error: return 0; } +static inline int macb_clear_csum(struct sk_buff *skb) +{ + /* no change for packets without checksum offloading */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + /* make sure we can modify the header */ + if (unlikely(skb_cow_head(skb, 0))) + return -1; + + /* initialize checksum field + * This is required - at least for Zynq, which otherwise calculates + * wrong UDP header checksums for UDP packets with UDP data len <=2 + */ + *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; + return 0; +} + static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) { u16 queue_index = skb_get_queue_mapping(skb); @@ -1363,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } + if (macb_clear_csum(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* Map socket buffer for DMA transfer */ if (!macb_tx_map(bp, queue, skb)) { dev_kfree_skb_any(skb); @@ -1886,7 +1908,7 @@ static int macb_open(struct net_device *dev) netif_carrier_off(dev); /* if the phy is not yet register, retry later*/ - if (!bp->phy_dev) + if (!dev->phydev) return -EAGAIN; /* RX buffers initialization */ @@ -1905,7 +1927,7 @@ static int macb_open(struct net_device *dev) macb_init_hw(bp); /* schedule a link state check */ - phy_start(bp->phy_dev); + phy_start(dev->phydev); netif_tx_start_all_queues(dev); @@ -1920,8 +1942,8 @@ static int macb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); napi_disable(&bp->napi); - if (bp->phy_dev) - phy_stop(bp->phy_dev); + if (dev->phydev) + phy_stop(dev->phydev); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); @@ -2092,28 +2114,6 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) return nstat; } -static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - static int macb_get_regs_len(struct net_device *netdev) { return MACB_GREGS_NBR * sizeof(u32); @@ -2186,19 +2186,17 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) } static const struct ethtool_ops macb_ethtool_ops = { - .get_settings = macb_get_settings, - .set_settings = macb_set_settings, .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_wol = macb_get_wol, .set_wol = macb_set_wol, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct ethtool_ops gem_ethtool_ops = { - .get_settings = macb_get_settings, - .set_settings = macb_set_settings, .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, 
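
On macb_clear_csum() above: for CHECKSUM_PARTIAL skbs the stack records where the L4 checksum field lives (checksum start plus csum_offset), and the patch zeroes that 16-bit field before handing the frame to hardware offload, since Zynq otherwise computes wrong UDP checksums for tiny payloads. A userspace model of just the pointer arithmetic (the offsets in main are an illustrative UDP-over-IPv4 layout, not driver constants):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* zero the 16-bit checksum field the NIC is expected to fill in */
    static void clear_csum(uint8_t *pkt, size_t csum_start, size_t csum_offset)
    {
        uint16_t zero = 0;
        memcpy(pkt + csum_start + csum_offset, &zero, sizeof(zero));
    }

    int main(void)
    {
        uint8_t pkt[64];
        memset(pkt, 0xff, sizeof(pkt));

        /* e.g. UDP over IPv4: L4 header at offset 14 + 20 = 34,
         * UDP checksum 6 bytes into that header
         */
        clear_csum(pkt, 34, 6);
        printf("csum field: %02x %02x\n", pkt[40], pkt[41]);  /* 00 00 */
        return 0;
    }
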
@@ -2206,12 +2204,13 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -2570,7 +2569,7 @@ static int at91ether_open(struct net_device *dev) MACB_BIT(HRESP)); /* schedule a link state check */ - phy_start(lp->phy_dev); + phy_start(dev->phydev); netif_start_queue(dev); @@ -3010,7 +3009,7 @@ static int macb_probe(struct platform_device *pdev) if (err) goto err_out_free_netdev; - phydev = bp->phy_dev; + phydev = dev->phydev; netif_carrier_off(dev); @@ -3029,7 +3028,7 @@ static int macb_probe(struct platform_device *pdev) return 0; err_out_unregister_mdio: - phy_disconnect(bp->phy_dev); + phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); @@ -3057,8 +3056,8 @@ static int macb_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); - if (bp->phy_dev) - phy_disconnect(bp->phy_dev); + if (dev->phydev) + phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 644743c9c..b6fcf1062 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -823,7 +823,6 @@ struct macb { struct macb_or_gem_ops macbgem_ops; struct mii_bus *mii_bus; - struct phy_device *phy_dev; int link; int speed; int duplex; diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c index 8ad7425f8..c03d37016 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -19,26 +19,16 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include -#include -#include -#include #include -#include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" int lio_cn6xxx_soft_reset(struct octeon_device *oct) { @@ -74,9 +64,9 @@ void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct) u32 val; pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); - if (val & 0x000f0000) { + if (val & 0x000c0000) { dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n", - val & 0x000f0000); + val & 0x000c0000); } val |= 0xf; /* Enable Link error reporting */ @@ -229,7 +219,7 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct) /* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0); - /* / Select ES,RO,NS setting from register for Output Queue Packet + /* Select ES, RO, NS setting from register for Output Queue Packet * Address */ octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF); @@ -367,7 +357,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct) void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) { - u32 mask, i, loop = HZ; + int i; + u32 mask, loop = HZ; u32 d32; /* Reset the Enable bits for Input Queues. */ @@ -376,7 +367,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); /* Wait until hardware indicates that the queues are out of reset. */ - mask = oct->io_qmask.iq; + mask = (u32)oct->io_qmask.iq; d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); while (((d32 & mask) != mask) && loop--) { d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); @@ -384,8 +375,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) } /* Reset the doorbell register for each Input queue. */ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF); d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i)); @@ -398,7 +389,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) /* Wait until hardware indicates that the queues are out of reset. */ loop = HZ; - mask = oct->io_qmask.oq; + mask = (u32)oct->io_qmask.oq; d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); while (((d32 & mask) != mask) && loop--) { d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); @@ -408,8 +399,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) /* Reset the doorbell register for each Output queue. 
*/ /* for (i = 0; i < oct->num_oqs; i++) { */ - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF); d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i)); @@ -429,16 +420,16 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) void lio_cn6xxx_reinit_regs(struct octeon_device *oct) { - u32 i; + int i; - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; oct->fn_list.setup_iq_regs(oct, i); } - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; oct->fn_list.setup_oq_regs(oct, i); } @@ -450,8 +441,8 @@ void lio_cn6xxx_reinit_regs(struct octeon_device *oct) oct->fn_list.enable_io_queues(oct); /* for (i = 0; i < oct->num_oqs; i++) { */ - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg); } @@ -495,8 +486,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx) } u32 -lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), - struct octeon_instr_queue *iq) +lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq) { u32 new_idx = readl(iq->inst_cnt_reg); @@ -547,17 +537,18 @@ static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct) dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port); } -void +static void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64) { dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n", CVM_CAST64(intr64)); } -int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) +static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) { struct octeon_droq *droq; - u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb; + int oq_no; + u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb; u32 droq_cnt_enb, droq_cnt_mask; droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); @@ -573,12 +564,12 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) oct->droq_intr = 0; /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */ - for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { - if (!(droq_mask & (1 << oq_no))) + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) { + if (!(droq_mask & (1ULL << oq_no))) continue; droq = oct->droq[oq_no]; - pkt_count = octeon_droq_check_hw_for_pkts(oct, droq); + pkt_count = octeon_droq_check_hw_for_pkts(droq); if (pkt_count) { oct->droq_intr |= (1ULL << oq_no); if (droq->ops.poll_mode) { diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h index f77918779..28c472242 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h @@ -82,8 +82,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no); void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no); void lio_cn6xxx_enable_io_queues(struct octeon_device *oct); void 
lio_cn6xxx_disable_io_queues(struct octeon_device *oct); -void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64); -int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct); irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev); void lio_cn6xxx_reinit_regs(struct octeon_device *oct); void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, @@ -91,8 +89,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask); u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx); u32 -lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), - struct octeon_instr_queue *iq); +lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq); void lio_cn6xxx_enable_interrupt(void *chip); void lio_cn6xxx_disable_interrupt(void *chip); void cn6xxx_get_pcie_qlmport(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c index 8e830d0c0..29755bc68 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c @@ -19,28 +19,17 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. for more information **********************************************************************/ -#include -#include -#include -#include #include -#include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" #include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct) { @@ -129,7 +118,7 @@ static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct) pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val); } -int lio_is_210nv(struct octeon_device *oct) +static int lio_is_210nv(struct octeon_device *oct) { u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG); diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h index d4e1c9fb0..ea7bdcce6 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h @@ -28,6 +28,5 @@ #define __CN68XX_DEVICE_H__ int lio_setup_cn68xx_octeon_device(struct octeon_device *oct); -int lio_is_210nv(struct octeon_device *oct); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h index 38cddbd10..d45a0f4aa 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h @@ -29,7 +29,6 @@ #ifndef __CN68XX_REGS_H__ #define __CN68XX_REGS_H__ -#include "cn66xx_regs.h" /*###################### REQUEST QUEUE #########################*/ diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 245c063ed..289eb8907 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -19,13 +19,9 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include #include #include -#include -#include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -36,9 +32,8 @@ #include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" + +static int octnet_get_link_stats(struct net_device *netdev); struct oct_mdio_cmd_context { int octeon_id; @@ -71,34 +66,126 @@ enum { INTERFACE_MODE_RXAUI, INTERFACE_MODE_QSGMII, INTERFACE_MODE_AGL, + INTERFACE_MODE_XLAUI, + INTERFACE_MODE_XFI, + INTERFACE_MODE_10G_KR, + INTERFACE_MODE_40G_KR4, + INTERFACE_MODE_MIXED, }; #define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0])) #define OCT_ETHTOOL_REGDUMP_LEN 4096 #define OCT_ETHTOOL_REGSVER 1 +/* statistics of PF */ +static const char oct_stats_strings[][ETH_GSTRING_LEN] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", /*jabber_err+l2_err+frame_err */ + "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */ + "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd + *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop + */ + "tx_dropped", + + "tx_total_sent", + "tx_total_fwd", + "tx_err_pko", + "tx_err_link", + "tx_err_drop", + + "tx_tso", + "tx_tso_packets", + "tx_tso_err", + "tx_vxlan", + + "mac_tx_total_pkts", + "mac_tx_total_bytes", + "mac_tx_mcast_pkts", + "mac_tx_bcast_pkts", + "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */ + "mac_tx_total_collisions", + "mac_tx_one_collision", + "mac_tx_multi_collison", + "mac_tx_max_collision_fail", + "mac_tx_max_deferal_fail", + "mac_tx_fifo_err", + "mac_tx_runts", + + "rx_total_rcvd", + "rx_total_fwd", + "rx_jabber_err", + "rx_l2_err", + "rx_frame_err", + "rx_err_pko", + "rx_err_link", + "rx_err_drop", + + "rx_vxlan", + "rx_vxlan_err", + + "rx_lro_pkts", + "rx_lro_bytes", + "rx_total_lro", + + "rx_lro_aborts", + "rx_lro_aborts_port", + "rx_lro_aborts_seq", + "rx_lro_aborts_tsval", + "rx_lro_aborts_timer", + "rx_fwd_rate", + + "mac_rx_total_rcvd", + "mac_rx_bytes", + "mac_rx_total_bcst", + "mac_rx_total_mcst", + "mac_rx_runts", + "mac_rx_ctl_packets", + "mac_rx_fifo_err", + "mac_rx_dma_drop", + "mac_rx_fcs_err", + + "link_state_changes", +}; + +/* statistics of host tx queue */ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { - "Instr posted", - "Instr processed", - "Instr dropped", - "Bytes Sent", - "Sgentry_sent", - "Inst cntreg", - "Tx done", - "Tx Iq busy", - "Tx dropped", - "Tx bytes", + "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/ + "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/ + "dropped", + "iq_busy", + "sgentry_sent", + + "fw_instr_posted", + "fw_instr_processed", + "fw_instr_dropped", + "fw_bytes_sent", + + "tso", + "vxlan", + "txq_restart", }; +/* statistics of host rx queue */ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { - "OQ Pkts Received", - "OQ Bytes Received", - "Dropped no dispatch", - "Dropped nomem", - "Dropped toomany", - "Stack RX cnt", - "Stack RX Bytes", - "RX dropped", + "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */ + "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */ + "dropped", /*oct->droq[oq_no]->stats.rx_dropped+ + *oct->droq[oq_no]->stats.dropped_nodispatch+ + *oct->droq[oq_no]->stats.dropped_toomany+ + *oct->droq[oq_no]->stats.dropped_nomem + */ + "dropped_nomem", + "dropped_toomany", + "fw_dropped", + 
"fw_pkts_received", + "fw_bytes_received", + "fw_dropped_nodispatch", + + "vxlan", + "buffer_alloc_failure", }; #define OCTNIC_NCMD_AUTONEG_ON 0x1 @@ -112,8 +199,9 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) linfo = &lio->linfo; - if (linfo->link.s.interface == INTERFACE_MODE_XAUI || - linfo->link.s.interface == INTERFACE_MODE_RXAUI) { + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { ecmd->port = PORT_FIBRE; ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | @@ -124,10 +212,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->autoneg = AUTONEG_DISABLE; } else { - dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n"); + dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", + linfo->link.s.if_mode); } - if (linfo->link.s.status) { + if (linfo->link.s.link_up) { ethtool_cmd_speed_set(ecmd, linfo->link.s.speed); ecmd->duplex = linfo->link.s.duplex; } else { @@ -222,23 +311,20 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = addr; - nctrl.ncmd.s.param3 = val; + nctrl.ncmd.s.param1 = addr; + nctrl.ncmd.s.param2 = val; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nparams.resp_order = OCTEON_RESP_ORDERED; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); return -EINVAL; @@ -253,20 +339,18 @@ static void octnet_mdio_resp_callback(struct octeon_device *oct, u32 status, void *buf) { - struct oct_mdio_cmd_resp *mdio_cmd_rsp; struct oct_mdio_cmd_context *mdio_cmd_ctx; struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; oct = lio_get_device(mdio_cmd_ctx->octeon_id); if (status) { dev_err(&oct->pci_dev->dev, "MIDO instruction failed. 
Status: %llx\n", CVM_CAST64(status)); - ACCESS_ONCE(mdio_cmd_ctx->cond) = -1; + WRITE_ONCE(mdio_cmd_ctx->cond, -1); } else { - ACCESS_ONCE(mdio_cmd_ctx->cond) = 1; + WRITE_ONCE(mdio_cmd_ctx->cond, 1); } wake_up_interruptible(&mdio_cmd_ctx->wc); } @@ -297,15 +381,16 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; - ACCESS_ONCE(mdio_cmd_ctx->cond) = 0; + WRITE_ONCE(mdio_cmd_ctx->cond, 0); mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); mdio_cmd->op = op; mdio_cmd->mdio_addr = loc; if (op) mdio_cmd->value1 = *value; - mdio_cmd->value2 = lio->linfo.ifidx; octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 0, 0, 0); @@ -317,7 +402,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) retval = octeon_send_soft_command(oct_dev, sc); - if (retval) { + if (retval == IQ_SEND_FAILED) { dev_err(&oct_dev->pci_dev->dev, "octnet_mdio45_access instruction failed status: %x\n", retval); @@ -335,7 +420,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), sizeof(struct oct_mdio_cmd) / 8); - if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) { + if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { if (!op) *value = mdio_cmd_rsp->resp.value1; } else { @@ -379,18 +464,16 @@ static int lio_set_phys_id(struct net_device *netdev, /* Configure Beacon values */ value = LIO68XX_LED_BEACON_CFGON; - ret = - octnet_mdio45_access(lio, 1, - LIO68XX_LED_BEACON_ADDR, - &value); + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_BEACON_ADDR, + &value); if (ret) return ret; value = LIO68XX_LED_CTRL_CFGON; - ret = - octnet_mdio45_access(lio, 1, - LIO68XX_LED_CTRL_ADDR, - &value); + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_CTRL_ADDR, + &value); if (ret) return ret; } else { @@ -469,7 +552,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev, tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); } - if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) { + if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) { ering->rx_pending = 0; ering->rx_max_pending = 0; ering->rx_mini_pending = 0; @@ -503,10 +586,10 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl) if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) { if (msglvl & NETIF_MSG_HW) liquidio_set_feature(netdev, - OCTNET_CMD_VERBOSE_ENABLE); + OCTNET_CMD_VERBOSE_ENABLE, 0); else liquidio_set_feature(netdev, - OCTNET_CMD_VERBOSE_DISABLE); + OCTNET_CMD_VERBOSE_DISABLE, 0); } lio->msg_enable = msglvl; @@ -518,61 +601,279 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) /* Notes: Not supporting any auto negotiation in these * drivers. Just report pause frame support. */ - pause->tx_pause = 1; - pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. 
*/ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + pause->autoneg = 0; + + pause->tx_pause = oct->tx_pause; + pause->rx_pause = oct->rx_pause; } static void lio_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *stats __attribute__((unused)), + u64 *data) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; + struct net_device_stats *netstats = &netdev->stats; int i = 0, j; - for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) { - if (!(oct_dev->io_qmask.iq & (1UL << j))) + netdev->netdev_ops->ndo_get_stats(netdev); + octnet_get_link_stats(netdev); + + /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ + data[i++] = CVM_CAST64(netstats->rx_packets); + /*sum of oct->instr_queue[iq_no]->stats.tx_done */ + data[i++] = CVM_CAST64(netstats->tx_packets); + /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ + data[i++] = CVM_CAST64(netstats->rx_bytes); + /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ + data[i++] = CVM_CAST64(netstats->tx_bytes); + data[i++] = CVM_CAST64(netstats->rx_errors); + data[i++] = CVM_CAST64(netstats->tx_errors); + /*sum of oct->droq[oq_no]->stats->rx_dropped + + *oct->droq[oq_no]->stats->dropped_nodispatch + + *oct->droq[oq_no]->stats->dropped_toomany + + *oct->droq[oq_no]->stats->dropped_nomem + */ + data[i++] = CVM_CAST64(netstats->rx_dropped); + /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ + data[i++] = CVM_CAST64(netstats->tx_dropped); + + /*data[i++] = CVM_CAST64(stats->multicast); */ + /*data[i++] = CVM_CAST64(stats->collisions); */ + + /* firmware tx stats */ + /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. + *fromhost.fw_total_sent + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); + /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_err_drop + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); + + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_tso_fwd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_err_tso + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 
+ *fw_tx_vxlan + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); + + /* mac tx statistics */ + /*CVMX_BGXX_CMRX_TX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT4 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); + /*CVMX_BGXX_CMRX_TX_STAT15 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT14 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT17 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); + /*CVMX_BGXX_CMRX_TX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); + /*CVMX_BGXX_CMRX_TX_STAT3 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); + /*CVMX_BGXX_CMRX_TX_STAT2 */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); + /*CVMX_BGXX_CMRX_TX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); + /*CVMX_BGXX_CMRX_TX_STAT1 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); + /*CVMX_BGXX_CMRX_TX_STAT16 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); + /*CVMX_BGXX_CMRX_TX_STAT6 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); + + /* RX firmware stats */ + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_total_rcvd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_total_fwd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_err_pko + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_err_drop + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); + + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_rx_vxlan + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan); + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_rx_vxlan_err + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err); + + /* LRO */ + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_pkts + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_octs + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); + /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); + /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 
+ *fw_lro_aborts_port + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_seq + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_tsval + */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_timer + */ + /* intrmod: packet forward rate */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer); + /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate); + + /* mac: link-level stats */ + /*CVMX_BGXX_CMRX_RX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd); + /*CVMX_BGXX_CMRX_RX_STAT1 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd); + /*CVMX_PKI_STATX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst); + /*CVMX_PKI_STATX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst); + /*wqe->word2.err_code or wqe->word2.err_level */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts); + /*CVMX_BGXX_CMRX_RX_STAT2 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd); + /*CVMX_BGXX_CMRX_RX_STAT6 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err); + /*CVMX_BGXX_CMRX_RX_STAT4 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop); + /*wqe->word2.err_code or wqe->word2.err_level */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err); + /*lio->link_changes*/ + data[i++] = CVM_CAST64(lio->link_changes); + + /* TX -- lio_update_stats(lio); */ + for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { + if (!(oct_dev->io_qmask.iq & (1ULL << j))) continue; + /*packets to network port*/ + /*# of packets tx to network */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); + /*# of bytes tx to network */ data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); - data[i++] = - CVM_CAST64( - oct_dev->instr_queue[j]->stats.instr_processed); + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); + /*# of packets dropped */ data[i++] = - CVM_CAST64( - oct_dev->instr_queue[j]->stats.instr_dropped); + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); + /*# of tx fails due to queue full */ data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); + /*XXX gather entries sent */ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); + + /*instruction to firmware: data and control */ + /*# of instructions to the queue */ data[i++] = - readl(oct_dev->instr_queue[j]->inst_cnt_reg); - data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); - data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); + CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); + /*# of instructions processed */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]-> + stats.instr_processed); + /*# of instructions could not be processed */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]-> + stats.instr_dropped); + /*bytes sent through the queue */ data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); + CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); + + /*tso request*/ + data[i++] = 
CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); + /*vxlan request*/ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); + /*txq restart*/ data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); } - /* for (j = 0; j < oct_dev->num_oqs; j++){ */ - for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) { - if (!(oct_dev->io_qmask.oq & (1UL << j))) + /* RX */ + /* for (j = 0; j < oct_dev->num_oqs; j++) { */ + for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) { + if (!(oct_dev->io_qmask.oq & (1ULL << j))) continue; - data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); - data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); - data[i++] = - CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); - data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); - data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); + + /*packets send to TCP/IP network stack */ + /*# of packets to network stack */ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); + /*# of bytes to network stack */ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); + /*# of packets dropped */ + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + + oct_dev->droq[j]->stats.dropped_toomany + + oct_dev->droq[j]->stats.rx_dropped); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); + + /*control and data path*/ + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); + + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); } } @@ -581,26 +882,43 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; int num_iq_stats, num_oq_stats, i, j; + int num_stats; - num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct_dev->io_qmask.iq & (1UL << i))) - continue; - for (j = 0; j < num_iq_stats; j++) { - sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]); + switch (stringset) { + case ETH_SS_STATS: + num_stats = ARRAY_SIZE(oct_stats_strings); + for (j = 0; j < num_stats; j++) { + sprintf(data, "%s", oct_stats_strings[j]); data += ETH_GSTRING_LEN; } - } - num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); - /* for (i = 0; i < oct_dev->num_oqs; i++) { */ - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct_dev->io_qmask.oq & (1UL << i))) - continue; - for (j = 0; j < num_oq_stats; j++) { - sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]); - data += ETH_GSTRING_LEN; + num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.iq & (1ULL << i))) + continue; + for (j = 0; j < num_iq_stats; j++) { + sprintf(data, "tx-%d-%s", i, + oct_iq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } } + + num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); + /* for (i = 0; i < oct_dev->num_oqs; i++) { */ + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.oq & (1ULL << i))) + continue; + for (j = 0; j < num_oq_stats; j++) { + sprintf(data, "rx-%d-%s", i, + 
oct_droq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + break; + + default: + netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); + break; } } @@ -609,8 +927,14 @@ static int lio_get_sset_count(struct net_device *netdev, int sset) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; - return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) + - (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + switch (sset) { + case ETH_SS_STATS: + return (ARRAY_SIZE(oct_stats_strings) + + ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + + ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + default: + return -EOPNOTSUPP; + } } static int lio_get_intr_coalesce(struct net_device *netdev, @@ -618,50 +942,49 @@ static int lio_get_intr_coalesce(struct net_device *netdev, { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; struct octeon_instr_queue *iq; struct oct_intrmod_cfg *intrmod_cfg; intrmod_cfg = &oct->intrmod; switch (oct->chip_id) { - /* case OCTEON_CN73XX: Todo */ - /* break; */ case OCTEON_CN68XX: - case OCTEON_CN66XX: - if (!intrmod_cfg->intrmod_enable) { + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + if (!intrmod_cfg->rx_enable) { intr_coal->rx_coalesce_usecs = CFG_GET_OQ_INTR_TIME(cn6xxx->conf); intr_coal->rx_max_coalesced_frames = CFG_GET_OQ_INTR_PKT(cn6xxx->conf); - } else { - intr_coal->use_adaptive_rx_coalesce = - intrmod_cfg->intrmod_enable; - intr_coal->rate_sample_interval = - intrmod_cfg->intrmod_check_intrvl; - intr_coal->pkt_rate_high = - intrmod_cfg->intrmod_maxpkt_ratethr; - intr_coal->pkt_rate_low = - intrmod_cfg->intrmod_minpkt_ratethr; - intr_coal->rx_max_coalesced_frames_high = - intrmod_cfg->intrmod_maxcnt_trigger; - intr_coal->rx_coalesce_usecs_high = - intrmod_cfg->intrmod_maxtmr_trigger; - intr_coal->rx_coalesce_usecs_low = - intrmod_cfg->intrmod_mintmr_trigger; - intr_coal->rx_max_coalesced_frames_low = - intrmod_cfg->intrmod_mincnt_trigger; } - - iq = oct->instr_queue[lio->linfo.txpciq[0]]; + iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; intr_coal->tx_max_coalesced_frames = iq->fill_threshold; break; - + } default: netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); return -EINVAL; } - + if (intrmod_cfg->rx_enable) { + intr_coal->use_adaptive_rx_coalesce = + intrmod_cfg->rx_enable; + intr_coal->rate_sample_interval = + intrmod_cfg->check_intrvl; + intr_coal->pkt_rate_high = + intrmod_cfg->maxpkt_ratethr; + intr_coal->pkt_rate_low = + intrmod_cfg->minpkt_ratethr; + intr_coal->rx_max_coalesced_frames_high = + intrmod_cfg->rx_maxcnt_trigger; + intr_coal->rx_coalesce_usecs_high = + intrmod_cfg->rx_maxtmr_trigger; + intr_coal->rx_coalesce_usecs_low = + intrmod_cfg->rx_mintmr_trigger; + intr_coal->rx_max_coalesced_frames_low = + intrmod_cfg->rx_mincnt_trigger; + } return 0; } @@ -681,19 +1004,20 @@ static void octnet_intrmod_callback(struct octeon_device *oct_dev, else dev_info(&oct_dev->pci_dev->dev, "Rx-Adaptive Interrupt moderation enabled:%llx\n", - oct_dev->intrmod.intrmod_enable); + oct_dev->intrmod.rx_enable); octeon_free_soft_command(oct_dev, sc); } /* Configure interrupt moderation parameters */ -static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) +static int octnet_set_intrmod_cfg(struct lio *lio, + struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; struct oct_intrmod_cmd *cmd; struct oct_intrmod_cfg *cfg; 
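 	/* The moderation settings travel to the firmware as a soft
 	 * command: the command is allocated here, the config embedded in
 	 * it, and octeon_send_soft_command() queues it;
 	 * octnet_intrmod_callback() above frees it when the firmware
 	 * responds.
 	 */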
int retval; - struct octeon_device *oct_dev = (struct octeon_device *)oct; + struct octeon_device *oct_dev = lio->oct_dev; /* Alloc soft command */ sc = (struct octeon_soft_command *) @@ -714,6 +1038,8 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) cmd->cfg = cfg; cmd->oct_dev = oct_dev; + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); @@ -722,17 +1048,171 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) sc->wait_time = 1000; retval = octeon_send_soft_command(oct_dev, sc); - if (retval) { + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + return -EINVAL; + } + + return 0; +} + +static void +octnet_nic_stats_callback(struct octeon_device *oct_dev, + u32 status, void *ptr) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; + struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *) + sc->virtrptr; + struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *) + sc->ctxptr; + struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; + struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; + + struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; + struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost; + + if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->stats, + (sizeof(struct oct_link_stats)) >> 3); + + /* RX link-level stats */ + rstats->total_rcvd = rsp_rstats->total_rcvd; + rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; + rstats->total_bcst = rsp_rstats->total_bcst; + rstats->total_mcst = rsp_rstats->total_mcst; + rstats->runts = rsp_rstats->runts; + rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; + /* Accounts for over/under-run of buffers */ + rstats->fifo_err = rsp_rstats->fifo_err; + rstats->dmac_drop = rsp_rstats->dmac_drop; + rstats->fcs_err = rsp_rstats->fcs_err; + rstats->jabber_err = rsp_rstats->jabber_err; + rstats->l2_err = rsp_rstats->l2_err; + rstats->frame_err = rsp_rstats->frame_err; + + /* RX firmware stats */ + rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; + rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; + rstats->fw_err_pko = rsp_rstats->fw_err_pko; + rstats->fw_err_link = rsp_rstats->fw_err_link; + rstats->fw_err_drop = rsp_rstats->fw_err_drop; + rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; + rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; + + /* Number of packets that are LROed */ + rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; + /* Number of octets that are LROed */ + rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; + /* Number of LRO packets formed */ + rstats->fw_total_lro = rsp_rstats->fw_total_lro; + /* Number of times lRO of packet aborted */ + rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; + rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; + rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; + rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; + rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; + /* intrmod: packet forward rate */ + rstats->fwd_rate = rsp_rstats->fwd_rate; + + /* TX link-level stats */ + tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; + tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; + tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; + tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; + tstats->ctl_sent = rsp_tstats->ctl_sent; + /* Packets sent after one collision*/ + tstats->one_collision_sent = 
rsp_tstats->one_collision_sent; + /* Packets sent after multiple collision*/ + tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; + /* Packets not sent due to max collisions */ + tstats->max_collision_fail = rsp_tstats->max_collision_fail; + /* Packets not sent due to max deferrals */ + tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; + /* Accounts for over/under-run of buffers */ + tstats->fifo_err = rsp_tstats->fifo_err; + tstats->runts = rsp_tstats->runts; + /* Total number of collisions detected */ + tstats->total_collisions = rsp_tstats->total_collisions; + + /* firmware stats */ + tstats->fw_total_sent = rsp_tstats->fw_total_sent; + tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; + tstats->fw_err_pko = rsp_tstats->fw_err_pko; + tstats->fw_err_link = rsp_tstats->fw_err_link; + tstats->fw_err_drop = rsp_tstats->fw_err_drop; + tstats->fw_tso = rsp_tstats->fw_tso; + tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; + tstats->fw_err_tso = rsp_tstats->fw_err_tso; + tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; + + resp->status = 1; + } else { + resp->status = -1; + } + complete(&ctrl->complete); +} + +/* Configure interrupt moderation parameters */ +static int octnet_get_link_stats(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + + struct octeon_soft_command *sc; + struct oct_nic_stats_ctrl *ctrl; + struct oct_nic_stats_resp *resp; + + int retval; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_stats_resp), + sizeof(struct octnic_ctrl_pkt)); + + if (!sc) + return -ENOMEM; + + resp = (struct oct_nic_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + + ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; + memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); + ctrl->netdev = netdev; + init_completion(&ctrl->complete); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_PORT_STATS, 0, 0, 0); + + sc->callback = octnet_nic_stats_callback; + sc->callback_arg = sc; + sc->wait_time = 500; /*in milli seconds*/ + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + return -EINVAL; + } + + wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); + + if (resp->status != 1) { octeon_free_soft_command(oct_dev, sc); + return -EINVAL; } + octeon_free_soft_command(oct_dev, sc); + return 0; } /* Enable/Disable auto interrupt Moderation */ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce - *intr_coal, int adaptive) + *intr_coal) { int ret = 0; struct octeon_device *oct = lio->oct_dev; @@ -740,59 +1220,73 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce intrmod_cfg = &oct->intrmod; - if (adaptive) { + if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) { if (intr_coal->rate_sample_interval) - intrmod_cfg->intrmod_check_intrvl = + intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; else - intrmod_cfg->intrmod_check_intrvl = + intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; if (intr_coal->pkt_rate_high) - intrmod_cfg->intrmod_maxpkt_ratethr = + intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; else - intrmod_cfg->intrmod_maxpkt_ratethr = + intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; if (intr_coal->pkt_rate_low) - intrmod_cfg->intrmod_minpkt_ratethr = + 
intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; else - intrmod_cfg->intrmod_minpkt_ratethr = + intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; - + } + if (oct->intrmod.rx_enable) { if (intr_coal->rx_max_coalesced_frames_high) - intrmod_cfg->intrmod_maxcnt_trigger = + intrmod_cfg->rx_maxcnt_trigger = intr_coal->rx_max_coalesced_frames_high; else - intrmod_cfg->intrmod_maxcnt_trigger = - LIO_INTRMOD_MAXCNT_TRIGGER; + intrmod_cfg->rx_maxcnt_trigger = + LIO_INTRMOD_RXMAXCNT_TRIGGER; if (intr_coal->rx_coalesce_usecs_high) - intrmod_cfg->intrmod_maxtmr_trigger = + intrmod_cfg->rx_maxtmr_trigger = intr_coal->rx_coalesce_usecs_high; else - intrmod_cfg->intrmod_maxtmr_trigger = - LIO_INTRMOD_MAXTMR_TRIGGER; + intrmod_cfg->rx_maxtmr_trigger = + LIO_INTRMOD_RXMAXTMR_TRIGGER; if (intr_coal->rx_coalesce_usecs_low) - intrmod_cfg->intrmod_mintmr_trigger = + intrmod_cfg->rx_mintmr_trigger = intr_coal->rx_coalesce_usecs_low; else - intrmod_cfg->intrmod_mintmr_trigger = - LIO_INTRMOD_MINTMR_TRIGGER; + intrmod_cfg->rx_mintmr_trigger = + LIO_INTRMOD_RXMINTMR_TRIGGER; if (intr_coal->rx_max_coalesced_frames_low) - intrmod_cfg->intrmod_mincnt_trigger = + intrmod_cfg->rx_mincnt_trigger = intr_coal->rx_max_coalesced_frames_low; else - intrmod_cfg->intrmod_mincnt_trigger = - LIO_INTRMOD_MINCNT_TRIGGER; + intrmod_cfg->rx_mincnt_trigger = + LIO_INTRMOD_RXMINCNT_TRIGGER; + } + if (oct->intrmod.tx_enable) { + if (intr_coal->tx_max_coalesced_frames_high) + intrmod_cfg->tx_maxcnt_trigger = + intr_coal->tx_max_coalesced_frames_high; + else + intrmod_cfg->tx_maxcnt_trigger = + LIO_INTRMOD_TXMAXCNT_TRIGGER; + if (intr_coal->tx_max_coalesced_frames_low) + intrmod_cfg->tx_mincnt_trigger = + intr_coal->tx_max_coalesced_frames_low; + else + intrmod_cfg->tx_mincnt_trigger = + LIO_INTRMOD_TXMINCNT_TRIGGER; } - intrmod_cfg->intrmod_enable = adaptive; - ret = octnet_set_intrmod_cfg(oct, intrmod_cfg); + ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); return ret; } @@ -800,54 +1294,82 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce static int oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) { - int ret; struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; u32 rx_max_coalesced_frames; - if (!intr_coal->rx_max_coalesced_frames) - rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; - else - rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames; - - /* Disable adaptive interrupt modulation */ - ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); - if (ret) - return ret; - /* Config Cnt based interrupt values */ - octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, - rx_max_coalesced_frames); - CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + if (!intr_coal->rx_max_coalesced_frames) + rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; + else + rx_max_coalesced_frames = + intr_coal->rx_max_coalesced_frames; + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, + rx_max_coalesced_frames); + CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); + break; + } + default: + return -EINVAL; + } return 0; } static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce *intr_coal) { - int ret; struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; u32 time_threshold, rx_coalesce_usecs; - if 
(!intr_coal->rx_coalesce_usecs) - rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; - else - rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; + /* Config Time based interrupt values */ + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + if (!intr_coal->rx_coalesce_usecs) + rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; + else + rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; - /* Disable adaptive interrupt modulation */ - ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); - if (ret) - return ret; + time_threshold = lio_cn6xxx_get_oq_ticks(oct, + rx_coalesce_usecs); + octeon_write_csr(oct, + CN6XXX_SLI_OQ_INT_LEVEL_TIME, + time_threshold); - /* Config Time based interrupt values */ - time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs); - octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold); - CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); + CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); + break; + } + default: + return -EINVAL; + } return 0; } +static int +oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal + __attribute__((unused))) +{ + struct octeon_device *oct = lio->oct_dev; + + /* Config Cnt based interrupt values */ + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + break; + default: + return -EINVAL; + } + return 0; +} + static int lio_set_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *intr_coal) { @@ -855,59 +1377,48 @@ static int lio_set_intr_coalesce(struct net_device *netdev, int ret; struct octeon_device *oct = lio->oct_dev; u32 j, q_no; + int db_max, db_min; - if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) && - (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) { - for (j = 0; j < lio->linfo.num_txpciq; j++) { - q_no = lio->linfo.txpciq[j]; - oct->instr_queue[q_no]->fill_threshold = - intr_coal->tx_max_coalesced_frames; + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + db_min = CN6XXX_DB_MIN; + db_max = CN6XXX_DB_MAX; + if ((intr_coal->tx_max_coalesced_frames >= db_min) && + (intr_coal->tx_max_coalesced_frames <= db_max)) { + for (j = 0; j < lio->linfo.num_txpciq; j++) { + q_no = lio->linfo.txpciq[j].s.q_no; + oct->instr_queue[q_no]->fill_threshold = + intr_coal->tx_max_coalesced_frames; + } + } else { + dev_err(&oct->pci_dev->dev, + "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", + intr_coal->tx_max_coalesced_frames, db_min, + db_max); + return -EINVAL; } - } else { - dev_err(&oct->pci_dev->dev, - "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", - intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN, - CN6XXX_DB_MAX); + break; + default: return -EINVAL; } - /* User requested adaptive-rx on */ - if (intr_coal->use_adaptive_rx_coalesce) { - ret = oct_cfg_adaptive_intr(lio, intr_coal, 1); - if (ret) - goto ret_intrmod; - } + oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; + oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 
1 : 0; - /* User requested adaptive-rx off and rx coalesce */ - if ((intr_coal->rx_coalesce_usecs) && - (!intr_coal->use_adaptive_rx_coalesce)) { + ret = oct_cfg_adaptive_intr(lio, intr_coal); + + if (!intr_coal->use_adaptive_rx_coalesce) { ret = oct_cfg_rx_intrtime(lio, intr_coal); if (ret) goto ret_intrmod; - } - /* User requested adaptive-rx off and rx coalesce */ - if ((intr_coal->rx_max_coalesced_frames) && - (!intr_coal->use_adaptive_rx_coalesce)) { ret = oct_cfg_rx_intrcnt(lio, intr_coal); if (ret) goto ret_intrmod; } - - /* User requested adaptive-rx off, so use default coalesce params */ - if ((!intr_coal->rx_max_coalesced_frames) && - (!intr_coal->use_adaptive_rx_coalesce) && - (!intr_coal->rx_coalesce_usecs)) { - dev_info(&oct->pci_dev->dev, - "Turning off adaptive-rx interrupt moderation\n"); - dev_info(&oct->pci_dev->dev, - "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n", - CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT); - ret = oct_cfg_rx_intrtime(lio, intr_coal); - if (ret) - goto ret_intrmod; - - ret = oct_cfg_rx_intrcnt(lio, intr_coal); + if (!intr_coal->use_adaptive_tx_coalesce) { + ret = oct_cfg_tx_intrcnt(lio, intr_coal); if (ret) goto ret_intrmod; } @@ -923,23 +1434,28 @@ static int lio_get_ts_info(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); info->so_timestamping = +#ifdef PTP_HARDWARE_TIMESTAMPING SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | +#endif SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_SOFTWARE; if (lio->ptp_clock) info->phc_index = ptp_clock_index(lio->ptp_clock); else info->phc_index = -1; +#ifdef PTP_HARDWARE_TIMESTAMPING info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); +#endif return 0; } @@ -950,7 +1466,6 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct octeon_device *oct = lio->oct_dev; struct oct_link_info *linfo; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; int ret = 0; /* get the link info */ @@ -965,12 +1480,14 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->duplex != DUPLEX_FULL))) return -EINVAL; - /* Ethtool Support is not provided for XAUI and RXAUI Interfaces + /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces * as they operate at fixed Speed and Duplex settings */ - if (linfo->link.s.interface == INTERFACE_MODE_XAUI || - linfo->link.s.interface == INTERFACE_MODE_RXAUI) { - dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n"); + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { + dev_info(&oct->pci_dev->dev, + "Autonegotiation, duplex and speed settings cannot be modified.\n"); return -EINVAL; } @@ -978,9 +1495,9 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 1000; nctrl.netpndev = (u64)netdev; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; /* Passing the parameters sent by ethtool like 
Speed, Autoneg & Duplex @@ -990,19 +1507,17 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* Autoneg ON */ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON | OCTNIC_NCMD_AUTONEG_ON; - nctrl.ncmd.s.param2 = ecmd->advertising; + nctrl.ncmd.s.param1 = ecmd->advertising; } else { /* Autoneg OFF */ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON; - nctrl.ncmd.s.param3 = ecmd->duplex; + nctrl.ncmd.s.param2 = ecmd->duplex; - nctrl.ncmd.s.param2 = ecmd->speed; + nctrl.ncmd.s.param1 = ecmd->speed; } - nparams.resp_order = OCTEON_RESP_ORDERED; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Failed to set settings\n"); return -1; @@ -1026,7 +1541,7 @@ static int lio_nway_reset(struct net_device *netdev) } /* Return register dump len. */ -static int lio_get_regs_len(struct net_device *dev) +static int lio_get_regs_len(struct net_device *dev __attribute__((unused))) { return OCT_ETHTOOL_REGDUMP_LEN; } @@ -1170,13 +1685,12 @@ static void lio_get_regs(struct net_device *dev, int len = 0; struct octeon_device *oct = lio->oct_dev; - memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); regs->version = OCT_ETHTOOL_REGSVER; switch (oct->chip_id) { - /* case OCTEON_CN73XX: Todo */ case OCTEON_CN68XX: case OCTEON_CN66XX: + memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); len += cn6xxx_read_csr_reg(regbuf + len, oct); len += cn6xxx_read_config_reg(regbuf + len, oct); break; @@ -1186,6 +1700,23 @@ static void lio_get_regs(struct net_device *dev, } } +static u32 lio_get_priv_flags(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + return lio->oct_dev->priv_flags; +} + +static int lio_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct lio *lio = GET_LIO(netdev); + bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); + + lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, + intr_by_tx_bytes); + return 0; +} + static const struct ethtool_ops lio_ethtool_ops = { .get_settings = lio_get_settings, .get_link = ethtool_op_get_link, @@ -1207,6 +1738,8 @@ static const struct ethtool_ops lio_ethtool_ops = { .set_settings = lio_set_settings, .get_coalesce = lio_get_intr_coalesce, .set_coalesce = lio_set_intr_coalesce, + .get_priv_flags = lio_get_priv_flags, + .set_priv_flags = lio_set_priv_flags, .get_ts_info = lio_get_ts_info, }; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 7708d6b14..f00b44bf9 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -20,24 +20,12 @@ * Contact Cavium, Inc. 
for more information **********************************************************************/ #include -#include -#include -#include #include -#include -#include -#include -#include #include #include #include -#include #include -#include -#include -#include -#include -#include "octeon_config.h" +#include #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -48,7 +36,6 @@ #include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" #include "cn68xx_device.h" #include "liquidio_image.h" @@ -70,6 +57,9 @@ MODULE_PARM_DESC(console_bitmask, #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ + (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count) + static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); @@ -82,6 +72,8 @@ static int conf_type; module_param(conf_type, int, 0); MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs"); +static int ptp_enable = 1; + /* Bit mask values for lio->ifstate */ #define LIO_IFSTATE_DROQ_OPS 0x01 #define LIO_IFSTATE_REGISTERED 0x02 @@ -164,6 +156,8 @@ struct octnic_gather { * received from the IP layer. */ struct octeon_sg_entry *sg; + + u64 sg_dma_ptr; }; /** This structure is used by NIC driver to store information required @@ -218,8 +212,8 @@ static void octeon_droq_bh(unsigned long pdev) (struct octeon_device_priv *)oct->priv; /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ - for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) { - if (!(oct->io_qmask.oq & (1UL << q_no))) + for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { + if (!(oct->io_qmask.oq & (1ULL << q_no))) continue; reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], MAX_PACKET_BUDGET); @@ -239,11 +233,10 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct) do { pending_pkts = 0; - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; - pkt_cnt += octeon_droq_check_hw_for_pkts(oct, - oct->droq[i]); + pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); } if (pkt_cnt > 0) { pending_pkts += pkt_cnt; @@ -359,7 +352,7 @@ static int wait_for_pending_requests(struct octeon_device *oct) [OCTEON_ORDERED_SC_LIST].pending_req_count); if (pcount) schedule_timeout_uninterruptible(HZ / 10); - else + else break; } @@ -390,10 +383,10 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) dev_err(&oct->pci_dev->dev, "There were pending requests\n"); /* Force all requests waiting to be fetched by OCTEON to complete. 
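 	 * Each initialized IQ has its read index forced up to the host
 	 * write index, and anything still pending is counted as processed
 	 * before the request list is flushed.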
*/ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { struct octeon_instr_queue *iq; - if (!(oct->io_qmask.iq & (1UL << i))) + if (!(oct->io_qmask.iq & (1ULL << i))) continue; iq = oct->instr_queue[i]; @@ -403,7 +396,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) iq->octeon_read_index = iq->host_write_index; iq->stats.instr_processed += atomic_read(&iq->instr_pending); - lio_process_iq_request_list(oct, iq); + lio_process_iq_request_list(oct, iq, 0); spin_unlock_bh(&iq->lock); } } @@ -498,7 +491,8 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, * \brief mmio handler * @param pdev Pointer to PCI device */ -static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev) +static pci_ers_result_t liquidio_pcie_mmio_enabled( + struct pci_dev *pdev __attribute__((unused))) { /* We should never hit this since we never ask for a reset for a Fatal * Error. We always return DISCONNECT in io_error above. @@ -514,7 +508,8 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev) * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the octeon_resume routine. */ -static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev) +static pci_ers_result_t liquidio_pcie_slot_reset( + struct pci_dev *pdev __attribute__((unused))) { /* We should never hit this since we never ask for a reset for a Fatal * Error. We always return DISCONNECT in io_error above. @@ -531,7 +526,7 @@ static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev) * its OK to resume normal operation. Implementation resembles the * second-half of the octeon_resume routine. */ -static void liquidio_pcie_resume(struct pci_dev *pdev) +static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused))) { /* Nothing to be done here. 
*/ } @@ -542,7 +537,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev) * @param pdev Pointer to PCI device * @param state state to suspend to */ -static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state) +static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)), + pm_message_t state __attribute__((unused))) { return 0; } @@ -551,7 +547,7 @@ static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state) * \brief called when resuming * @param pdev Pointer to PCI device */ -static int liquidio_resume(struct pci_dev *pdev) +static int liquidio_resume(struct pci_dev *pdev __attribute__((unused))) { return 0; } @@ -676,12 +672,24 @@ static inline void txqs_start(struct net_device *netdev) */ static inline void txqs_wake(struct net_device *netdev) { + struct lio *lio = GET_LIO(netdev); + if (netif_is_multiqueue(netdev)) { int i; - for (i = 0; i < netdev->num_tx_queues; i++) - netif_wake_subqueue(netdev, i); + for (i = 0; i < netdev->num_tx_queues; i++) { + int qno = lio->linfo.txpciq[i % + (lio->linfo.num_txpciq)].s.q_no; + + if (__netif_subqueue_stopped(netdev, i)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, + tx_restart, 1); + netif_wake_subqueue(netdev, i); + } + } } else { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); netif_wake_queue(netdev); } } @@ -703,7 +711,7 @@ static void start_txq(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - if (lio->linfo.link.s.status) { + if (lio->linfo.link.s.link_up) { txqs_start(netdev); return; } @@ -750,16 +758,23 @@ static inline int check_txq_status(struct lio *lio) /* check each sub-queue state */ for (q = 0; q < numqs; q++) { - iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)]; + iq = lio->linfo.txpciq[q % + (lio->linfo.num_txpciq)].s.q_no; if (octnet_iq_is_full(lio->oct_dev, iq)) continue; - wake_q(lio->netdev, q); - ret_val++; + if (__netif_subqueue_stopped(lio->netdev, q)) { + wake_q(lio->netdev, q); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, + tx_restart, 1); + ret_val++; + } } } else { if (octnet_iq_is_full(lio->oct_dev, lio->txq)) return 0; wake_q(lio->netdev, lio->txq); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); ret_val = 1; } return ret_val; @@ -785,64 +800,116 @@ static inline struct list_head *list_delete_head(struct list_head *root) } /** - * \brief Delete gather list + * \brief Delete gather lists * @param lio per-network private data */ -static void delete_glist(struct lio *lio) +static void delete_glists(struct lio *lio) { struct octnic_gather *g; + int i; - do { - g = (struct octnic_gather *) - list_delete_head(&lio->glist); - if (g) { - if (g->sg) - kfree((void *)((unsigned long)g->sg - - g->adjust)); - kfree(g); - } - } while (g); + if (!lio->glist) + return; + + for (i = 0; i < lio->linfo.num_txpciq; i++) { + do { + g = (struct octnic_gather *) + list_delete_head(&lio->glist[i]); + if (g) { + if (g->sg) { + dma_unmap_single(&lio->oct_dev-> + pci_dev->dev, + g->sg_dma_ptr, + g->sg_size, + DMA_TO_DEVICE); + kfree((void *)((unsigned long)g->sg - + g->adjust)); + } + kfree(g); + } + } while (g); + } + + kfree((void *)lio->glist); } /** - * \brief Setup gather list + * \brief Setup gather lists * @param lio per-network private data */ -static int setup_glist(struct lio *lio) +static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) { - int i; + int i, j; struct octnic_gather *g; - INIT_LIST_HEAD(&lio->glist); + lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), + GFP_KERNEL); + if 
(!lio->glist_lock) + return 1; - for (i = 0; i < lio->tx_qsize; i++) { - g = kzalloc(sizeof(*g), GFP_KERNEL); - if (!g) - break; + lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), + GFP_KERNEL); + if (!lio->glist) { + kfree((void *)lio->glist_lock); + return 1; + } - g->sg_size = - ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + for (i = 0; i < num_iqs; i++) { + int numa_node = cpu_to_node(i % num_online_cpus()); - g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); - if (!g->sg) { - kfree(g); - break; + spin_lock_init(&lio->glist_lock[i]); + + INIT_LIST_HEAD(&lio->glist[i]); + + for (j = 0; j < lio->tx_qsize; j++) { + g = kzalloc_node(sizeof(*g), GFP_KERNEL, + numa_node); + if (!g) + g = kzalloc(sizeof(*g), GFP_KERNEL); + if (!g) + break; + + g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * + OCT_SG_ENTRY_SIZE); + + g->sg = kmalloc_node(g->sg_size + 8, + GFP_KERNEL, numa_node); + if (!g->sg) + g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); + if (!g->sg) { + kfree(g); + break; + } + + /* The gather component should be aligned on 64-bit + * boundary + */ + if (((unsigned long)g->sg) & 7) { + g->adjust = 8 - (((unsigned long)g->sg) & 7); + g->sg = (struct octeon_sg_entry *) + ((unsigned long)g->sg + g->adjust); + } + g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev, + g->sg, g->sg_size, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, + g->sg_dma_ptr)) { + kfree((void *)((unsigned long)g->sg - + g->adjust)); + kfree(g); + break; + } + + list_add_tail(&g->list, &lio->glist[i]); } - /* The gather component should be aligned on 64-bit boundary */ - if (((unsigned long)g->sg) & 7) { - g->adjust = 8 - (((unsigned long)g->sg) & 7); - g->sg = (struct octeon_sg_entry *) - ((unsigned long)g->sg + g->adjust); + if (j != lio->tx_qsize) { + delete_glists(lio); + return 1; } - list_add_tail(&g->list, &lio->glist); } - if (i == lio->tx_qsize) - return 0; - - delete_glist(lio); - return 1; + return 0; } /** @@ -856,7 +923,7 @@ static void print_link_info(struct net_device *netdev) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { struct oct_link_info *linfo = &lio->linfo; - if (linfo->link.s.status) { + if (linfo->link.s.link_up) { netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", linfo->link.s.speed, (linfo->link.s.duplex) ? "Full" : "Half"); @@ -878,13 +945,15 @@ static inline void update_link_status(struct net_device *netdev, union oct_link_status *ls) { struct lio *lio = GET_LIO(netdev); + int changed = (lio->linfo.link.u64 != ls->u64); - if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { - lio->linfo.link.u64 = ls->u64; + lio->linfo.link.u64 = ls->u64; + if ((lio->intf_open) && (changed)) { print_link_info(netdev); + lio->link_changes++; - if (lio->linfo.link.s.status) { + if (lio->linfo.link.s.link_up) { netif_carrier_on(netdev); /* start_txq(netdev); */ txqs_wake(netdev); @@ -895,6 +964,42 @@ static inline void update_link_status(struct net_device *netdev, } } +/* Runs in interrupt context. */ +static void update_txq_status(struct octeon_device *oct, int iq_num) +{ + struct net_device *netdev; + struct lio *lio; + struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; + + /*octeon_update_iq_read_idx(oct, iq);*/ + + netdev = oct->props[iq->ifidx].netdev; + + /* This is needed because the first IQ does not have + * a netdev associated with it. 
+ */ + if (!netdev) + return; + + lio = GET_LIO(netdev); + if (netif_is_multiqueue(netdev)) { + if (__netif_subqueue_stopped(netdev, iq->q_index) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, iq_num))) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, + tx_restart, 1); + netif_wake_subqueue(netdev, iq->q_index); + } else { + if (!octnet_iq_is_full(oct, lio->txq)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, + lio->txq, + tx_restart, 1); + wake_q(netdev, lio->txq); + } + } + } +} + /** * \brief Droq packet processor sceduler * @param oct octeon device @@ -908,8 +1013,9 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) struct octeon_droq *droq; if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { - for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { - if (!(oct->droq_intr & (1 << oq_no))) + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); + oq_no++) { + if (!(oct->droq_intr & (1ULL << oq_no))) continue; droq = oct->droq[oq_no]; @@ -985,7 +1091,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct) * @param pdev PCI device structure * @param ent unused */ -static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int +liquidio_probe(struct pci_dev *pdev, + const struct pci_device_id *ent __attribute__((unused))) { struct octeon_device *oct_dev = NULL; struct handshake *hs; @@ -1020,6 +1128,9 @@ static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; } + oct_dev->rx_pause = 1; + oct_dev->tx_pause = 1; + dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); return 0; @@ -1085,19 +1196,13 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->flags & LIO_FLAG_MSI_ENABLED) pci_disable_msi(oct->pci_dev); - /* Soft reset the octeon device before exiting */ - oct->fn_list.soft_reset(oct); - - /* Disable the device, releasing the PCI INT */ - pci_disable_device(oct->pci_dev); - /* fallthrough */ case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/ mdelay(100); - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; octeon_delete_droq(oct, i); } @@ -1124,8 +1229,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_INSTR_QUEUE_INIT_DONE: - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; octeon_delete_instr_queue(oct, i); } @@ -1137,14 +1242,21 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_PCI_MAP_DONE: + + /* Soft reset the octeon device before exiting */ + oct->fn_list.soft_reset(oct); + octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); /* fallthrough */ case OCT_DEV_BEGIN_STATE: + /* Disable the device, releasing the PCI INT */ + pci_disable_device(oct->pci_dev); + /* Nothing to be done here either */ break; - } /* end switch(oct->status) */ + } /* end switch (oct->status) */ tasklet_kill(&oct_priv->droq_tasklet); } @@ -1157,18 +1269,15 @@ static void octeon_destroy_resources(struct octeon_device *oct) static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.s.cmd = 
OCTNET_CMD_RX_CTL; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = start_stop; + nctrl.ncmd.s.param1 = start_stop; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)lio->netdev; - nparams.resp_order = OCTEON_RESP_NORESPONSE; - - if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0) + if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0) netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); } @@ -1184,6 +1293,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; struct lio *lio; + struct napi_struct *napi, *n; if (!netdev) { dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", @@ -1200,13 +1310,22 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) txqs_stop(netdev); + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + } + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); - delete_glist(lio); + delete_glists(lio); free_netdev(netdev); + oct->props[ifidx].gmxport = -1; + oct->props[ifidx].netdev = NULL; } @@ -1225,10 +1344,15 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) return 1; } + spin_lock_bh(&oct->cmd_resp_wqlock); + oct->cmd_resp_state = OCT_DRV_OFFLINE; + spin_unlock_bh(&oct->cmd_resp_wqlock); + for (i = 0; i < oct->ifcount; i++) { lio = GET_LIO(oct->props[i].netdev); for (j = 0; j < lio->linfo.num_rxpciq; j++) - octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]); + octeon_unregister_droq_ops(oct, + lio->linfo.rxpciq[j].s.q_no); } for (i = 0; i < oct->ifcount; i++) @@ -1272,6 +1396,7 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) { u32 dev_id, rev_id; int ret = 1; + char *s; pci_read_config_dword(oct->pci_dev, 0, &dev_id); pci_read_config_dword(oct->pci_dev, 8, &rev_id); @@ -1281,22 +1406,27 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) case OCTEON_CN68XX_PCIID: oct->chip_id = OCTEON_CN68XX; ret = lio_setup_cn68xx_octeon_device(oct); + s = "CN68XX"; break; case OCTEON_CN66XX_PCIID: oct->chip_id = OCTEON_CN66XX; ret = lio_setup_cn66xx_octeon_device(oct); + s = "CN66XX"; break; + default: + s = "?"; dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", dev_id); } if (!ret) - dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n", + dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s, OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct), - octeon_get_conf(oct)->card_name); + octeon_get_conf(oct)->card_name, + LIQUIDIO_VERSION); return ret; } @@ -1324,6 +1454,16 @@ static int octeon_pci_os_setup(struct octeon_device *oct) return 0; } +static inline int skb_iq(struct lio *lio, struct sk_buff *skb) +{ + int q = 0; + + if (netif_is_multiqueue(lio->netdev)) + q = skb->queue_mapping % lio->linfo.num_txpciq; + + return q; +} + /** * \brief Check Tx queue state for a given network buffer * @param lio per-network private data @@ -1335,14 +1475,19 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) if (netif_is_multiqueue(lio->netdev)) { q = skb->queue_mapping; - iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))]; + iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; } else { iq = lio->txq; + q = iq; } if (octnet_iq_is_full(lio->oct_dev, iq)) return 0; - 
wake_q(lio->netdev, q); + + if (__netif_subqueue_stopped(lio->netdev, q)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); + wake_q(lio->netdev, q); + } return 1; } @@ -1365,7 +1510,7 @@ static void free_netbuf(void *buf) check_txq_state(lio, skb); - recv_buffer_free((struct sk_buff *)skb); + tx_buffer_free(skb); } /** @@ -1378,7 +1523,7 @@ static void free_netsgbuf(void *buf) struct sk_buff *skb; struct lio *lio; struct octnic_gather *g; - int i, frags; + int i, frags, iq; finfo = (struct octnet_buf_free_info *)buf; skb = finfo->skb; @@ -1400,17 +1545,17 @@ static void free_netsgbuf(void *buf) i++; } - dma_unmap_single(&lio->oct_dev->pci_dev->dev, - finfo->dptr, g->sg_size, - DMA_TO_DEVICE); + dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev, + g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE); - spin_lock(&lio->lock); - list_add_tail(&g->list, &lio->glist); - spin_unlock(&lio->lock); + iq = skb_iq(lio, skb); + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); check_txq_state(lio, skb); /* mq support: sub-queue state check */ - recv_buffer_free((struct sk_buff *)skb); + tx_buffer_free(skb); } /** @@ -1424,7 +1569,7 @@ static void free_netsgbuf_with_resp(void *buf) struct sk_buff *skb; struct lio *lio; struct octnic_gather *g; - int i, frags; + int i, frags, iq; sc = (struct octeon_soft_command *)buf; skb = (struct sk_buff *)sc->callback_arg; @@ -1448,13 +1593,14 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - dma_unmap_single(&lio->oct_dev->pci_dev->dev, - finfo->dptr, g->sg_size, - DMA_TO_DEVICE); + dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev, + g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE); + + iq = skb_iq(lio, skb); - spin_lock(&lio->lock); - list_add_tail(&g->list, &lio->glist); - spin_unlock(&lio->lock); + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); /* Don't free the skb yet */ @@ -1567,8 +1713,10 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp, * @param rq request * @param on is it on */ -static int liquidio_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) +static int +liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)), + struct ptp_clock_request *rq __attribute__((unused)), + int on __attribute__((unused))) { return -EOPNOTSUPP; } @@ -1655,6 +1803,7 @@ static int load_firmware(struct octeon_device *oct) if (ret) { dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.", fw_name); + release_firmware(fw); return ret; } @@ -1708,7 +1857,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, * @param buf pointer to resp structure */ static void if_cfg_callback(struct octeon_device *oct, - u32 status, + u32 status __attribute__((unused)), void *buf) { struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; @@ -1722,7 +1871,10 @@ static void if_cfg_callback(struct octeon_device *oct, if (resp->status) dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. 
Status: %llx\n", CVM_CAST64(resp->status)); - ACCESS_ONCE(ctx->cond) = 1; + WRITE_ONCE(ctx->cond, 1); + + snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", + resp->cfg_info.liquidio_firmware_version); /* This barrier is required to be sure that the response has been * written fully before waking up the handler @@ -1739,16 +1891,16 @@ static void if_cfg_callback(struct octeon_device *oct, * @returns selected queue number */ static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + void *accel_priv __attribute__((unused)), + select_queue_fallback_t fallback __attribute__((unused))) { - int qindex; + u32 qindex = 0; struct lio *lio; lio = GET_LIO(dev); - /* select queue on chosen queue_mapping or core */ - qindex = skb_rx_queue_recorded(skb) ? - skb_get_rx_queue(skb) : smp_processor_id(); - return (u16)(qindex & (lio->linfo.num_txpciq - 1)); + qindex = skb_tx_hash(dev, skb); + + return (u16)(qindex % (lio->linfo.num_txpciq)); } /** Routine to push packets arriving on Octeon interface upto network layer. @@ -1757,26 +1909,28 @@ static u16 select_q(struct net_device *dev, struct sk_buff *skb, * @param len - size of total data received. * @param rh - Control header associated with the packet * @param param - additional control data with the packet + * @param arg - farg registered in droq_ops */ static void -liquidio_push_packet(u32 octeon_id, +liquidio_push_packet(u32 octeon_id __attribute__((unused)), void *skbuff, u32 len, union octeon_rh *rh, - void *param) + void *param, + void *arg) { struct napi_struct *napi = param; - struct octeon_device *oct = lio_get_device(octeon_id); struct sk_buff *skb = (struct sk_buff *)skbuff; struct skb_shared_hwtstamps *shhwtstamps; u64 ns; - struct net_device *netdev = - (struct net_device *)oct->props[rh->r_dh.link].netdev; + u16 vtag = 0; + struct net_device *netdev = (struct net_device *)arg; struct octeon_droq *droq = container_of(param, struct octeon_droq, napi); if (netdev) { int packet_was_received; struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; /* Do not proceed if the interface is not in RUNNING state. */ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { @@ -1787,32 +1941,86 @@ liquidio_push_packet(u32 octeon_id, skb->dev = netdev; - if (rh->r_dh.has_hwtstamp) { - /* timestamp is included from the hardware at the - * beginning of the packet. - */ - if (ifstate_check(lio, - LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { - /* Nanoseconds are in the first 64-bits - * of the packet. + skb_record_rx_queue(skb, droq->q_no); + if (likely(len > MIN_SKB_SIZE)) { + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (pg_info->page) { + /* For Paged allocation use the frags */ + va = page_address(pg_info->page) + + pg_info->page_offset; + memcpy(skb->data, va, MIN_SKB_SIZE); + skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); + } + } else { + struct octeon_skb_page_info *pg_info = + ((struct octeon_skb_page_info *)(skb->cb)); + skb_copy_to_linear_data(skb, page_address(pg_info->page) + + pg_info->page_offset, len); + skb_put(skb, len); + put_page(pg_info->page); + } + + if (((oct->chip_id == OCTEON_CN66XX) || + (oct->chip_id == OCTEON_CN68XX)) && + ptp_enable) { + if (rh->r_dh.has_hwtstamp) { + /* timestamp is included from the hardware at + * the beginning of the packet. 
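[Review note] The select_q() rewrite earlier in this hunk drops the old power-of-two mask in favour of skb_tx_hash() plus a modulo. That matters once the queue count is no longer guaranteed to be a power of two; a standalone demonstration of the failure mode the mask form has:

#include <stdio.h>

int main(void)
{
	unsigned n = 6;                  /* e.g. six tx queues */

	for (unsigned hash = 0; hash < 8; hash++)
		printf("hash %u -> mask %u, mod %u\n",
		       hash, hash & (n - 1), hash % n);
	/* mask form: with n = 6, n - 1 = 5 = 0b101, so queues 2 and 3
	 * can never be selected and traffic piles onto the rest;
	 * the modulo form reaches all six. */
	return 0;
}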
*/ - memcpy(&ns, (skb->data), sizeof(ns)); - shhwtstamps = skb_hwtstamps(skb); - shhwtstamps->hwtstamp = - ns_to_ktime(ns + lio->ptp_adjust); + if (ifstate_check + (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { + /* Nanoseconds are in the first 64-bits + * of the packet. + */ + memcpy(&ns, (skb->data), sizeof(ns)); + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(ns + + lio->ptp_adjust); + } + skb_pull(skb, sizeof(ns)); } - skb_pull(skb, sizeof(ns)); } skb->protocol = eth_type_trans(skb, skb->dev); - if ((netdev->features & NETIF_F_RXCSUM) && - (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED)) + (((rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || + (!(rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) /* checksum has already been verified */ skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; + /* Setting Encapsulation field on basis of status received + * from the firmware + */ + if (rh->r_dh.encap_on) { + skb->encapsulation = 1; + skb->csum_level = 1; + droq->stats.rx_vxlan++; + } + + /* inbound VLAN tag */ + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (rh->r_dh.vlan != 0)) { + u16 vid = rh->r_dh.vlan; + u16 priority = rh->r_dh.priority; + + vtag = priority << 13 | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; if (packet_was_received) { @@ -1866,39 +2074,6 @@ static void liquidio_napi_drv_callback(void *arg) } } -/** - * \brief Main NAPI poll function - * @param droq octeon output queue - * @param budget maximum number of items to process - */ -static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget) -{ - int work_done; - struct lio *lio = GET_LIO(droq->napi.dev); - struct octeon_device *oct = lio->oct_dev; - - work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, - POLL_EVENT_PROCESS_PKTS, - budget); - if (work_done < 0) { - netif_info(lio, rx_err, lio->netdev, - "Receive work_done < 0, rxq:%d\n", droq->q_no); - goto octnet_napi_finish; - } - - if (work_done > budget) - dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n", - __func__, work_done, budget); - - return work_done; - -octnet_napi_finish: - napi_complete(&droq->napi); - octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR, - 0); - return 0; -} - /** * \brief Entry point for NAPI polling * @param napi NAPI structure @@ -1908,35 +2083,57 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) { struct octeon_droq *droq; int work_done; + int tx_done = 0, iq_no; + struct octeon_instr_queue *iq; + struct octeon_device *oct; droq = container_of(napi, struct octeon_droq, napi); + oct = droq->oct_dev; + iq_no = droq->q_no; + /* Handle Droq descriptors */ + work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, + POLL_EVENT_PROCESS_PKTS, + budget); - work_done = liquidio_napi_do_rx(droq, budget); + /* Flush the instruction queue */ + iq = oct->instr_queue[iq_no]; + if (iq) { + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, 1, budget); + /* Update iq read-index rather than waiting for next interrupt. + * Return back if tx_done is false. 
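[Review note] The receive-checksum branch above now distinguishes tunnelled from plain frames before trusting the hardware verdict. A condensed, standalone restatement of that decision, with flag values assumed for the sketch (the real ones are defined in liquidio_common.h):

#include <stdbool.h>

#define CSUM_VERIFIED     0x2  /* values assumed for this sketch */
#define TUN_CSUM_VERIFIED 0x4

struct rx_dh_flags {
	unsigned encap_on:1;
	unsigned csum_verified:3;
};

/* true when the frame may be marked CHECKSUM_UNNECESSARY: a tunnelled
 * frame needs the tunnel-aware verification bit, a plain frame only
 * the ordinary one. */
static bool rx_csum_ok(const struct rx_dh_flags *f)
{
	unsigned need = f->encap_on ? TUN_CSUM_VERIFIED : CSUM_VERIFIED;

	return (f->csum_verified & need) != 0;
}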
+ */ + update_txq_status(oct, iq_no); + /*tx_done = (iq->flush_index == iq->octeon_read_index);*/ + } else { + dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", + __func__, iq_no); + } - if (work_done < budget) { + if ((work_done < budget) && (tx_done)) { napi_complete(napi); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; } - return work_done; + return (!tx_done) ? (budget) : (work_done); } /** * \brief Setup input and output queues * @param octeon_dev octeon device - * @param net_device Net device + * @param ifidx Interface Index * * Note: Queues are with respect to the octeon device. Thus * an input queue is for egress packets, and output queues * are for ingress packets. */ static inline int setup_io_queues(struct octeon_device *octeon_dev, - struct net_device *net_device) + int ifidx) { - static int first_time = 1; - static struct octeon_droq_ops droq_ops; + struct octeon_droq_ops droq_ops; + struct net_device *netdev; static int cpu_id; static int cpu_id_modulus; struct octeon_droq *droq; @@ -1945,23 +2142,26 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, struct lio *lio; int num_tx_descs; - lio = GET_LIO(net_device); - if (first_time) { - first_time = 0; - memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + netdev = octeon_dev->props[ifidx].netdev; - droq_ops.fptr = liquidio_push_packet; + lio = GET_LIO(netdev); - droq_ops.poll_mode = 1; - droq_ops.napi_fn = liquidio_napi_drv_callback; - cpu_id = 0; - cpu_id_modulus = num_present_cpus(); - } + memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + + droq_ops.fptr = liquidio_push_packet; + droq_ops.farg = (void *)netdev; + + droq_ops.poll_mode = 1; + droq_ops.napi_fn = liquidio_napi_drv_callback; + cpu_id = 0; + cpu_id_modulus = num_present_cpus(); /* set up DROQs. 
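[Review note] The new poll routine just above folds instruction-queue (tx) flushing into the rx NAPI handler, and the control flow is easy to miss in the diff: NAPI may only be completed when rx stayed under budget AND the iq drained; otherwise the full budget is returned so the core keeps polling. A sketch with the driver-specific calls replaced by hypothetical helpers:

#include <linux/netdevice.h>

/* hypothetical helpers standing in for octeon_process_droq_poll_cmd(),
 * octeon_flush_iq() and the interrupt re-enable command */
int process_rx(struct napi_struct *napi, int budget);
int flush_tx(struct napi_struct *napi, int budget);
void reenable_irq(struct napi_struct *napi);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = process_rx(napi, budget); /* rx packets consumed */
	int tx_done = flush_tx(napi, budget);     /* 1 once the iq drained */

	if (work_done < budget && tx_done) {
		napi_complete(napi);
		reenable_irq(napi);
		return 0;
	}
	/* tx still backlogged: claim the whole budget so the core polls
	 * again even though rx may have gone idle */
	return tx_done ? work_done : budget;
}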
*/ for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q]; - + q_no = lio->linfo.rxpciq[q].s.q_no; + dev_dbg(&octeon_dev->pci_dev->dev, + "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n", + q, q_no); retval = octeon_setup_droq(octeon_dev, q_no, CFG_GET_NUM_RX_DESCS_NIC_IF (octeon_get_conf(octeon_dev), @@ -1978,7 +2178,11 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, droq = octeon_dev->droq[q_no]; napi = &droq->napi; - netif_napi_add(net_device, napi, liquidio_napi_poll, 64); + dev_dbg(&octeon_dev->pci_dev->dev, + "netif_napi_add netdev:%llx oct:%llx\n", + (u64)netdev, + (u64)octeon_dev); + netif_napi_add(netdev, napi, liquidio_napi_poll, 64); /* designate a CPU for this droq */ droq->cpu_id = cpu_id; @@ -1994,9 +2198,9 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf (octeon_dev), lio->ifidx); - retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q], - num_tx_descs, - netdev_get_tx_queue(net_device, q)); + retval = octeon_setup_iq(octeon_dev, ifidx, q, + lio->linfo.txpciq[q], num_tx_descs, + netdev_get_tx_queue(netdev, q)); if (retval) { dev_err(&octeon_dev->pci_dev->dev, " %s : Runtime IQ(TxQ) creation failed.\n", @@ -2034,7 +2238,8 @@ static inline void setup_tx_poll_fn(struct net_device *netdev) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - lio->txq_status_wq.wq = create_workqueue("txq-status"); + lio->txq_status_wq.wq = alloc_workqueue("txq-status", + WQ_MEM_RECLAIM, 0); if (!lio->txq_status_wq.wq) { dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); return; @@ -2046,6 +2251,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev) &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); } +static inline void cleanup_tx_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); + destroy_workqueue(lio->txq_status_wq.wq); +} + /** * \brief Net device open for LiquidIO * @param netdev network device @@ -2056,17 +2269,22 @@ static int liquidio_open(struct net_device *netdev) struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; - list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) - napi_enable(napi); + if (oct->props[lio->ifidx].napi_enabled == 0) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_enable(napi); + + oct->props[lio->ifidx].napi_enabled = 1; + } oct_ptp_open(netdev); ifstate_set(lio, LIO_IFSTATE_RUNNING); + setup_tx_poll_fn(netdev); + start_txq(netdev); netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); - try_module_get(THIS_MODULE); /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -2086,41 +2304,36 @@ static int liquidio_open(struct net_device *netdev) */ static int liquidio_stop(struct net_device *netdev) { - struct napi_struct *napi, *n; struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); + ifstate_reset(lio, LIO_IFSTATE_RUNNING); + + netif_tx_disable(netdev); + /* Inform that netif carrier is down */ + netif_carrier_off(netdev); lio->intf_open = 0; - lio->linfo.link.s.status = 0; + lio->linfo.link.s.link_up = 0; + lio->link_changes++; - netif_carrier_off(netdev); + /* Pause for a moment and wait for Octeon to flush out (to the wire) any + * egress packets that are in-flight. 
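[Review note] setup_tx_poll_fn() above switches from create_workqueue() to a dedicated alloc_workqueue(..., WQ_MEM_RECLAIM, 0): the work runs on the packet-transmit path, so it must be able to make progress under memory pressure. A minimal lifecycle sketch of such a self-rearming poller, with txq_poll_fn standing in for the driver's routine:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *txq_wq;
static struct delayed_work txq_work;

static void txq_poll_fn(struct work_struct *work)
{
	/* ... check queue fill levels, wake stopped subqueues ... */
	queue_delayed_work(txq_wq, &txq_work, msecs_to_jiffies(1));
}

static int txq_poll_start(void)
{
	txq_wq = alloc_workqueue("txq-status", WQ_MEM_RECLAIM, 0);
	if (!txq_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&txq_work, txq_poll_fn);
	queue_delayed_work(txq_wq, &txq_work, msecs_to_jiffies(1));
	return 0;
}

static void txq_poll_stop(void)
{
	/* the _sync variant waits for a running instance and also blocks
	 * the self-requeue, so stop cannot race the rearm */
	cancel_delayed_work_sync(&txq_work);
	destroy_workqueue(txq_wq);
}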
+ */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(100)); - /* tell Octeon to stop forwarding packets to host */ + /* Now it should be safe to tell Octeon that nic interface is down. */ send_rx_ctrl_cmd(lio, 0); - cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); - flush_workqueue(lio->txq_status_wq.wq); - destroy_workqueue(lio->txq_status_wq.wq); + cleanup_tx_poll_fn(netdev); if (lio->ptp_clock) { ptp_clock_unregister(lio->ptp_clock); lio->ptp_clock = NULL; } - ifstate_reset(lio, LIO_IFSTATE_RUNNING); - - /* This is a hack that allows DHCP to continue working. */ - set_bit(__LINK_STATE_START, &lio->netdev->state); - - list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) - napi_disable(napi); - - txqs_stop(netdev); - dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); - module_put(THIS_MODULE); return 0; } @@ -2131,6 +2344,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) struct net_device *netdev = (struct net_device *)nctrl->netpndev; struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + u8 *mac; switch (nctrl->ncmd.s.cmd) { case OCTNET_CMD_CHANGE_DEVFLAGS: @@ -2138,22 +2352,24 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) break; case OCTNET_CMD_CHANGE_MACADDR: - /* If command is successful, change the MACADDR. */ - netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n", - CVM_CAST64(nctrl->udd[0])); - dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n", - netdev->name, CVM_CAST64(nctrl->udd[0])); - memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN); + mac = ((u8 *)&nctrl->udd[0]) + 2; + netif_info(lio, probe, lio->netdev, + "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", + "MACAddr changed to", mac[0], mac[1], + mac[2], mac[3], mac[4], mac[5]); break; case OCTNET_CMD_CHANGE_MTU: /* If command is successful, change the MTU. 
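[Review note] The MACADDR completion case above prints the address with six hand-rolled %2.2x conversions; printk's %pM extension does the same in one conversion and would be the more idiomatic form, e.g.:

	netif_info(lio, probe, lio->netdev, "MACAddr changed to %pM\n", mac);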
*/ netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n", - netdev->mtu, nctrl->ncmd.s.param2); + netdev->mtu, nctrl->ncmd.s.param1); dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", netdev->name, netdev->mtu, - nctrl->ncmd.s.param2); - netdev->mtu = nctrl->ncmd.s.param2; + nctrl->ncmd.s.param1); + rtnl_lock(); + netdev->mtu = nctrl->ncmd.s.param1; + call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev); + rtnl_unlock(); break; case OCTNET_CMD_GPIO_ACCESS: @@ -2179,11 +2395,79 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) netdev->name); break; + case OCTNET_CMD_ENABLE_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n", + netdev->name); + break; + + case OCTNET_CMD_ADD_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n", + netdev->name, nctrl->ncmd.s.param1); + break; + + case OCTNET_CMD_DEL_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n", + netdev->name, nctrl->ncmd.s.param1); + break; + case OCTNET_CMD_SET_SETTINGS: dev_info(&oct->pci_dev->dev, "%s settings changed\n", netdev->name); break; + /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL" + * Command passed by NIC driver + */ + case OCTNET_CMD_TNL_RX_CSUM_CTL: + if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) { + netif_info(lio, probe, lio->netdev, + "%s RX Checksum Offload Enabled\n", + netdev->name); + } else if (nctrl->ncmd.s.param1 == + OCTNET_CMD_RXCSUM_DISABLE) { + netif_info(lio, probe, lio->netdev, + "%s RX Checksum Offload Disabled\n", + netdev->name); + } + break; + + /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL" + * Command passed by NIC driver + */ + case OCTNET_CMD_TNL_TX_CSUM_CTL: + if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) { + netif_info(lio, probe, lio->netdev, + "%s TX Checksum Offload Enabled\n", + netdev->name); + } else if (nctrl->ncmd.s.param1 == + OCTNET_CMD_TXCSUM_DISABLE) { + netif_info(lio, probe, lio->netdev, + "%s TX Checksum Offload Disabled\n", + netdev->name); + } + break; + + /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG" + * Command passed by NIC driver + */ + case OCTNET_CMD_VXLAN_PORT_CONFIG: + if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) { + netif_info(lio, probe, lio->netdev, + "%s VxLAN Destination UDP PORT:%d ADDED\n", + netdev->name, + nctrl->ncmd.s.param1); + } else if (nctrl->ncmd.s.more == + OCTNET_CMD_VXLAN_PORT_DEL) { + netif_info(lio, probe, lio->netdev, + "%s VxLAN Destination UDP PORT:%d DELETED\n", + netdev->name, + nctrl->ncmd.s.param1); + } + break; + + case OCTNET_CMD_SET_FLOW_CTL: + netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n"); + break; default: dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__, @@ -2233,10 +2517,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; struct netdev_hw_addr *ha; u64 *mc; - int ret, i; + int ret; int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); @@ -2244,15 +2527,14 @@ static void liquidio_set_mcast_list(struct net_device *netdev) /* Create a ctrl pkt command to be sent to core app. 
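[Review note] The CHANGE_MTU completion earlier in this hunk is worth a closer look: because the confirmed MTU arrives asynchronously from firmware rather than through .ndo_change_mtu under rtnl, the handler must retake the lock and replay the notifier that dev_set_mtu() would normally have fired. The pattern in isolation:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void commit_mtu(struct net_device *netdev, int new_mtu)
{
	rtnl_lock();
	netdev->mtu = new_mtu;
	/* tell stacked devices, bonding, etc. about the change, exactly
	 * as dev_set_mtu() would have */
	call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
	rtnl_unlock();
}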
*/ nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = get_new_flags(netdev); - nctrl.ncmd.s.param3 = mc_count; + nctrl.ncmd.s.param1 = get_new_flags(netdev); + nctrl.ncmd.s.param2 = mc_count; nctrl.ncmd.s.more = mc_count; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; /* copy all the addresses into the udd */ - i = 0; mc = &nctrl.udd[0]; netdev_for_each_mc_addr(ha, netdev) { *mc = 0; @@ -2268,9 +2550,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev) */ nctrl.wait_time = 0; - nparams.resp_order = OCTEON_RESP_NORESPONSE; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); @@ -2288,19 +2568,17 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) struct octeon_device *oct = lio->oct_dev; struct sockaddr *addr = (struct sockaddr *)p; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; - if ((!is_valid_ether_addr(addr->sa_data)) || - (ifstate_check(lio, LIO_IFSTATE_RUNNING))) + if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = 0; + nctrl.ncmd.s.param1 = 0; nctrl.ncmd.s.more = 1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; nctrl.wait_time = 100; @@ -2309,9 +2587,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) /* The MAC Address is presented in network byte order. 
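[Review note] Both the multicast-list loop and the set_mac() path above park a 6-byte MAC at byte offsets 2..7 of a 64-bit udd word, independent of host endianness (that is what the "+ 2" in ether_addr_copy(((u8 *)mc) + 2, ...) does). A standalone equivalent of that marshalling:

#include <stdint.h>
#include <string.h>

/* mirrors the udd packing: MAC bytes occupy byte offsets 2..7 of the
 * 64-bit word by memory position, not by arithmetic shifting, so the
 * layout is the same on big- and little-endian hosts */
static uint64_t pack_mac(const uint8_t mac[6])
{
	uint64_t w = 0;

	memcpy((uint8_t *)&w + 2, mac, 6);
	return w;
}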
*/ memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); - nparams.resp_order = OCTEON_RESP_ORDERED; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); return -ENOMEM; @@ -2339,7 +2615,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) oct = lio->oct_dev; for (i = 0; i < lio->linfo.num_txpciq; i++) { - iq_no = lio->linfo.txpciq[i]; + iq_no = lio->linfo.txpciq[i].s.q_no; iq_stats = &oct->instr_queue[iq_no]->stats; pkts += iq_stats->tx_done; drop += iq_stats->tx_dropped; @@ -2355,7 +2631,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes = 0; for (i = 0; i < lio->linfo.num_rxpciq; i++) { - oq_no = lio->linfo.rxpciq[i]; + oq_no = lio->linfo.rxpciq[i].s.q_no; oq_stats = &oct->droq[oq_no]->stats; pkts += oq_stats->rx_pkts_received; drop += (oq_stats->rx_dropped + @@ -2381,19 +2657,16 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; - int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE; int ret = 0; - /* Limit the MTU to make sure the ethernet packets are between 64 bytes - * and 65535 bytes + /* Limit the MTU to make sure the ethernet packets are between 68 bytes + * and 16000 bytes */ - if ((max_frm_size < OCTNET_MIN_FRM_SIZE) || - (max_frm_size > OCTNET_MAX_FRM_SIZE)) { + if ((new_mtu < LIO_MIN_MTU_SIZE) || + (new_mtu > LIO_MAX_MTU_SIZE)) { dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu); dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n", - (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE), - (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)); + LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE); return -EINVAL; } @@ -2401,15 +2674,13 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = new_mtu; + nctrl.ncmd.s.param1 = new_mtu; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nparams.resp_order = OCTEON_RESP_ORDERED; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); return -1; @@ -2426,7 +2697,7 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) * @param ifr interface request * @param cmd command */ -static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) { struct hwtstamp_config conf; struct lio *lio = GET_LIO(netdev); @@ -2487,7 +2758,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCSHWTSTAMP: - return hwtstamp_ioctl(netdev, ifr, cmd); + return hwtstamp_ioctl(netdev, ifr); default: return -EOPNOTSUPP; } @@ -2534,7 +2805,7 @@ static void handle_timestamp(struct octeon_device *oct, } octeon_free_soft_command(oct, sc); - recv_buffer_free(skb); + tx_buffer_free(skb); } /* \brief Send a data packet that will be timestamped @@ -2549,10 +2820,9 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, { int retval; struct octeon_soft_command *sc; 
- struct octeon_instr_ih *ih; - struct octeon_instr_rdp *rdp; struct lio *lio; int ring_doorbell; + u32 len; lio = finfo->lio; @@ -2574,14 +2844,13 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, sc->callback_arg = finfo->skb; sc->iq_no = ndata->q_no; - ih = (struct octeon_instr_ih *)&sc->cmd.ih; - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz; ring_doorbell = !xmit_more; retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, - sc, ih->dlengsz, ndata->reqtype); + sc, len, ndata->reqtype); - if (retval) { + if (retval == IQ_SEND_FAILED) { dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", retval); octeon_free_soft_command(oct, sc); @@ -2592,68 +2861,6 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, return retval; } -static inline int is_ipv4(struct sk_buff *skb) -{ - return (skb->protocol == htons(ETH_P_IP)) && - (ip_hdr(skb)->version == 4); -} - -static inline int is_vlan(struct sk_buff *skb) -{ - return skb->protocol == htons(ETH_P_8021Q); -} - -static inline int is_ip_fragmented(struct sk_buff *skb) -{ - /* The Don't fragment and Reserved flag fields are ignored. - * IP is fragmented if - * - the More fragments bit is set (indicating this IP is a fragment - * with more to follow; the current offset could be 0 ). - * - ths offset field is non-zero. - */ - return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0; -} - -static inline int is_ipv6(struct sk_buff *skb) -{ - return (skb->protocol == htons(ETH_P_IPV6)) && - (ipv6_hdr(skb)->version == 6); -} - -static inline int is_with_extn_hdr(struct sk_buff *skb) -{ - return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) && - (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP); -} - -static inline int is_tcpudp(struct sk_buff *skb) -{ - return (ip_hdr(skb)->protocol == IPPROTO_TCP) || - (ip_hdr(skb)->protocol == IPPROTO_UDP); -} - -static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb) -{ - u32 tag; - struct iphdr *iphdr = ip_hdr(skb); - - tag = crc32(0, &iphdr->protocol, 1); - tag = crc32(tag, (u8 *)&iphdr->saddr, 8); - tag = crc32(tag, skb_transport_header(skb), 4); - return tag; -} - -static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb) -{ - u32 tag; - struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); - - tag = crc32(0, &ipv6hdr->nexthdr, 1); - tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32); - tag = crc32(tag, skb_transport_header(skb), 4); - return tag; -} - /** \brief Transmit networks packets to the Octeon interface * @param skbuff skbuff struct to be passed to network layer. * @param netdev pointer to network device @@ -2668,18 +2875,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) struct octnic_data_pkt ndata; struct octeon_device *oct; struct oct_iq_stats *stats; - int cpu = 0, status = 0; + struct octeon_instr_irh *irh; + union tx_info *tx_info; + int status = 0; int q_idx = 0, iq_no = 0; - int xmit_more; + int xmit_more, j; + u64 dptr = 0; u32 tag = 0; lio = GET_LIO(netdev); oct = lio->oct_dev; if (netif_is_multiqueue(netdev)) { - cpu = skb->queue_mapping; - q_idx = (cpu & (lio->linfo.num_txpciq - 1)); - iq_no = lio->linfo.txpciq[q_idx]; + q_idx = skb->queue_mapping; + q_idx = (q_idx % (lio->linfo.num_txpciq)); + tag = q_idx; + iq_no = lio->linfo.txpciq[q_idx].s.q_no; } else { iq_no = lio->txq; } @@ -2690,11 +2901,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) * transmitted. 
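[Review note] A few hunks below, the gather path in liquidio_xmit() gains dma_mapping_error() checking for every frag, with a full rollback of the linear mapping and frags 0..i-1 when frag i fails; that unwind is the part worth studying. A self-contained sketch under simplified types (SG_PTR mimics the g->sg[i >> 2].ptr[i & 3] indexing, and entry 0 is assumed to already hold the mapped linear part):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct sg_entry { u64 size; dma_addr_t ptr[4]; }; /* simplified */
struct gather   { struct sg_entry sg[16]; };
#define SG_PTR(g, i) ((g)->sg[(i) >> 2].ptr[(i) & 3])

static int map_frags(struct device *dev, struct sk_buff *skb,
		     struct gather *g)
{
	int i, j;

	for (i = 1; i <= skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
		dma_addr_t addr = dma_map_page(dev, skb_frag_page(frag),
					       frag->page_offset,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			goto unwind;
		SG_PTR(g, i) = addr;
	}
	return 0;

unwind:
	/* roll back everything mapped so far, or the IOMMU leaks entries */
	dma_unmap_single(dev, SG_PTR(g, 0),
			 skb->len - skb->data_len, DMA_TO_DEVICE);
	for (j = 1; j < i; j++)
		dma_unmap_page(dev, SG_PTR(g, j),
			       skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
			       DMA_TO_DEVICE);
	return -ENOMEM;
}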
*/ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || - (!lio->linfo.link.s.status) || + (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", - lio->linfo.link.s.status); + lio->linfo.link.s.link_up); goto lio_xmit_failed; } @@ -2726,62 +2937,25 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) /* defer sending if queue is full */ stats->tx_iq_busy++; netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", - ndata.q_no); + lio->txq); return NETDEV_TX_BUSY; } } /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", - * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no ); + * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); */ ndata.datasize = skb->len; cmdsetup.u64 = 0; - cmdsetup.s.ifidx = lio->linfo.ifidx; + cmdsetup.s.iq_no = iq_no; if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) { - tag = get_ipv4_5tuple_tag(skb); - - cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; - - if (ip_hdr(skb)->ihl > 5) - cmdsetup.s.ipv4opts_ipv6exthdr = - OCT_PKT_PARAM_IPV4OPTS; - - } else if (is_ipv6(skb)) { - tag = get_ipv6_5tuple_tag(skb); - - cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; - - if (is_with_extn_hdr(skb)) - cmdsetup.s.ipv4opts_ipv6exthdr = - OCT_PKT_PARAM_IPV6EXTHDR; - - } else if (is_vlan(skb)) { - if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto - == htons(ETH_P_IP) && - !is_ip_fragmented(skb) && is_tcpudp(skb)) { - tag = get_ipv4_5tuple_tag(skb); - - cmdsetup.s.cksum_offset = - sizeof(struct vlan_ethhdr) + 1; - - if (ip_hdr(skb)->ihl > 5) - cmdsetup.s.ipv4opts_ipv6exthdr = - OCT_PKT_PARAM_IPV4OPTS; - - } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto - == htons(ETH_P_IPV6)) { - tag = get_ipv6_5tuple_tag(skb); - - cmdsetup.s.cksum_offset = - sizeof(struct vlan_ethhdr) + 1; - - if (is_with_extn_hdr(skb)) - cmdsetup.s.ipv4opts_ipv6exthdr = - OCT_PKT_PARAM_IPV6EXTHDR; - } + if (skb->encapsulation) { + cmdsetup.s.tnl_csum = 1; + stats->tx_vxlan++; + } else { + cmdsetup.s.transport_csum = 1; } } if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { @@ -2791,20 +2965,21 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (skb_shinfo(skb)->nr_frags == 0) { cmdsetup.s.u.datasize = skb->len; - octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); + /* Offload checksum calculation for TCP/UDP packets */ - ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, - skb->data, - skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { + dptr = dma_map_single(&oct->pci_dev->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", __func__); return NETDEV_TX_BUSY; } - finfo->dptr = ndata.cmd.dptr; - + ndata.cmd.cmd2.dptr = dptr; + finfo->dptr = dptr; ndata.reqtype = REQTYPE_NORESP_NET; } else { @@ -2812,19 +2987,20 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) struct skb_frag_struct *frag; struct octnic_gather *g; - spin_lock(&lio->lock); - g = (struct octnic_gather *)list_delete_head(&lio->glist); - spin_unlock(&lio->lock); + spin_lock(&lio->glist_lock[q_idx]); + g = (struct octnic_gather *) + list_delete_head(&lio->glist[q_idx]); + spin_unlock(&lio->glist_lock[q_idx]); if (!g) { netif_info(lio, tx_err, lio->netdev, "Transmit scatter gather: glist 
null!\n"); - goto lio_xmit_dma_failed; + goto lio_xmit_failed; } cmdsetup.s.gather = 1; cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); - octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); memset(g->sg, 0, g->sg_size); @@ -2851,36 +3027,52 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag->size, DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, + g->sg[i >> 2].ptr[i & 3])) { + dma_unmap_single(&oct->pci_dev->dev, + g->sg[0].ptr[0], + skb->len - skb->data_len, + DMA_TO_DEVICE); + for (j = 1; j < i; j++) { + frag = &skb_shinfo(skb)->frags[j - 1]; + dma_unmap_page(&oct->pci_dev->dev, + g->sg[j >> 2].ptr[j & 3], + frag->size, + DMA_TO_DEVICE); + } + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", + __func__); + return NETDEV_TX_BUSY; + } + add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); i++; } - ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, - g->sg, g->sg_size, - DMA_TO_DEVICE); - if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { - dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", - __func__); - dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0], - skb->len - skb->data_len, - DMA_TO_DEVICE); - return NETDEV_TX_BUSY; - } + dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr, + g->sg_size, DMA_TO_DEVICE); + dptr = g->sg_dma_ptr; - finfo->dptr = ndata.cmd.dptr; + ndata.cmd.cmd2.dptr = dptr; + finfo->dptr = dptr; finfo->g = g; ndata.reqtype = REQTYPE_NORESP_NET_SG; } - if (skb_shinfo(skb)->gso_size) { - struct octeon_instr_irh *irh = - (struct octeon_instr_irh *)&ndata.cmd.irh; - union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0]; + irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; + tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; - irh->len = 1; /* to indicate that ossp[0] contains tx_info */ + if (skb_shinfo(skb)->gso_size) { tx_info->s.gso_size = skb_shinfo(skb)->gso_size; tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; + stats->tx_gso++; + } + + /* HW insert VLAN tag */ + if (skb_vlan_tag_present(skb)) { + irh->priority = skb_vlan_tag_get(skb) >> 13; + irh->vlan = skb_vlan_tag_get(skb) & 0xfff; } xmit_more = skb->xmit_more; @@ -2890,7 +3082,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) else status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); if (status == IQ_SEND_FAILED) - goto lio_xmit_dma_failed; + goto lio_xmit_failed; netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); @@ -2899,19 +3091,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - stats->tx_done++; + if (skb_shinfo(skb)->gso_size) + stats->tx_done += skb_shinfo(skb)->gso_segs; + else + stats->tx_done++; stats->tx_tot_bytes += skb->len; return NETDEV_TX_OK; -lio_xmit_dma_failed: - dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr, - ndata.datasize, DMA_TO_DEVICE); lio_xmit_failed: stats->tx_dropped++; netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", iq_no, stats->tx_dropped); - recv_buffer_free(skb); + if (dptr) + dma_unmap_single(&oct->pci_dev->dev, dptr, + ndata.datasize, DMA_TO_DEVICE); + tx_buffer_free(skb); return NETDEV_TX_OK; } @@ -2931,27 +3126,145 @@ static void liquidio_tx_timeout(struct net_device *netdev) txqs_wake(netdev); } -int liquidio_set_feature(struct net_device *netdev, int cmd) +static int liquidio_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), + u16 vid) { struct 
lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.u64 = 0; - nctrl.ncmd.s.cmd = cmd; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6; + nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + ret); + } + + return ret; +} + +static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), + u16 vid) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nparams.resp_order = OCTEON_RESP_NORESPONSE; + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + ret); + } + return ret; +} + +/** Sending command to enable/disable RX checksum offload + * @param netdev pointer to network device + * @param command OCTNET_CMD_TNL_RX_CSUM_CTL + * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ + * OCTNET_CMD_RXCSUM_DISABLE + * @returns SUCCESS or FAILURE + */ +int liquidio_set_rxcsum_command(struct net_device *netdev, int command, + u8 rx_cmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.param1 = rx_cmd; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, + "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", + ret); + } + return ret; +} + +/** Sending command to add/delete VxLAN UDP port to firmware + * @param netdev pointer to network device + * @param command OCTNET_CMD_VXLAN_PORT_CONFIG + * @param vxlan_port VxLAN port to be added or deleted + * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, + * OCTNET_CMD_VXLAN_PORT_DEL + * @returns SUCCESS or FAILURE + */ +static int liquidio_vxlan_port_command(struct net_device *netdev, int command, + u16 vxlan_port, u8 vxlan_cmd_bit) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.more = vxlan_cmd_bit; + nctrl.ncmd.s.param1 = vxlan_port; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, + "VxLAN port add/delete failed in core (ret:0x%x)\n", + ret); + } + return 
ret; +} + +int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = cmd; + nctrl.ncmd.s.param1 = param1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", ret); @@ -3007,14 +3320,55 @@ static int liquidio_set_features(struct net_device *netdev, return 0; if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) - liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); else if (!(features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) - liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE); + liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + + /* Sending command to firmware to enable/disable RX checksum + * offload settings using ethtool + */ + if (!(netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + (features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, + OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + else if ((netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + !(features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_DISABLE); return 0; } +static void liquidio_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_ADD); +} + +static void liquidio_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_DEL); +} + static struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, @@ -3023,10 +3377,15 @@ static struct net_device_ops lionetdevops = { .ndo_set_mac_address = liquidio_set_mac, .ndo_set_rx_mode = liquidio_set_mcast_list, .ndo_tx_timeout = liquidio_tx_timeout, + + .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, .ndo_do_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, + .ndo_udp_tunnel_add = liquidio_add_vxlan_port, + .ndo_udp_tunnel_del = liquidio_del_vxlan_port, }; /** \brief Entry point for the liquidio module @@ -3081,24 +3440,27 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) { struct octeon_device *oct = (struct octeon_device *)buf; struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; - int ifidx = 0; + int gmxport = 0; union oct_link_status *ls; int i; - if ((recv_pkt->buffer_size[0] != sizeof(*ls)) || - (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) { + if (recv_pkt->buffer_size[0] != sizeof(*ls)) { dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", recv_pkt->buffer_size[0], - 
recv_pkt->rh.r_nic_info.ifidx); + recv_pkt->rh.r_nic_info.gmxport); goto nic_info_err; } - ifidx = recv_pkt->rh.r_nic_info.ifidx; + gmxport = recv_pkt->rh.r_nic_info.gmxport; ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]); octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); - - update_link_status(oct->props[ifidx].netdev, ls); + for (i = 0; i < oct->ifcount; i++) { + if (oct->props[i].gmxport == gmxport) { + update_link_status(oct->props[i].netdev, ls); + break; + } + } nic_info_err: for (i = 0; i < recv_pkt->buffer_count; i++) @@ -3124,13 +3486,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; struct octdev_props *props; - int retval, num_iqueues, num_oqueues, q_no; - u64 q_mask; - int num_cpus = num_online_cpus(); + int retval, num_iqueues, num_oqueues; union oct_nic_if_cfg if_cfg; unsigned int base_queue; unsigned int gmx_port_id; u32 resp_size, ctx_size; + u32 ifidx_or_pfnum; /* This is to handle link status changes */ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, @@ -3166,14 +3527,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i); gmx_port_id = CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i); - if (num_iqueues > num_cpus) - num_iqueues = num_cpus; - if (num_oqueues > num_cpus) - num_oqueues = num_cpus; + ifidx_or_pfnum = i; + dev_dbg(&octeon_dev->pci_dev->dev, "requesting config for interface %d, iqs %d, oqs %d\n", - i, num_iqueues, num_oqueues); - ACCESS_ONCE(ctx->cond) = 0; + ifidx_or_pfnum, num_iqueues, num_oqueues); + WRITE_ONCE(ctx->cond, 0); ctx->octeon_id = lio_get_device_id(octeon_dev); init_waitqueue_head(&ctx->wc); @@ -3182,16 +3541,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if_cfg.s.num_oqueues = num_oqueues; if_cfg.s.base_queue = base_queue; if_cfg.s.gmx_port_id = gmx_port_id; + + sc->iq_no = 0; + octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, - OPCODE_NIC_IF_CFG, i, + OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); sc->callback = if_cfg_callback; sc->callback_arg = sc; - sc->wait_time = 1000; + sc->wait_time = 3000; retval = octeon_send_soft_command(octeon_dev, sc); - if (retval) { + if (retval == IQ_SEND_FAILED) { dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed status: %x\n", retval); @@ -3233,8 +3595,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) goto setup_nic_dev_fail; } - props = &octeon_dev->props[i]; - props->netdev = netdev; + SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); if (num_iqueues > 1) lionetdevops.ndo_select_queue = select_q; @@ -3248,23 +3609,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) memset(lio, 0, sizeof(struct lio)); - lio->linfo.ifidx = resp->cfg_info.ifidx; - lio->ifidx = resp->cfg_info.ifidx; + lio->ifidx = ifidx_or_pfnum; + + props = &octeon_dev->props[i]; + props->gmxport = resp->cfg_info.linfo.gmxport; + props->netdev = netdev; lio->linfo.num_rxpciq = num_oqueues; lio->linfo.num_txpciq = num_iqueues; - q_mask = resp->cfg_info.oqmask; - /* q_mask is 0-based and already verified mask is nonzero */ for (j = 0; j < num_oqueues; j++) { - q_no = __ffs64(q_mask); - q_mask &= (~(1UL << q_no)); - lio->linfo.rxpciq[j] = q_no; + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; } - q_mask = resp->cfg_info.iqmask; for (j = 0; j < num_iqueues; j++) { - q_no = __ffs64(q_mask); - q_mask &= (~(1UL << q_no)); - lio->linfo.txpciq[j] = q_no; + 
lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; } lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; @@ -3273,16 +3632,41 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); lio->dev_capability = NETIF_F_HIGHDMA - | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_SG | NETIF_F_RXCSUM - | NETIF_F_TSO | NETIF_F_TSO6 - | NETIF_F_LRO; + | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_SG | NETIF_F_RXCSUM + | NETIF_F_GRO + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); - netdev->features = lio->dev_capability; + /* Copy of transmit encapsulation capabilities: + * TSO, TSO6, Checksums for this device + */ + lio->enc_dev_capability = NETIF_F_IP_CSUM + | NETIF_F_IPV6_CSUM + | NETIF_F_GSO_UDP_TUNNEL + | NETIF_F_HW_CSUM | NETIF_F_SG + | NETIF_F_RXCSUM + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; + + netdev->hw_enc_features = (lio->enc_dev_capability & + ~NETIF_F_LRO); + + lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; + netdev->vlan_features = lio->dev_capability; + /* Add any unchangeable hw features */ + lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->features = (lio->dev_capability & ~NETIF_F_LRO); netdev->hw_features = lio->dev_capability; + /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ + netdev->hw_features = netdev->hw_features & + ~NETIF_F_HW_VLAN_CTAG_RX; /* Point to the properties for octeon device to which this * interface belongs. @@ -3290,7 +3674,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->oct_dev = octeon_dev; lio->octprops = props; lio->netdev = netdev; - spin_lock_init(&lio->lock); dev_dbg(&octeon_dev->pci_dev->dev, "if%d gmx: %d hw_addr: 0x%llx\n", i, @@ -3305,23 +3688,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) ether_addr_copy(netdev->dev_addr, mac); - if (setup_io_queues(octeon_dev, netdev)) { + /* By default all interfaces on a single Octeon uses the same + * tx and rx queues + */ + lio->txq = lio->linfo.txpciq[0].s.q_no; + lio->rxq = lio->linfo.rxpciq[0].s.q_no; + if (setup_io_queues(octeon_dev, i)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); - /* By default all interfaces on a single Octeon uses the same - * tx and rx queues - */ - lio->txq = lio->linfo.txpciq[0]; - lio->rxq = lio->linfo.rxpciq[0]; - lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); - if (setup_glist(lio)) { + if (setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; @@ -3329,11 +3711,17 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) /* Register ethtool support */ liquidio_set_ethtool_ops(netdev); + octeon_dev->priv_flags = 0x0; - liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); + if (netdev->features & NETIF_F_LRO) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + + liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0); if ((debug != -1) && (debug & NETIF_MSG_HW)) - liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE); + liquidio_set_feature(netdev, + OCTNET_CMD_VERBOSE_ENABLE, 0); /* Register the network device with the OS */ if (register_netdev(netdev)) { @@ -3345,16 
+3733,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); netif_carrier_off(netdev); - - if (lio->linfo.link.s.status) { - netif_carrier_on(netdev); - start_txq(netdev); - } else { - netif_carrier_off(netdev); - } + lio->link_changes++; ifstate_set(lio, LIO_IFSTATE_REGISTERED); + /* Sending command to firmware to enable Rx checksum offload + * by default at the time of setup of Liquidio driver for + * this device + */ + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, + OCTNET_CMD_TXCSUM_ENABLE); + dev_dbg(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup successful\n", i); @@ -3385,7 +3776,7 @@ setup_nic_dev_fail: static int liquidio_init_nic_module(struct octeon_device *oct) { struct oct_intrmod_cfg *intrmod_cfg; - int retval = 0; + int i, retval = 0; int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); @@ -3399,6 +3790,9 @@ static int liquidio_init_nic_module(struct octeon_device *oct) memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); + for (i = 0; i < MAX_OCTEON_LINKS; i++) + oct->props[i].gmxport = -1; + retval = setup_nic_devices(oct); if (retval) { dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); @@ -3409,15 +3803,19 @@ static int liquidio_init_nic_module(struct octeon_device *oct) /* Initialize interrupt moderation params */ intrmod_cfg = &((struct octeon_device *)oct)->intrmod; - intrmod_cfg->intrmod_enable = 1; - intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; - intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; - intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; - intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER; - intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER; - intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER; - intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER; - + intrmod_cfg->rx_enable = 1; + intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; + intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; + intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; + intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER; + intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER; + intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER; + intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER; + intrmod_cfg->tx_enable = 1; + intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER; + intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER; + intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); + intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); return retval; @@ -3480,6 +3878,7 @@ static void nic_starter(struct work_struct *work) static int octeon_device_init(struct octeon_device *octeon_dev) { int j, ret; + char bootcmd[] = "\n"; struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)octeon_dev->priv; atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); @@ -3557,6 +3956,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev) /* Release any previously allocated queues */ for (j = 0; j < octeon_dev->num_oqs; j++) octeon_delete_droq(octeon_dev, j); + return 1; } 
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); @@ -3579,7 +3979,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev) /* Setup the interrupt handler and record the INT SUM register address */ - octeon_setup_interrupt(octeon_dev); + if (octeon_setup_interrupt(octeon_dev)) + return 1; /* Enable Octeon device interrupts */ octeon_dev->fn_list.enable_interrupt(octeon_dev->chip); @@ -3591,14 +3992,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); - if (ddr_timeout == 0) { - dev_info(&octeon_dev->pci_dev->dev, - "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); - } + if (ddr_timeout == 0) + dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); /* Wait for the octeon to initialize DDR after the soft-reset. */ + while (ddr_timeout == 0) { + set_current_state(TASK_INTERRUPTIBLE); + if (schedule_timeout(HZ / 10)) { + /* user probably pressed Control-C */ + return 1; + } + } ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); if (ret) { dev_err(&octeon_dev->pci_dev->dev, @@ -3612,6 +4018,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev) return 1; } + /* Divert uboot to take commands from host instead. */ + ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); + dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); ret = octeon_init_consoles(octeon_dev); if (ret) { diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 0ac347ccc..199a8b9c7 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -30,10 +30,10 @@ #include "octeon_config.h" -#define LIQUIDIO_VERSION "1.1.9" -#define LIQUIDIO_MAJOR_VERSION 1 -#define LIQUIDIO_MINOR_VERSION 1 -#define LIQUIDIO_MICRO_VERSION 9 +#define LIQUIDIO_BASE_VERSION "1.4" +#define LIQUIDIO_MICRO_VERSION ".1" +#define LIQUIDIO_PACKAGE "" +#define LIQUIDIO_VERSION "1.4.1" #define CONTROL_IQ 0 /** Tag types used by Octeon cores in its work. 
*/ @@ -174,9 +174,11 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, /*------------------------- End Scatter/Gather ---------------------------*/ #define OCTNET_FRM_PTP_HEADER_SIZE 8 -#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */ -#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE) +#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */ + +#define OCTNET_MIN_FRM_SIZE 64 + #define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE) #define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE) @@ -212,6 +214,17 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VERBOSE_ENABLE 0x14 #define OCTNET_CMD_VERBOSE_DISABLE 0x15 +#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16 +#define OCTNET_CMD_ADD_VLAN_FILTER 0x17 +#define OCTNET_CMD_DEL_VLAN_FILTER 0x18 +#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19 +#define OCTNET_CMD_VXLAN_PORT_ADD 0x0 +#define OCTNET_CMD_VXLAN_PORT_DEL 0x1 +#define OCTNET_CMD_RXCSUM_ENABLE 0x0 +#define OCTNET_CMD_RXCSUM_DISABLE 0x1 +#define OCTNET_CMD_TXCSUM_ENABLE 0x0 +#define OCTNET_CMD_TXCSUM_DISABLE 0x1 + /* RX(packets coming from wire) Checksum verification flags */ /* TCP/UDP csum */ #define CNNIC_L4SUM_VERIFIED 0x1 @@ -258,19 +271,19 @@ union octnet_cmd { u64 more:6; /* How many udd words follow the command */ - u64 param1:29; + u64 reserved:29; - u64 param2:16; + u64 param1:16; - u64 param3:8; + u64 param2:8; #else - u64 param3:8; + u64 param2:8; - u64 param2:16; + u64 param1:16; - u64 param1:29; + u64 reserved:29; u64 more:6; @@ -283,8 +296,140 @@ union octnet_cmd { #define OCTNET_CMD_SIZE (sizeof(union octnet_cmd)) +/* Instruction Header(DPI) - for OCTEON-III models */ +struct octeon_instr_ih3 { +#ifdef __BIG_ENDIAN_BITFIELD + + /** Reserved3 */ + u64 reserved3:1; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Data length OR no. of entries in gather list */ + u64 dlengsz:14; + + /** Front Data size */ + u64 fsz:6; + + /** Reserved2 */ + u64 reserved2:4; + + /** PKI port kind - PKIND */ + u64 pkind:6; + + /** Reserved1 */ + u64 reserved1:32; + +#else + /** Reserved1 */ + u64 reserved1:32; + + /** PKI port kind - PKIND */ + u64 pkind:6; + + /** Reserved2 */ + u64 reserved2:4; + + /** Front Data size */ + u64 fsz:6; + + /** Data length OR no. of entries in gather list */ + u64 dlengsz:14; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Reserved3 */ + u64 reserved3:1; + +#endif +}; + +/* Optional PKI Instruction Header(PKI IH) - for OCTEON-III models */ +/** BIG ENDIAN format. 
*/ +struct octeon_instr_pki_ih3 { +#ifdef __BIG_ENDIAN_BITFIELD + + /** Wider bit */ + u64 w:1; + + /** Raw mode indicator 1 = RAW */ + u64 raw:1; + + /** Use Tag */ + u64 utag:1; + + /** Use QPG */ + u64 uqpg:1; + + /** Reserved2 */ + u64 reserved2:1; + + /** Parse Mode */ + u64 pm:3; + + /** Skip Length */ + u64 sl:8; + + /** Use Tag Type */ + u64 utt:1; + + /** Tag type */ + u64 tagtype:2; + + /** Reserved1 */ + u64 reserved1:2; + + /** QPG Value */ + u64 qpg:11; + + /** Tag Value */ + u64 tag:32; + +#else + + /** Tag Value */ + u64 tag:32; + + /** QPG Value */ + u64 qpg:11; + + /** Reserved1 */ + u64 reserved1:2; + + /** Tag type */ + u64 tagtype:2; + + /** Use Tag Type */ + u64 utt:1; + + /** Skip Length */ + u64 sl:8; + + /** Parse Mode */ + u64 pm:3; + + /** Reserved2 */ + u64 reserved2:1; + + /** Use QPG */ + u64 uqpg:1; + + /** Use Tag */ + u64 utag:1; + + /** Raw mode indicator 1 = RAW */ + u64 raw:1; + + /** Wider bit */ + u64 w:1; +#endif + +}; + /** Instruction Header */ -struct octeon_instr_ih { +struct octeon_instr_ih2 { #ifdef __BIG_ENDIAN_BITFIELD /** Raw mode indicator 1 = RAW */ u64 raw:1; @@ -348,15 +493,15 @@ struct octeon_instr_irh { u64 opcode:4; u64 rflag:1; u64 subcode:7; - u64 len:3; - u64 rid:13; - u64 reserved:4; + u64 vlan:12; + u64 priority:3; + u64 reserved:5; u64 ossp:32; /* opcode/subcode specific parameters */ #else u64 ossp:32; /* opcode/subcode specific parameters */ - u64 reserved:4; - u64 rid:13; - u64 len:3; + u64 reserved:5; + u64 priority:3; + u64 vlan:12; u64 subcode:7; u64 rflag:1; u64 opcode:4; @@ -383,75 +528,77 @@ union octeon_rh { struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ - u64 reserved:4; - u64 ossp:32; /** opcode/subcode specific parameters */ + u64 len:3; /** additional 64-bit words */ + u64 reserved:17; + u64 ossp:32; /** opcode/subcode specific parameters */ } r; struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ - u64 extra:24; - u64 link:8; + u64 len:3; /** additional 64-bit words */ + u64 extra:28; + u64 vlan:12; + u64 priority:3; u64 csum_verified:3; /** checksum verified. */ u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */ + u64 encap_on:1; + u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */ } r_dh; struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ + u64 len:3; /** additional 64-bit words */ + u64 reserved:11; u64 num_gmx_ports:8; - u64 max_nic_ports:8; + u64 max_nic_ports:10; u64 app_cap_flags:4; - u64 app_mode:16; + u64 app_mode:8; + u64 pkind:8; } r_core_drv_init; struct { u64 opcode:4; u64 subcode:8; u64 len:3; /** additional 64-bit words */ - u64 rid:13; - u64 reserved:4; + u64 reserved:8; u64 extra:25; - u64 ifidx:7; + u64 gmxport:16; } r_nic_info; #else u64 u64; struct { u64 ossp:32; /** opcode/subcode specific parameters */ - u64 reserved:4; - u64 rid:13; /** req id in response to pkt sent by host */ + u64 reserved:17; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; } r; struct { + u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */ + u64 encap_on:1; u64 has_hwtstamp:1; /** 1 = has hwtstamp */ u64 csum_verified:3; /** checksum verified. 
*/ - u64 link:8; - u64 extra:24; - u64 rid:13; /** req id in response to pkt sent by host */ + u64 priority:3; + u64 vlan:12; + u64 extra:28; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; } r_dh; struct { - u64 app_mode:16; + u64 pkind:8; + u64 app_mode:8; u64 app_cap_flags:4; - u64 max_nic_ports:8; + u64 max_nic_ports:10; u64 num_gmx_ports:8; - u64 rid:13; + u64 reserved:11; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; } r_core_drv_init; struct { - u64 ifidx:7; + u64 gmxport:16; u64 extra:25; - u64 reserved:4; - u64 rid:13; + u64 reserved:8; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; @@ -461,30 +608,25 @@ union octeon_rh { #define OCT_RH_SIZE (sizeof(union octeon_rh)) -#define OCT_PKT_PARAM_IPV4OPTS 1 -#define OCT_PKT_PARAM_IPV6EXTHDR 2 - union octnic_packet_params { u32 u32; struct { #ifdef __BIG_ENDIAN_BITFIELD - u32 reserved:6; + u32 reserved:24; + u32 ip_csum:1; /* Perform IP header checksum(s) */ + /* Perform Outer transport header checksum */ + u32 transport_csum:1; + /* Find tunnel, and perform transport csum. */ u32 tnl_csum:1; - u32 ip_csum:1; - u32 ipv4opts_ipv6exthdr:2; - u32 ipsec_ops:4; - u32 tsflag:1; - u32 csoffset:9; - u32 ifidx:8; + u32 tsflag:1; /* Timestamp this packet */ + u32 ipsec_ops:4; /* IPsec operation */ #else - u32 ifidx:8; - u32 csoffset:9; - u32 tsflag:1; u32 ipsec_ops:4; - u32 ipv4opts_ipv6exthdr:2; - u32 ip_csum:1; + u32 tsflag:1; u32 tnl_csum:1; - u32 reserved:6; + u32 transport_csum:1; + u32 ip_csum:1; + u32 reserved:24; #endif } s; }; @@ -496,56 +638,96 @@ union oct_link_status { struct { #ifdef __BIG_ENDIAN_BITFIELD u64 duplex:8; - u64 status:8; u64 mtu:16; u64 speed:16; + u64 link_up:1; u64 autoneg:1; - u64 interface:4; + u64 if_mode:5; u64 pause:1; - u64 reserved:10; + u64 flashing:1; + u64 reserved:15; #else - u64 reserved:10; + u64 reserved:15; + u64 flashing:1; u64 pause:1; - u64 interface:4; + u64 if_mode:5; u64 autoneg:1; + u64 link_up:1; u64 speed:16; u64 mtu:16; - u64 status:8; u64 duplex:8; #endif } s; }; +/** The txpciq info passed to host from the firmware */ + +union oct_txpciq { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 q_no:8; + u64 port:8; + u64 pkind:6; + u64 use_qpg:1; + u64 qpg:11; + u64 reserved:30; +#else + u64 reserved:30; + u64 qpg:11; + u64 use_qpg:1; + u64 pkind:6; + u64 port:8; + u64 q_no:8; +#endif + } s; +}; + +/** The rxpciq info passed to host from the firmware */ + +union oct_rxpciq { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 q_no:8; + u64 reserved:56; +#else + u64 reserved:56; + u64 q_no:8; +#endif + } s; +}; + /** Information for a OCTEON ethernet interface shared between core & host. 
*/ struct oct_link_info { union oct_link_status link; u64 hw_addr; #ifdef __BIG_ENDIAN_BITFIELD - u16 gmxport; - u8 rsvd[3]; - u8 num_txpciq; - u8 num_rxpciq; - u8 ifidx; + u64 gmxport:16; + u64 rsvd:32; + u64 num_txpciq:8; + u64 num_rxpciq:8; #else - u8 ifidx; - u8 num_rxpciq; - u8 num_txpciq; - u8 rsvd[3]; - u16 gmxport; + u64 num_rxpciq:8; + u64 num_txpciq:8; + u64 rsvd:32; + u64 gmxport:16; #endif - u8 txpciq[MAX_IOQS_PER_NICIF]; - u8 rxpciq[MAX_IOQS_PER_NICIF]; + union oct_txpciq txpciq[MAX_IOQS_PER_NICIF]; + union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF]; }; #define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info)) struct liquidio_if_cfg_info { - u64 ifidx; u64 iqmask; /** mask for IQs enabled for the port */ u64 oqmask; /** mask for OQs enabled for the port */ struct oct_link_info linfo; /** initial link information */ + char liquidio_firmware_version[32]; }; /** Stats for each NIC port in RX direction. */ @@ -570,10 +752,18 @@ struct nic_rx_stats { u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; + u64 fw_rx_vxlan; + u64 fw_rx_vxlan_err; + + /* LRO */ u64 fw_lro_pkts; /* Number of packets that are LROed */ u64 fw_lro_octs; /* Number of octets that are LROed */ u64 fw_total_lro; /* Number of LRO packets formed */ u64 fw_lro_aborts; /* Number of times lRO of packet aborted */ + u64 fw_lro_aborts_port; + u64 fw_lro_aborts_seq; + u64 fw_lro_aborts_tsval; + u64 fw_lro_aborts_timer; /* intrmod: packet forward rate */ u64 fwd_rate; }; @@ -597,9 +787,14 @@ struct nic_tx_stats { /* firmware stats */ u64 fw_total_sent; u64 fw_total_fwd; + u64 fw_total_fwd_bytes; u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; + u64 fw_err_tso; + u64 fw_tso; /* number of tso requests */ + u64 fw_tso_fwd; /* number of packets segmented in tso */ + u64 fw_tx_vxlan; }; struct oct_link_stats { @@ -630,23 +825,44 @@ struct oct_mdio_cmd { #define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats)) +/* intrmod: max. packet rate threshold */ +#define LIO_INTRMOD_MAXPKT_RATETHR 196608 +/* intrmod: min. packet rate threshold */ +#define LIO_INTRMOD_MINPKT_RATETHR 9216 +/* intrmod: max. packets to trigger interrupt */ +#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384 +/* intrmod: min. packets to trigger interrupt */ +#define LIO_INTRMOD_RXMINCNT_TRIGGER 1 +/* intrmod: max. time to trigger interrupt */ +#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128 +/* 66xx:intrmod: min. time to trigger interrupt + * (value of 1 is optimum for TCP_RR) + */ +#define LIO_INTRMOD_RXMINTMR_TRIGGER 1 + +/* intrmod: max. packets to trigger interrupt */ +#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64 +/* intrmod: min. 
packets to trigger interrupt */ +#define LIO_INTRMOD_TXMINCNT_TRIGGER 0 + +/* intrmod: poll interval in seconds */ #define LIO_INTRMOD_CHECK_INTERVAL 1 -#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */ -#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */ -#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */ -#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */ -#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */ -#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */ struct oct_intrmod_cfg { - u64 intrmod_enable; - u64 intrmod_check_intrvl; - u64 intrmod_maxpkt_ratethr; - u64 intrmod_minpkt_ratethr; - u64 intrmod_maxcnt_trigger; - u64 intrmod_maxtmr_trigger; - u64 intrmod_mincnt_trigger; - u64 intrmod_mintmr_trigger; + u64 rx_enable; + u64 tx_enable; + u64 check_intrvl; + u64 maxpkt_ratethr; + u64 minpkt_ratethr; + u64 rx_maxcnt_trigger; + u64 rx_mincnt_trigger; + u64 rx_maxtmr_trigger; + u64 rx_mintmr_trigger; + u64 tx_mincnt_trigger; + u64 tx_maxcnt_trigger; + u64 rx_frames; + u64 tx_frames; + u64 rx_usecs; }; #define BASE_QUEUE_NOT_REQUESTED 65535 @@ -659,9 +875,9 @@ union oct_nic_if_cfg { u64 num_iqueues:16; u64 num_oqueues:16; u64 gmx_port_id:8; - u64 reserved:8; + u64 vf_id:8; #else - u64 reserved:8; + u64 vf_id:8; u64 gmx_port_id:8; u64 num_oqueues:16; u64 num_iqueues:16; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index 62a8dd5cd..b3396e3a8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -37,7 +37,7 @@ /* Maximum octeon devices defined as MAX_OCTEON_NICIF to support * multiple(<= MAX_OCTEON_NICIF) Miniports */ -#define MAX_OCTEON_NICIF 32 +#define MAX_OCTEON_NICIF 128 #define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF #define MAX_OCTEON_LINKS MAX_OCTEON_NICIF #define MAX_OCTEON_MULTICAST_ADDR 32 @@ -135,7 +135,7 @@ #define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp) /* Max IOQs per OCTEON Link */ -#define MAX_IOQS_PER_NICIF 32 +#define MAX_IOQS_PER_NICIF 64 enum lio_card_type { LIO_210SV = 0, /* Two port, 66xx */ @@ -226,7 +226,7 @@ struct octeon_oq_config { */ u64 refill_threshold:16; - /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */ + /** If set, the Output queue uses info-pointer mode. (Default: 1) */ u64 info_ptr:32; /* Max number of OQs available */ @@ -236,7 +236,7 @@ struct octeon_oq_config { /* Max number of OQs available */ u64 max_oqs:8; - /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */ + /** If set, the Output queue uses info-pointer mode. 
(Default: 1) */ u64 info_ptr:32; /** The number of buffers that were consumed during packet processing by @@ -416,9 +416,11 @@ struct octeon_config { #define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS) /* Maximum number of Octeon Instruction (command) queues */ -#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES +#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES +/* Maximum number of Octeon Output queues */ +#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES -/* Maximum number of Octeon Instruction (command) queues */ -#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES +#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES +#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES #endif /* __OCTEON_CONFIG_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 466147e40..bbb50ea66 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -23,27 +23,14 @@ /** * @file octeon_console.c */ -#include -#include -#include -#include #include -#include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" -#include "cn66xx_regs.h" -#include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" #include "octeon_mem_ops.h" static void octeon_remote_lock(void); @@ -51,6 +38,8 @@ static void octeon_remote_unlock(void); static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, const char *name, u32 flags); +static int octeon_console_read(struct octeon_device *oct, u32 console_num, + char *buffer, u32 buf_size); #define MIN(a, b) min((a), (b)) #define CAST_ULL(v) ((u64)(v)) @@ -170,8 +159,8 @@ struct octeon_pci_console_desc { offsetof(struct cvmx_bootmem_desc, field), \ SIZEOF_FIELD(struct cvmx_bootmem_desc, field)) -#define __cvmx_bootmem_lock(flags) -#define __cvmx_bootmem_unlock(flags) +#define __cvmx_bootmem_lock(flags) (flags = flags) +#define __cvmx_bootmem_unlock(flags) (flags = flags) /** * This macro returns a member of the @@ -234,7 +223,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct, u32 len) { addr += offsetof(struct cvmx_bootmem_named_block_desc, name); - octeon_pci_read_core_mem(oct, addr, str, len); + octeon_pci_read_core_mem(oct, addr, (u8 *)str, len); str[len] = 0; } @@ -323,6 +312,9 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, if (name && named_size) { char *name_tmp = kmalloc(name_length + 1, GFP_KERNEL); + if (!name_tmp) + break; + CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr, name_tmp, name_length); @@ -383,7 +375,7 @@ static void octeon_remote_unlock(void) int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str, u32 wait_hundredths) { - u32 len = strlen(cmd_str); + u32 len = (u32)strlen(cmd_str); dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str); @@ -440,8 +432,7 @@ int octeon_wait_for_bootloader(struct octeon_device *oct, } static void octeon_console_handle_result(struct octeon_device *oct, - size_t console_num, - char *buffer, s32 bytes_read) + size_t console_num) { struct octeon_console *console; @@ -492,7 +483,7 @@ static void check_console(struct work_struct *work) struct octeon_console *console; struct cavium_wk *wk = (struct cavium_wk 
*)work; struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; - size_t console_num = wk->ctxul; + u32 console_num = (u32)wk->ctxul; u32 delay; console = &oct->console[console_num]; @@ -505,20 +496,17 @@ static void check_console(struct work_struct *work) */ bytes_read = octeon_console_read(oct, console_num, console_buffer, - sizeof(console_buffer) - 1, 0); + sizeof(console_buffer) - 1); if (bytes_read > 0) { total_read += bytes_read; - if (console->waiting) { - octeon_console_handle_result(oct, console_num, - console_buffer, - bytes_read); - } + if (console->waiting) + octeon_console_handle_result(oct, console_num); if (octeon_console_debug_enabled(console_num)) { output_console_line(oct, console, console_num, console_buffer, bytes_read); } } else if (bytes_read < 0) { - dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n", + dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n", console_num, bytes_read); } @@ -530,7 +518,7 @@ static void check_console(struct work_struct *work) */ if (octeon_console_debug_enabled(console_num) && (total_read == 0) && (console->leftover[0])) { - dev_info(&oct->pci_dev->dev, "%lu: %s\n", + dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, console->leftover); console->leftover[0] = '\0'; } @@ -675,8 +663,8 @@ static inline int octeon_console_avail_bytes(u32 buffer_size, octeon_console_free_bytes(buffer_size, wr_idx, rd_idx); } -int octeon_console_read(struct octeon_device *oct, u32 console_num, - char *buffer, u32 buf_size, u32 flags) +static int octeon_console_read(struct octeon_device *oct, u32 console_num, + char *buffer, u32 buf_size) { int bytes_to_read; u32 rd_idx, wr_idx; @@ -712,7 +700,7 @@ int octeon_console_read(struct octeon_device *oct, u32 console_num, bytes_to_read = console->buffer_size - rd_idx; octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx, - buffer, bytes_to_read); + (u8 *)buffer, bytes_to_read); octeon_write_device_mem32(oct, console->addr + offsetof(struct octeon_pci_console, output_read_index), diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 8e23e3fad..0eb504a43 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -19,28 +19,19 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include -#include -#include -#include #include #include -#include #include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" #include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" #include "liquidio_image.h" #include "octeon_mem_ops.h" @@ -449,10 +440,10 @@ static struct octeon_config_ptr { }; static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = { - "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", + "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", - "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", + "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", "INVALID" }; @@ -550,17 +541,19 @@ static char *get_oct_app_string(u32 app_mode) return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START]; } +u8 fbuf[4 * 1024 * 1024]; + int octeon_download_firmware(struct octeon_device *oct, const u8 *data, size_t size) { int ret = 0; - u8 *p; - u8 *buffer; + u8 *p = fbuf; u32 crc32_result; u64 load_addr; u32 image_len; struct octeon_firmware_file_header *h; - u32 i; + u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION); + char *base; if (size < sizeof(struct octeon_firmware_file_header)) { dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n", @@ -576,19 +569,26 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, return -EINVAL; } - crc32_result = - crc32(~0, data, - sizeof(struct octeon_firmware_file_header) - - sizeof(u32)) ^ ~0U; + crc32_result = crc32((unsigned int)~0, data, + sizeof(struct octeon_firmware_file_header) - + sizeof(u32)) ^ ~0U; if (crc32_result != be32_to_cpu(h->crc32)) { dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n", crc32_result, be32_to_cpu(h->crc32)); return -EINVAL; } - if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) { - dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n", - LIQUIDIO_VERSION, h->version); + if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) { + dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n", + LIQUIDIO_PACKAGE, h->version); + return -EINVAL; + } + + base = h->version + strlen(LIQUIDIO_PACKAGE); + ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len); + if (ret) { + dev_err(&oct->pci_dev->dev, "Unmatched firmware version. 
Expected %s.x, got %s.\n", + LIQUIDIO_BASE_VERSION, base); return -EINVAL; } @@ -602,58 +602,58 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s", h->version); - buffer = kmemdup(data, size, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - p = buffer + sizeof(struct octeon_firmware_file_header); + data += sizeof(struct octeon_firmware_file_header); + dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__, + be32_to_cpu(h->num_images)); /* load all images */ for (i = 0; i < be32_to_cpu(h->num_images); i++) { load_addr = be64_to_cpu(h->desc[i].addr); image_len = be32_to_cpu(h->desc[i].len); - /* validate the image */ - crc32_result = crc32(~0, p, image_len) ^ ~0U; - if (crc32_result != be32_to_cpu(h->desc[i].crc32)) { - dev_err(&oct->pci_dev->dev, - "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n", - i, crc32_result, - be32_to_cpu(h->desc[i].crc32)); - ret = -EINVAL; - goto done_downloading; - } + dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n", + image_len, load_addr); - /* download the image */ - octeon_pci_write_core_mem(oct, load_addr, p, image_len); + /* Write in 4MB chunks*/ + rem = image_len; - p += image_len; - dev_dbg(&oct->pci_dev->dev, - "Downloaded image %d (%d bytes) to address 0x%016llx\n", - i, image_len, load_addr); + while (rem) { + if (rem < (4 * 1024 * 1024)) + size = rem; + else + size = 4 * 1024 * 1024; + + memcpy(p, data, size); + + /* download the image */ + octeon_pci_write_core_mem(oct, load_addr, p, (u32)size); + + data += size; + rem -= (u32)size; + load_addr += size; + } } + dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n", + h->bootcmd); /* Invoke the bootcmd */ ret = octeon_console_send_cmd(oct, h->bootcmd, 50); -done_downloading: - kfree(buffer); - - return ret; + return 0; } void octeon_free_device_mem(struct octeon_device *oct) { - u32 i; + int i; - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - /* could check mask as well */ - vfree(oct->droq[i]); + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (oct->io_qmask.oq & (1ULL << i)) + vfree(oct->droq[i]); } - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - /* could check mask as well */ - vfree(oct->instr_queue[i]); + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (oct->io_qmask.iq & (1ULL << i)) + vfree(oct->instr_queue[i]); } i = oct->octeon_id; @@ -735,55 +735,61 @@ struct octeon_device *octeon_allocate_device(u32 pci_id, octeon_device[oct_idx] = oct; oct->octeon_id = oct_idx; - snprintf((oct->device_name), sizeof(oct->device_name), + snprintf(oct->device_name, sizeof(oct->device_name), "LiquidIO%d", (oct->octeon_id)); return oct; } +/* this function is only for setting up the first queue */ int octeon_setup_instr_queues(struct octeon_device *oct) { - u32 i, num_iqs = 0; u32 num_descs = 0; + u32 iq_no = 0; + union oct_txpciq txpciq; + int numa_node = cpu_to_node(iq_no % num_online_cpus()); /* this causes queue 0 to be default queue */ - if (OCTEON_CN6XXX(oct)) { - num_iqs = 1; + if (OCTEON_CN6XXX(oct)) num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); - } oct->num_iqs = 0; - for (i = 0; i < num_iqs; i++) { - oct->instr_queue[i] = + oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]), + numa_node); + if (!oct->instr_queue[0]) + oct->instr_queue[0] = vmalloc(sizeof(struct octeon_instr_queue)); - if (!oct->instr_queue[i]) - return 1; - - memset(oct->instr_queue[i], 0, - sizeof(struct octeon_instr_queue)); - - 
oct->instr_queue[i]->app_ctx = (void *)(size_t)i; - if (octeon_init_instr_queue(oct, i, num_descs)) - return 1; - - oct->num_iqs++; + if (!oct->instr_queue[0]) + return 1; + memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue)); + oct->instr_queue[0]->q_index = 0; + oct->instr_queue[0]->app_ctx = (void *)(size_t)0; + oct->instr_queue[0]->ifidx = 0; + txpciq.u64 = 0; + txpciq.s.q_no = iq_no; + txpciq.s.use_qpg = 0; + txpciq.s.qpg = 0; + if (octeon_init_instr_queue(oct, txpciq, num_descs)) { + /* prevent memory leak */ + vfree(oct->instr_queue[0]); + return 1; } + oct->num_iqs++; return 0; } int octeon_setup_output_queues(struct octeon_device *oct) { - u32 i, num_oqs = 0; u32 num_descs = 0; u32 desc_size = 0; + u32 oq_no = 0; + int numa_node = cpu_to_node(oq_no % num_online_cpus()); /* this causes queue 0 to be default queue */ if (OCTEON_CN6XXX(oct)) { - /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */ - num_oqs = 1; num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); desc_size = @@ -791,19 +797,15 @@ int octeon_setup_output_queues(struct octeon_device *oct) } oct->num_oqs = 0; + oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node); + if (!oct->droq[0]) + oct->droq[0] = vmalloc(sizeof(*oct->droq[0])); + if (!oct->droq[0]) + return 1; - for (i = 0; i < num_oqs; i++) { - oct->droq[i] = vmalloc(sizeof(*oct->droq[i])); - if (!oct->droq[i]) - return 1; - - memset(oct->droq[i], 0, sizeof(struct octeon_droq)); - - if (octeon_init_droq(oct, i, num_descs, desc_size, NULL)) - return 1; - - oct->num_oqs++; - } + if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) + return 1; + oct->num_oqs++; return 0; } @@ -1005,79 +1007,6 @@ octeon_register_dispatch_fn(struct octeon_device *oct, return 0; } -/* octeon_unregister_dispatch_fn - * Parameters: - * oct - octeon device - * opcode - driver should unregister the function for this opcode - * subcode - driver should unregister the function for this subcode - * Description: - * Unregister the function set for this opcode+subcode. - * Returns: - * Success: 0 - * Failure: 1 - * Locks: - * No locks are held. - */ -int -octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode, - u16 subcode) -{ - int retval = 0; - u32 idx; - struct list_head *dispatch, *dfree = NULL, *tmp2; - u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); - - idx = combined_opcode & OCTEON_OPCODE_MASK; - - spin_lock_bh(&oct->dispatch.lock); - - if (oct->dispatch.count == 0) { - spin_unlock_bh(&oct->dispatch.lock); - dev_err(&oct->pci_dev->dev, - "No dispatch functions registered for this device\n"); - return 1; - } - - if (oct->dispatch.dlist[idx].opcode == combined_opcode) { - dispatch = &oct->dispatch.dlist[idx].list; - if (dispatch->next != dispatch) { - dispatch = dispatch->next; - oct->dispatch.dlist[idx].opcode = - ((struct octeon_dispatch *)dispatch)->opcode; - oct->dispatch.dlist[idx].dispatch_fn = - ((struct octeon_dispatch *) - dispatch)->dispatch_fn; - oct->dispatch.dlist[idx].arg = - ((struct octeon_dispatch *)dispatch)->arg; - list_del(dispatch); - dfree = dispatch; - } else { - oct->dispatch.dlist[idx].opcode = 0; - oct->dispatch.dlist[idx].dispatch_fn = NULL; - oct->dispatch.dlist[idx].arg = NULL; - } - } else { - retval = 1; - list_for_each_safe(dispatch, tmp2, - &(oct->dispatch.dlist[idx]. 
- list)) { - if (((struct octeon_dispatch *)dispatch)->opcode == - combined_opcode) { - list_del(dispatch); - dfree = dispatch; - retval = 0; - } - } - } - - if (!retval) - oct->dispatch.count--; - - spin_unlock_bh(&oct->dispatch.lock); - vfree(dfree); - return retval; -} - int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) { u32 i; @@ -1152,8 +1081,8 @@ core_drv_init_err: int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) { - if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) && - (oct->io_qmask.iq & (1UL << q_no))) + if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) && + (oct->io_qmask.iq & (1ULL << q_no))) return oct->instr_queue[q_no]->max_count; return -1; @@ -1161,8 +1090,8 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no) { - if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) && - (oct->io_qmask.oq & (1UL << q_no))) + if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) && + (oct->io_qmask.oq & (1ULL << q_no))) return oct->droq[q_no]->max_count; return -1; } @@ -1253,10 +1182,10 @@ void lio_pci_writeq(struct octeon_device *oct, int octeon_mem_access_ok(struct octeon_device *oct) { u64 access_okay = 0; + u64 lmc0_reset_ctl; /* Check to make sure a DDR interface is enabled */ - u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); - + lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK); return access_okay ? 0 : 1; @@ -1270,9 +1199,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout) if (!timeout) return ret; - while (*timeout == 0) - schedule_timeout_uninterruptible(HZ / 10); - for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout)); ms += HZ / 10) { ret = octeon_mem_access_ok(oct); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 36e1f85df..01edfb404 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -152,9 +152,9 @@ struct octeon_mmio { #define MAX_OCTEON_MAPS 32 struct octeon_io_enable { - u32 iq; - u32 oq; - u32 iq64B; + u64 iq; + u64 oq; + u64 iq64B; }; struct octeon_reg_list { @@ -204,8 +204,7 @@ struct octeon_fn_list { void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int); void (*bar1_idx_write)(struct octeon_device *, u32, u32); u32 (*bar1_idx_read)(struct octeon_device *, u32); - u32 (*update_iq_read_idx)(struct octeon_device *, - struct octeon_instr_queue *); + u32 (*update_iq_read_idx)(struct octeon_instr_queue *); void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32); void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32); @@ -222,7 +221,7 @@ struct octeon_fn_list { /* Structure for named memory blocks * Number of descriptors - * available can be changed without affecting compatiblity, + * available can be changed without affecting compatibility, * but name length changes require a bump in the bootmem * descriptor version * Note: This structure must be naturally 64 bit aligned, as a single @@ -255,7 +254,7 @@ struct oct_fw_info { struct cavium_wk { struct delayed_work work; void *ctxptr; - size_t ctxul; + u64 ctxul; }; struct cavium_wq { @@ -267,6 +266,8 @@ struct octdev_props { /* Each interface in the Octeon device has a network * device pointer (used for OS specific calls). 
 	 */
+	int    napi_enabled;
+	int    gmxport;
 	struct net_device *netdev;
 };
 
@@ -324,7 +325,8 @@ struct octeon_device {
 	struct octeon_sc_buffer_pool	sc_buf_pool;
 
 	/** The input instruction queues */
-	struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+	struct octeon_instr_queue *instr_queue
+		[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
 
 	/** The doubly-linked list of instruction response */
 	struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
@@ -332,7 +334,7 @@ struct octeon_device {
 	u32 num_oqs;
 
 	/** The DROQ output queues  */
-	struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+	struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
 
 	struct octeon_io_enable io_qmask;
 
@@ -381,15 +383,29 @@ struct octeon_device {
 
 	struct cavium_wq dma_comp_wq;
 
-	struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+	/** Lock for dma response list */
+	spinlock_t cmd_resp_wqlock;
+	u32 cmd_resp_state;
+
+	struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
 
 	struct cavium_wk nic_poll_work;
 
 	struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
 
 	void *priv;
+
+	int rx_pause;
+	int tx_pause;
+
+	struct oct_link_stats link_stats; /* statistics from firmware */
+
+	/* private flags to control driver-specific features through ethtool */
+	u32 priv_flags;
 };
 
+#define  OCT_DRV_ONLINE 1
+#define  OCT_DRV_OFFLINE 2
 #define  OCTEON_CN6XXX(oct)           ((oct->chip_id == OCTEON_CN66XX) || \
 				       (oct->chip_id == OCTEON_CN68XX))
 #define CHIP_FIELD(oct, TYPE, field)             \
@@ -569,8 +585,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num);
 int octeon_console_write(struct octeon_device *oct, u32 console_num,
 			 char *buffer, u32 write_request_size, u32 flags);
 int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-int octeon_console_read(struct octeon_device *oct, u32 console_num,
-			char *buffer, u32 buf_size, u32 flags);
+
 int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
 
 /** Removes all attached consoles. */
@@ -646,4 +661,17 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
  */
 struct octeon_config *octeon_get_conf(struct octeon_device *oct);
 
+/* LiquidIO driver private flags */
+enum {
+	OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
+};
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
+				     u32 val)
+{
+	if (val)
+		octdev->priv_flags |= (0x1 << flag);
+	else
+		octdev->priv_flags &= ~(0x1 << flag);
+}
 #endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 174072b37..e0afe4c1f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -19,30 +19,18 @@
  * This file may also be available under a different license from Cavium.
  * Contact Cavium, Inc. for more information
  **********************************************************************/
-#include
-#include
-#include
 #include
-#include
 #include
 #include
-#include "octeon_config.h"
 #include "liquidio_common.h"
 #include "octeon_droq.h"
 #include "octeon_iq.h"
 #include "response_manager.h"
 #include "octeon_device.h"
-#include "octeon_nic.h"
 #include "octeon_main.h"
 #include "octeon_network.h"
 #include "cn66xx_regs.h"
 #include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
-
-/* #define CAVIUM_ONLY_PERF_MODE */
 
 #define     CVM_MIN(d1, d2)           (((d1) < (d2)) ? (d1) : (d2))
 #define     CVM_MAX(d1, d2)           (((d1) > (d2)) ?
(d1) : (d2)) @@ -104,8 +92,12 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev, return fn_arg; } -u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct, - struct octeon_droq *droq) +/** Check for packets on Droq. This function should be called with + * lock held. + * @param droq - Droq on which count is checked. + * @return Returns packet count. + */ +u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq) { u32 pkt_count = 0; @@ -151,22 +143,26 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct, struct octeon_droq *droq) { u32 i; + struct octeon_skb_page_info *pg_info; for (i = 0; i < droq->max_count; i++) { - if (droq->recv_buf_list[i].buffer) { - if (droq->desc_ring) { - lio_unmap_ring_info(oct->pci_dev, - (u64)droq-> - desc_ring[i].info_ptr, - OCT_DROQ_INFO_SIZE); - lio_unmap_ring(oct->pci_dev, - (u64)droq->desc_ring[i]. - buffer_ptr, - droq->buffer_size); - } - recv_buffer_free(droq->recv_buf_list[i].buffer); - droq->recv_buf_list[i].buffer = NULL; - } + pg_info = &droq->recv_buf_list[i].pg_info; + + if (pg_info->dma) + lio_unmap_ring(oct->pci_dev, + (u64)pg_info->dma); + pg_info->dma = 0; + + if (pg_info->page) + recv_buffer_destroy(droq->recv_buf_list[i].buffer, + pg_info); + + if (droq->desc_ring && droq->desc_ring[i].info_ptr) + lio_unmap_ring_info(oct->pci_dev, + (u64)droq-> + desc_ring[i].info_ptr, + OCT_DROQ_INFO_SIZE); + droq->recv_buf_list[i].buffer = NULL; } octeon_droq_reset_indices(droq); @@ -181,25 +177,23 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct, struct octeon_droq_desc *desc_ring = droq->desc_ring; for (i = 0; i < droq->max_count; i++) { - buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size); + buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info); if (!buf) { dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n", __func__); + droq->stats.rx_alloc_failure++; return -ENOMEM; } droq->recv_buf_list[i].buffer = buf; droq->recv_buf_list[i].data = get_rbd(buf); - droq->info_list[i].length = 0; /* map ring buffers into memory */ desc_ring[i].info_ptr = lio_map_ring_info(droq, i); desc_ring[i].buffer_ptr = - lio_map_ring(oct->pci_dev, - droq->recv_buf_list[i].buffer, - droq->buffer_size); + lio_map_ring(droq->recv_buf_list[i].buffer); } octeon_droq_reset_indices(droq); @@ -242,6 +236,8 @@ int octeon_init_droq(struct octeon_device *oct, struct octeon_droq *droq; u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; u32 c_pkts_per_intr = 0, c_refill_threshold = 0; + int orig_node = dev_to_node(&oct->pci_dev->dev); + int numa_node = cpu_to_node(q_no % num_online_cpus()); dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); @@ -261,15 +257,23 @@ int octeon_init_droq(struct octeon_device *oct, struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x); - c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); + c_refill_threshold = + (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); + } else { + return 1; } droq->max_count = c_num_descs; droq->buffer_size = c_buf_size; desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; + set_dev_node(&oct->pci_dev->dev, numa_node); droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, (dma_addr_t *)&droq->desc_ring_dma); + set_dev_node(&oct->pci_dev->dev, orig_node); + if (!droq->desc_ring) + droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, + (dma_addr_t *)&droq->desc_ring_dma); if (!droq->desc_ring) { dev_err(&oct->pci_dev->dev, @@ -283,12 +287,11 @@ int octeon_init_droq(struct octeon_device 
*oct, droq->max_count); droq->info_list = - cnnic_alloc_aligned_dma(oct->pci_dev, - (droq->max_count * OCT_DROQ_INFO_SIZE), - &droq->info_alloc_size, - &droq->info_base_addr, - &droq->info_list_dma); - + cnnic_numa_alloc_aligned_dma((droq->max_count * + OCT_DROQ_INFO_SIZE), + &droq->info_alloc_size, + &droq->info_base_addr, + numa_node); if (!droq->info_list) { dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n"); lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), @@ -297,7 +300,12 @@ int octeon_init_droq(struct octeon_device *oct, } droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc(droq->max_count * + vmalloc_node(droq->max_count * + OCT_DROQ_RECVBUF_SIZE, + numa_node); + if (!droq->recv_buf_list) + droq->recv_buf_list = (struct octeon_recv_buffer *) + vmalloc(droq->max_count * OCT_DROQ_RECVBUF_SIZE); if (!droq->recv_buf_list) { dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); @@ -320,7 +328,7 @@ int octeon_init_droq(struct octeon_device *oct, /* For 56xx Pass1, this function won't be called, so no checks. */ oct->fn_list.setup_oq_regs(oct, q_no); - oct->io_qmask.oq |= (1 << q_no); + oct->io_qmask.oq |= (1ULL << q_no); return 0; @@ -358,6 +366,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info( struct octeon_recv_pkt *recv_pkt; struct octeon_recv_info *recv_info; u32 i, bytes_left; + struct octeon_skb_page_info *pg_info; info = &droq->info_list[idx]; @@ -375,9 +384,14 @@ static inline struct octeon_recv_info *octeon_create_recv_info( bytes_left = (u32)info->length; while (buf_cnt) { - lio_unmap_ring(octeon_dev->pci_dev, - (u64)droq->desc_ring[idx].buffer_ptr, - droq->buffer_size); + { + pg_info = &droq->recv_buf_list[idx].pg_info; + + lio_unmap_ring(octeon_dev->pci_dev, + (u64)pg_info->dma); + pg_info->page = NULL; + pg_info->dma = 0; + } recv_pkt->buffer_size[i] = (bytes_left >= @@ -449,6 +463,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) void *buf = NULL; u8 *data; u32 desc_refilled = 0; + struct octeon_skb_page_info *pg_info; desc_ring = droq->desc_ring; @@ -458,13 +473,22 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) * the buffer, else allocate. */ if (!droq->recv_buf_list[droq->refill_idx].buffer) { - buf = recv_buffer_alloc(octeon_dev, droq->q_no, - droq->buffer_size); + pg_info = + &droq->recv_buf_list[droq->refill_idx].pg_info; + /* Either recycle the existing pages or go for + * new page alloc + */ + if (pg_info->page) + buf = recv_buffer_reuse(octeon_dev, pg_info); + else + buf = recv_buffer_alloc(octeon_dev, pg_info); /* If a buffer could not be allocated, no point in * continuing */ - if (!buf) + if (!buf) { + droq->stats.rx_alloc_failure++; break; + } droq->recv_buf_list[droq->refill_idx].buffer = buf; data = get_rbd(buf); @@ -476,11 +500,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) droq->recv_buf_list[droq->refill_idx].data = data; desc_ring[droq->refill_idx].buffer_ptr = - lio_map_ring(octeon_dev->pci_dev, - droq->recv_buf_list[droq-> - refill_idx].buffer, - droq->buffer_size); - + lio_map_ring(droq->recv_buf_list[droq-> + refill_idx].buffer); /* Reset any previous values in the length field. 
*/ droq->info_list[droq->refill_idx].length = 0; @@ -539,7 +560,9 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct, droq->stats.dropped_nomem++; } } else { - dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n"); + dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n", + (unsigned int)rh->r.opcode, + (unsigned int)rh->r.subcode); droq->stats.dropped_nodispatch++; } /* else (dispatch_fn ... */ @@ -586,6 +609,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, for (pkt = 0; pkt < pkt_count; pkt++) { u32 pkt_len = 0; struct sk_buff *nicbuf = NULL; + struct octeon_skb_page_info *pg_info; + void *buf; info = &droq->info_list[droq->read_idx]; octeon_swap_8B_data((u64 *)info, 2); @@ -605,7 +630,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, rh = &info->rh; total_len += (u32)info->length; - if (OPCODE_SLOW_PATH(rh)) { u32 buf_cnt; @@ -614,50 +638,45 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, droq->refill_count += buf_cnt; } else { if (info->length <= droq->buffer_size) { - lio_unmap_ring(oct->pci_dev, - (u64)droq->desc_ring[ - droq->read_idx].buffer_ptr, - droq->buffer_size); pkt_len = (u32)info->length; nicbuf = droq->recv_buf_list[ droq->read_idx].buffer; + pg_info = &droq->recv_buf_list[ + droq->read_idx].pg_info; + if (recv_buffer_recycle(oct, pg_info)) + pg_info->page = NULL; droq->recv_buf_list[droq->read_idx].buffer = NULL; + INCR_INDEX_BY1(droq->read_idx, droq->max_count); - skb_put(nicbuf, pkt_len); droq->refill_count++; } else { - nicbuf = octeon_fast_packet_alloc(oct, droq, - droq->q_no, - (u32) + nicbuf = octeon_fast_packet_alloc((u32) info->length); pkt_len = 0; /* nicbuf allocation can fail. We'll handle it * inside the loop. */ while (pkt_len < info->length) { - int cpy_len; + int cpy_len, idx = droq->read_idx; - cpy_len = ((pkt_len + - droq->buffer_size) > - info->length) ? + cpy_len = ((pkt_len + droq->buffer_size) + > info->length) ? ((u32)info->length - pkt_len) : droq->buffer_size; if (nicbuf) { - lio_unmap_ring(oct->pci_dev, - (u64) - droq->desc_ring - [droq->read_idx]. - buffer_ptr, - droq-> - buffer_size); octeon_fast_packet_next(droq, nicbuf, cpy_len, - droq-> - read_idx - ); + idx); + buf = droq->recv_buf_list[idx]. + buffer; + recv_buffer_fast_free(buf); + droq->recv_buf_list[idx].buffer + = NULL; + } else { + droq->stats.rx_alloc_failure++; } pkt_len += cpy_len; @@ -668,12 +687,14 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, } if (nicbuf) { - if (droq->ops.fptr) + if (droq->ops.fptr) { droq->ops.fptr(oct->octeon_id, - nicbuf, pkt_len, - rh, &droq->napi); - else + nicbuf, pkt_len, + rh, &droq->napi, + droq->ops.farg); + } else { recv_buffer_free(nicbuf); + } } } @@ -681,16 +702,16 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, int desc_refilled = octeon_droq_refill(oct, droq); /* Flush the droq descriptor data to memory to be sure - * that when we update the credits the data in memory - * is accurate. - */ + * that when we update the credits the data in memory + * is accurate. + */ wmb(); writel((desc_refilled), droq->pkts_credit_reg); /* make sure mmio write completes */ mmiowb(); } - } /* for ( each packet )... */ + } /* for (each packet)... */ /* Increment refill_count by the number of buffers processed. 
*/ droq->stats.pkts_received += pkt; @@ -721,7 +742,7 @@ octeon_droq_process_packets(struct octeon_device *oct, if (pkt_count > budget) pkt_count = budget; - /* Grab the lock */ + /* Grab the droq lock */ spin_lock(&droq->lock); pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); @@ -783,7 +804,7 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, total_pkts_processed += pkts_processed; - octeon_droq_check_hw_for_pkts(oct, droq); + octeon_droq_check_hw_for_pkts(droq); } spin_unlock(&droq->lock); @@ -807,18 +828,6 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, u32 arg) { struct octeon_droq *droq; - struct octeon_config *oct_cfg = NULL; - - oct_cfg = octeon_get_conf(oct); - - if (!oct_cfg) - return -EINVAL; - - if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { - dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n", - __func__, q_no, (oct->num_oqs - 1)); - return -EINVAL; - } droq = oct->droq[q_no]; @@ -937,6 +946,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) spin_lock_irqsave(&droq->lock, flags); droq->ops.fptr = NULL; + droq->ops.farg = NULL; droq->ops.drop_on_max = 0; spin_unlock_irqrestore(&droq->lock, flags); @@ -949,6 +959,7 @@ int octeon_create_droq(struct octeon_device *oct, u32 desc_size, void *app_ctx) { struct octeon_droq *droq; + int numa_node = cpu_to_node(q_no % num_online_cpus()); if (oct->droq[q_no]) { dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n", @@ -957,7 +968,9 @@ int octeon_create_droq(struct octeon_device *oct, } /* Allocate the DS for the new droq. */ - droq = vmalloc(sizeof(*droq)); + droq = vmalloc_node(sizeof(*droq), numa_node); + if (!droq) + droq = vmalloc(sizeof(*droq)); if (!droq) goto create_droq_fail; memset(droq, 0, sizeof(struct octeon_droq)); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index 7940ccee1..5a6fb9113 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -65,6 +65,17 @@ struct octeon_droq_info { #define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info)) +struct octeon_skb_page_info { + /* DMA address for the page */ + dma_addr_t dma; + + /* Page for the rx dma **/ + struct page *page; + + /** which offset into page */ + unsigned int page_offset; +}; + /** Pointer to data buffer. * Driver keeps a pointer to the data buffer that it made available to * the Octeon device. Since the descriptor ring keeps physical (bus) @@ -77,6 +88,9 @@ struct octeon_recv_buffer { /** Data in the packet buffer. */ u8 *data; + + /** pg_info **/ + struct octeon_skb_page_info pg_info; }; #define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer)) @@ -106,6 +120,13 @@ struct oct_droq_stats { /** Num of Packets dropped due to receive path failures. */ u64 rx_dropped; + + /** Num of vxlan packets received; */ + u64 rx_vxlan; + + /** Num of failures of recv_buffer_alloc() */ + u64 rx_alloc_failure; + }; #define POLL_EVENT_INTR_ARRIVED 1 @@ -213,7 +234,8 @@ struct octeon_droq_ops { * data in the buffer. The receive header gives the port * number to the caller. Function pointer is set by caller. */ - void (*fptr)(u32, void *, u32, union octeon_rh *, void *); + void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *); + void *farg; /* This function will be called by the driver for all NAPI related * events. The first param is the octeon id. 
The second param is the @@ -394,24 +416,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct, u16 subcode, octeon_dispatch_fn_t fn, void *fn_arg); -/** Remove registration for an opcode/subcode. This will delete the mapping for - * an opcode/subcode. The dispatch function will be unregistered and will no - * longer be called if a packet with the opcode/subcode arrives in the driver - * output queues. - * @param oct - the octeon device to unregister from. - * @param opcode - the opcode to be unregistered. - * @param subcode - the subcode to be unregistered. - * - * @return Success: 0; Failure: 1 - */ -int octeon_unregister_dispatch_fn(struct octeon_device *oct, - u16 opcode, - u16 subcode); - void octeon_droq_print_stats(void); -u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct, - struct octeon_droq *droq); +u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq); int octeon_create_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 592fe49b5..ff4b1d6f0 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -65,6 +65,11 @@ struct oct_iq_stats { u64 tx_iq_busy;/**< Numof times this iq was found to be full. */ u64 tx_dropped;/**< Numof pkts dropped dueto xmitpath errors. */ u64 tx_tot_bytes;/**< Total count of bytes sento to network. */ + u64 tx_gso; /* count of tso */ + u64 tx_vxlan; /* tunnel */ + u64 tx_dmamap_fail; + u64 tx_restart; + /*u64 tx_timeout_count;*/ }; #define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats)) @@ -75,18 +80,26 @@ struct oct_iq_stats { * a Octeon device has one such structure to represent it. */ struct octeon_instr_queue { + struct octeon_device *oct_dev; + /** A spinlock to protect access to the input ring. */ spinlock_t lock; + /** A spinlock to protect while posting on the ring. */ + spinlock_t post_lock; + + /** A spinlock to protect access to the input ring.*/ + spinlock_t iq_flush_running_lock; + /** Flag that indicates if the queue uses 64 byte commands. */ u32 iqcmd_64B:1; - /** Queue Number. */ - u32 iq_no:5; + /** Queue info. */ + union oct_txpciq txpciq; u32 rsvd:17; - /* Controls the periodic flushing of iq */ + /* Controls whether extra flushing of IQ is done on Tx */ u32 do_auto_flush:1; u32 status:8; @@ -147,6 +160,13 @@ struct octeon_instr_queue { /** Application context */ void *app_ctx; + + /* network stack queue index */ + int q_index; + + /*os ifidx associated with this queue */ + int ifidx; + }; /*---------------------- INSTRUCTION FORMAT ----------------------------*/ @@ -176,12 +196,12 @@ struct octeon_instr_32B { /** 64-byte instruction format. * Format of instruction for a 64-byte mode input queue. */ -struct octeon_instr_64B { +struct octeon_instr2_64B { /** Pointer where the input data is available. */ u64 dptr; /** Instruction Header. */ - u64 ih; + u64 ih2; /** Input Request Header. */ u64 irh; @@ -198,14 +218,44 @@ struct octeon_instr_64B { u64 rptr; u64 reserved; +}; + +struct octeon_instr3_64B { + /** Pointer where the input data is available. */ + u64 dptr; + + /** Instruction Header. */ + u64 ih3; + + /** Instruction Header. */ + u64 pki_ih3; + + /** Input Request Header. */ + u64 irh; + /** opcode/subcode specific parameters */ + u64 ossp[2]; + + /** Return Data Parameters */ + u64 rdp; + + /** Pointer where the response for a RAW mode packet will be written + * by Octeon. 
+ */ + u64 rptr; + +}; + +union octeon_instr_64B { + struct octeon_instr2_64B cmd2; + struct octeon_instr3_64B cmd3; }; -#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B)) +#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B)) /** The size of each buffer in soft command buffer pool */ -#define SOFT_COMMAND_BUFFER_SIZE 1024 +#define SOFT_COMMAND_BUFFER_SIZE 1536 struct octeon_soft_command { /** Soft command buffer info. */ @@ -214,7 +264,8 @@ struct octeon_soft_command { u32 size; /** Command and return status */ - struct octeon_instr_64B cmd; + union octeon_instr_64B cmd; + #define COMPLETION_WORD_INIT 0xffffffffffffffffULL u64 *status_word; @@ -242,7 +293,7 @@ struct octeon_soft_command { /** Maximum number of buffers to allocate into soft command buffer pool */ -#define MAX_SOFT_COMMAND_BUFFERS 16 +#define MAX_SOFT_COMMAND_BUFFERS 256 /** Head of a soft command buffer pool. */ @@ -268,14 +319,15 @@ void octeon_free_soft_command(struct octeon_device *oct, /** * octeon_init_instr_queue() * @param octeon_dev - pointer to the octeon device structure. - * @param iq_no - queue to be initialized (0 <= q_no <= 3). + * @param txpciq - queue to be initialized (0 <= q_no <= 3). * * Called at driver init time for each input queue. iq_conf has the * configuration parameters for the queue. * * @return Success: 0 Failure: 1 */ -int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no, +int octeon_init_instr_queue(struct octeon_device *octeon_dev, + union oct_txpciq txpciq, u32 num_descs); /** @@ -298,7 +350,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype, int lio_process_iq_request_list(struct octeon_device *oct, - struct octeon_instr_queue *iq); + struct octeon_instr_queue *iq, u32 napi_budget); int octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db, void *cmd, void *buf, @@ -313,7 +365,10 @@ void octeon_prepare_soft_command(struct octeon_device *oct, int octeon_send_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc); -int octeon_setup_iq(struct octeon_device *oct, u32 iq_no, - u32 num_descs, void *app_ctx); - +int octeon_setup_iq(struct octeon_device *oct, int ifidx, + int q_index, union oct_txpciq iq_no, u32 num_descs, + void *app_ctx); +int +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 pending_thresh, u32 napi_budget); #endif /* __OCTEON_IQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index cbd081981..bc14e4c27 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct, } static inline void * -cnnic_alloc_aligned_dma(struct pci_dev *pci_dev, - u32 size, - u32 *alloc_size, - size_t *orig_ptr, - size_t *dma_addr __attribute__((unused))) +cnnic_numa_alloc_aligned_dma(u32 size, + u32 *alloc_size, + size_t *orig_ptr, + int numa_node) { int retries = 0; void *ptr = NULL; #define OCTEON_MAX_ALLOC_RETRIES 1 do { - ptr = - (void *)__get_free_pages(GFP_KERNEL, - get_order(size)); + struct page *page = NULL; + + page = alloc_pages_node(numa_node, + GFP_KERNEL, + get_order(size)); + if (!page) + page = alloc_pages(GFP_KERNEL, + get_order(size)); + ptr = (void *)page_address(page); if ((unsigned long)ptr & 0x07) { - free_pages((unsigned long)ptr, get_order(size)); + __free_pages(page, get_order(size)); ptr = NULL; /* Increment the size required if the 
first * attempt failed. @@ -169,7 +174,7 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition) init_waitqueue_entry(&we, current); add_wait_queue(wait_queue, &we); - while (!(ACCESS_ONCE(*condition))) { + while (!(READ_ONCE(*condition))) { set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) goto out; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c index 5aecef870..95a4bbedf 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c @@ -19,43 +19,29 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. for more information **********************************************************************/ -#include -#include -#include -#include #include -#include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" -#include "octeon_main.h" -#include "octeon_network.h" -#include "cn66xx_regs.h" -#include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" #define MEMOPS_IDX MAX_BAR1_MAP_INDEX +#ifdef __BIG_ENDIAN_BITFIELD static inline void -octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)), - u32 idx __attribute__((unused))) +octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx) { -#ifdef __BIG_ENDIAN_BITFIELD u32 mask; mask = oct->fn_list.bar1_idx_read(oct, idx); mask = (mask & 0x2) ? (mask & ~2) : (mask | 2); oct->fn_list.bar1_idx_write(oct, idx, mask); -#endif } +#else +#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct) +#endif static void octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index b3abe5818..fb820dc7f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -30,6 +30,20 @@ #include #include +#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) +#define LIO_MIN_MTU_SIZE 68 + +struct oct_nic_stats_resp { + u64 rh; + struct oct_link_stats stats; + u64 status; +}; + +struct oct_nic_stats_ctrl { + struct completion complete; + struct net_device *netdev; +}; + /** LiquidIO per-interface network private data */ struct lio { /** State of the interface. Rx/Tx happens only in the RUNNING state. */ @@ -48,11 +62,11 @@ struct lio { */ int rxq; - /** Guards the glist */ - spinlock_t lock; + /** Guards each glist */ + spinlock_t *glist_lock; - /** Linked list of gather components */ - struct list_head glist; + /** Array of gather component linked lists */ + struct list_head *glist; /** Pointer to the NIC properties for the Octeon device this network * interface is associated with. @@ -67,6 +81,9 @@ struct lio { /** Link information sent by the core application for this interface. */ struct oct_link_info linfo; + /** counter of link changes */ + u64 link_changes; + /** Size of Tx queue for this octeon device. */ u32 tx_qsize; @@ -82,6 +99,12 @@ struct lio { /** Copy of Interface capabilities: TSO, TSO6, LRO, Chescksums . 
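Several allocations in this series follow the same idiom: prefer the NUMA node that will consume the memory, but treat locality as a hint and fall back to any node rather than fail. A minimal sketch of that pattern, using a hypothetical helper name and returning NULL when both attempts fail:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_pages_prefer_node(int numa_node, unsigned int order)
{
	struct page *page;

	/* First choice: memory local to the consuming CPU/queue. */
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);

	/* Locality is an optimization, not a requirement. */
	if (!page)
		page = alloc_pages(GFP_KERNEL, order);

	return page;	/* NULL only if the system is truly out of pages */
}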
*/ u64 dev_capability; + /* Copy of transmit encapsulation capabilities: + * TSO, TSO6, Checksums for this device for Kernel + * 3.10.0 onwards + */ + u64 enc_dev_capability; + /** Copy of beacaon reg in phy */ u32 phy_beacon_val; @@ -101,7 +124,6 @@ struct lio { /* work queue for txq status */ struct cavium_wq txq_status_wq; - }; #define LIO_SIZE (sizeof(struct lio)) @@ -111,8 +133,9 @@ struct lio { * \brief Enable or disable feature * @param netdev pointer to network device * @param cmd Command that just requires acknowledgment + * @param param1 Parameter to command */ -int liquidio_set_feature(struct net_device *netdev, int cmd); +int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1); /** * \brief Link control command completion callback @@ -131,14 +154,30 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr); */ void liquidio_set_ethtool_ops(struct net_device *netdev); -static inline void -*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)), - u32 q_no __attribute__((unused)), u32 size) -{ #define SKB_ADJ_MASK 0x3F #define SKB_ADJ (SKB_ADJ_MASK + 1) - struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ); +#define MIN_SKB_SIZE 256 /* 8 bytes and more - 8 bytes for PTP */ +#define LIO_RXBUFFER_SZ 2048 + +static inline void +*recv_buffer_alloc(struct octeon_device *oct, + struct octeon_skb_page_info *pg_info) +{ + struct page *page; + struct sk_buff *skb; + struct octeon_skb_page_info *skb_pg_info; + + page = alloc_page(GFP_ATOMIC | __GFP_COLD); + if (unlikely(!page)) + return NULL; + + skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ); + if (unlikely(!skb)) { + __free_page(page); + pg_info->page = NULL; + return NULL; + } if ((unsigned long)skb->data & SKB_ADJ_MASK) { u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); @@ -146,10 +185,150 @@ static inline void skb_reserve(skb, r); } + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + /* Get DMA info */ + pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* Mapping failed!! 
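dma_map_page() can fail (IOMMU exhaustion, bounce-buffer pressure), so the returned address is only trusted after dma_mapping_error() clears it, which is exactly what the check below does before the page pointer is published. The same step in isolation, with a hypothetical helper name:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Returns 0 when the mapping failed, mirroring this driver's
 * "!pg_info->dma means unmapped" convention.
 */
static dma_addr_t map_rx_page(struct device *dev, struct page *page)
{
	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return 0;

	return dma;
}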
*/ + if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) { + __free_page(page); + dev_kfree_skb_any((struct sk_buff *)skb); + pg_info->page = NULL; + return NULL; + } + + pg_info->page = page; + pg_info->page_offset = 0; + skb_pg_info->page = page; + skb_pg_info->page_offset = 0; + skb_pg_info->dma = pg_info->dma; + return (void *)skb; } +static inline void +*recv_buffer_fast_alloc(u32 size) +{ + struct sk_buff *skb; + struct octeon_skb_page_info *skb_pg_info; + + skb = dev_alloc_skb(size + SKB_ADJ); + if (unlikely(!skb)) + return NULL; + + if ((unsigned long)skb->data & SKB_ADJ_MASK) { + u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); + + skb_reserve(skb, r); + } + + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + skb_pg_info->page = NULL; + skb_pg_info->page_offset = 0; + skb_pg_info->dma = 0; + + return skb; +} + +static inline int +recv_buffer_recycle(struct octeon_device *oct, void *buf) +{ + struct octeon_skb_page_info *pg_info = buf; + + if (!pg_info->page) { + dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n", + __func__); + return -ENOMEM; + } + + if (unlikely(page_count(pg_info->page) != 1) || + unlikely(page_to_nid(pg_info->page) != numa_node_id())) { + dma_unmap_page(&oct->pci_dev->dev, + pg_info->dma, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + return -ENOMEM; + } + + /* Flip to other half of the buffer */ + if (pg_info->page_offset == 0) + pg_info->page_offset = LIO_RXBUFFER_SZ; + else + pg_info->page_offset = 0; + page_ref_inc(pg_info->page); + + return 0; +} + +static inline void +*recv_buffer_reuse(struct octeon_device *oct, void *buf) +{ + struct octeon_skb_page_info *pg_info = buf, *skb_pg_info; + struct sk_buff *skb; + + skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ); + if (unlikely(!skb)) { + dma_unmap_page(&oct->pci_dev->dev, + pg_info->dma, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); + return NULL; + } + + if ((unsigned long)skb->data & SKB_ADJ_MASK) { + u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); + + skb_reserve(skb, r); + } + + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + skb_pg_info->page = pg_info->page; + skb_pg_info->page_offset = pg_info->page_offset; + skb_pg_info->dma = pg_info->dma; + + return skb; +} + +static inline void +recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info) +{ + struct sk_buff *skb = (struct sk_buff *)buffer; + + put_page(pg_info->page); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + + if (skb) + dev_kfree_skb_any(skb); +} + static inline void recv_buffer_free(void *buffer) +{ + struct sk_buff *skb = (struct sk_buff *)buffer; + struct octeon_skb_page_info *pg_info; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + + if (pg_info->page) { + put_page(pg_info->page); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + } + + dev_kfree_skb_any((struct sk_buff *)buffer); +} + +static inline void +recv_buffer_fast_free(void *buffer) +{ + dev_kfree_skb_any((struct sk_buff *)buffer); +} + +static inline void tx_buffer_free(void *buffer) { dev_kfree_skb_any((struct sk_buff *)buffer); } @@ -159,7 +338,17 @@ static inline void recv_buffer_free(void *buffer) #define lio_dma_free(oct, size, virt_addr, dma_addr) \ dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr) -#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data) +static inline +void *get_rbd(struct sk_buff *skb) +{ + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + 
pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + va = page_address(pg_info->page) + pg_info->page_offset; + + return va; +} static inline u64 lio_map_ring_info(struct octeon_droq *droq, u32 i) @@ -170,7 +359,7 @@ lio_map_ring_info(struct octeon_droq *droq, u32 i) dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i], OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE); - BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr)); + WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr)); return (u64)dma_addr; } @@ -183,33 +372,44 @@ lio_unmap_ring_info(struct pci_dev *pci_dev, } static inline u64 -lio_map_ring(struct pci_dev *pci_dev, - void *buf, u32 size) +lio_map_ring(void *buf) { dma_addr_t dma_addr; - dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size, - DMA_FROM_DEVICE); + struct sk_buff *skb = (struct sk_buff *)buf; + struct octeon_skb_page_info *pg_info; - BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr)); + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (!pg_info->page) { + pr_err("%s: pg_info->page NULL\n", __func__); + WARN_ON(1); + } + + /* Get DMA info */ + dma_addr = pg_info->dma; + if (!pg_info->dma) { + pr_err("%s: ERROR it should be already available\n", + __func__); + WARN_ON(1); + } + dma_addr += pg_info->page_offset; return (u64)dma_addr; } static inline void lio_unmap_ring(struct pci_dev *pci_dev, - u64 buf_ptr, u32 size) + u64 buf_ptr) + { - dma_unmap_single(&pci_dev->dev, - buf_ptr, size, - DMA_FROM_DEVICE); + dma_unmap_page(&pci_dev->dev, + buf_ptr, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); } -static inline void *octeon_fast_packet_alloc(struct octeon_device *oct, - struct octeon_droq *droq, - u32 q_no, u32 size) +static inline void *octeon_fast_packet_alloc(u32 size) { - return recv_buffer_alloc(oct, q_no, size); + return recv_buffer_fast_alloc(size); } static inline void octeon_fast_packet_next(struct octeon_droq *droq, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index 1a0191549..166727be9 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -19,14 +19,9 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. for more information **********************************************************************/ -#include -#include -#include #include #include -#include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -34,21 +29,14 @@ #include "octeon_device.h" #include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" -#include "cn66xx_regs.h" -#include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" void * octeon_alloc_soft_command_resp(struct octeon_device *oct, - struct octeon_instr_64B *cmd, - size_t rdatasize) + union octeon_instr_64B *cmd, + u32 rdatasize) { struct octeon_soft_command *sc; - struct octeon_instr_ih *ih; + struct octeon_instr_ih2 *ih2; struct octeon_instr_irh *irh; struct octeon_instr_rdp *rdp; @@ -59,24 +47,25 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, return NULL; /* Copy existing command structure into the soft command */ - memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B)); + memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B)); /* Add in the response related fields. Opcode and Param are already * there. 
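The fsz ("front-data size") constants set just below are straightforward word counts: irh, ossp[0] and ossp[1] always lead the command (3 x 8 = 24 bytes), and a response-carrying command appends rdp and rptr for two more words (40 bytes). The arithmetic as a sketch:

#include <linux/types.h>

static inline u32 lio_front_data_size(bool expects_response)
{
	u32 fsz = 3 * sizeof(u64);	/* irh + ossp[0] + ossp[1] = 24 */

	if (expects_response)
		fsz += 2 * sizeof(u64);	/* + rdp + rptr = 40 */

	return fsz;
}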
*/ - ih = (struct octeon_instr_ih *)&sc->cmd.ih; - ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ + ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; + ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ - irh = (struct octeon_instr_irh *)&sc->cmd.irh; irh->rflag = 1; /* a response is required */ - irh->len = 4; /* means four 64-bit words immediately follow irh */ - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; rdp->pcie_port = oct->pcie_port; rdp->rlen = rdatasize; *sc->status_word = COMPLETION_WORD_INIT; + sc->cmd.cmd2.rptr = sc->dmarptr; + sc->wait_time = 1000; sc->timeout = jiffies + sc->wait_time; @@ -119,12 +108,11 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct, static inline struct octeon_soft_command *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct, - struct octnic_ctrl_pkt *nctrl, - struct octnic_ctrl_params nparams) + struct octnic_ctrl_pkt *nctrl) { struct octeon_soft_command *sc = NULL; u8 *data; - size_t rdatasize; + u32 rdatasize; u32 uddsize = 0, datasize = 0; uddsize = (u32)(nctrl->ncmd.s.more * 8); @@ -143,7 +131,7 @@ static inline struct octeon_soft_command data = (u8 *)sc->virtdptr; - memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); + memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3)); @@ -152,6 +140,8 @@ static inline struct octeon_soft_command memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize); } + sc->iq_no = (u32)nctrl->iq_no; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); @@ -164,26 +154,41 @@ static inline struct octeon_soft_command int octnet_send_nic_ctrl_pkt(struct octeon_device *oct, - struct octnic_ctrl_pkt *nctrl, - struct octnic_ctrl_params nparams) + struct octnic_ctrl_pkt *nctrl) { int retval; struct octeon_soft_command *sc = NULL; - sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams); + spin_lock_bh(&oct->cmd_resp_wqlock); + /* Allow only rx ctrl command to stop traffic on the chip + * during offline operations + */ + if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) && + (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) { + spin_unlock_bh(&oct->cmd_resp_wqlock); + dev_err(&oct->pci_dev->dev, + "%s cmd:%d not processed since driver offline\n", + __func__, nctrl->ncmd.s.cmd); + return -1; + } + + sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl); if (!sc) { dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n", __func__); + spin_unlock_bh(&oct->cmd_resp_wqlock); return -1; } retval = octeon_send_soft_command(oct, sc); - if (retval) { + if (retval == IQ_SEND_FAILED) { octeon_free_soft_command(oct, sc); - dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n", - __func__, retval); + dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n", + __func__, nctrl->ncmd.s.cmd, retval); + spin_unlock_bh(&oct->cmd_resp_wqlock); return -1; } + spin_unlock_bh(&oct->cmd_resp_wqlock); return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h index 0238857c8..b71a2bbe4 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -52,6 +52,9 @@ struct octnic_ctrl_pkt { /** Additional data that may be needed by some commands. */ u64 udd[MAX_NCTRL_UDD]; + /** Input queue to use to send this command. 
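The submit path above adds an admission check: once the driver marks itself offline, only the command that quiesces RX traffic may still go out, and the whole submit happens under one BH-safe lock so the state cannot flip mid-operation. Distilled, with illustrative names:

#include <linux/spinlock.h>

enum drv_state { DRV_ONLINE, DRV_OFFLINE };

struct ctrl_path {
	spinlock_t lock;	/* serializes submits against state changes */
	enum drv_state state;
	int rx_ctl_cmd;		/* the one command allowed while offline */
};

static int submit_ctrl_cmd(struct ctrl_path *cp, int cmd)
{
	spin_lock_bh(&cp->lock);

	if (cp->state == DRV_OFFLINE && cmd != cp->rx_ctl_cmd) {
		spin_unlock_bh(&cp->lock);
		return -1;	/* refused: driver is going down */
	}

	/* ...allocate and post the command here, lock still held,
	 * so no state change can race with the hardware write...
	 */

	spin_unlock_bh(&cp->lock);
	return 0;
}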
*/ + u64 iq_no; + /** Time to wait for Octeon software to respond to this control command. * If wait_time is 0, OSI assumes no response is expected. */ @@ -82,7 +85,7 @@ struct octnic_data_pkt { u32 datasize; /** Command to be passed to the Octeon device software. */ - struct octeon_instr_64B cmd; + union octeon_instr_64B cmd; /** Input queue to use to send this command. */ u32 q_no; @@ -94,15 +97,14 @@ struct octnic_data_pkt { */ union octnic_cmd_setup { struct { - u32 ifidx:8; - u32 cksum_offset:7; + u32 iq_no:8; u32 gather:1; u32 timestamp:1; - u32 ipv4opts_ipv6exthdr:2; u32 ip_csum:1; + u32 transport_csum:1; u32 tnl_csum:1; + u32 rsvd:19; - u32 rsvd:11; union { u32 datasize; u32 gatherptrs; @@ -113,79 +115,146 @@ union octnic_cmd_setup { }; -struct octnic_ctrl_params { - u32 resp_order; -}; - static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) { return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) >= (oct->instr_queue[q_no]->max_count - 2)); } -/** Utility function to prepare a 64B NIC instruction based on a setup command - * @param cmd - pointer to instruction to be filled in. - * @param setup - pointer to the setup structure - * @param q_no - which queue for back pressure - * - * Assumes the cmd instruction is pre-allocated, but no fields are filled in. - */ static inline void -octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd, - union octnic_cmd_setup *setup, u32 tag) +octnet_prepare_pci_cmd_o2(struct octeon_device *oct, + union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) { - struct octeon_instr_ih *ih; + struct octeon_instr_ih2 *ih2; struct octeon_instr_irh *irh; union octnic_packet_params packet_params; + int port; - memset(cmd, 0, sizeof(struct octeon_instr_64B)); + memset(cmd, 0, sizeof(union octeon_instr_64B)); - ih = (struct octeon_instr_ih *)&cmd->ih; + ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2; /* assume that rflag is cleared so therefore front data will only have - * irh and ossp[1] and ossp[2] for a total of 24 bytes + * irh and ossp[0], ossp[1] for a total of 32 bytes */ - ih->fsz = 24; + ih2->fsz = 24; + + ih2->tagtype = ORDERED_TAG; + ih2->grp = DEFAULT_POW_GRP; - ih->tagtype = ORDERED_TAG; - ih->grp = DEFAULT_POW_GRP; + port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port; if (tag) - ih->tag = tag; + ih2->tag = tag; else - ih->tag = LIO_DATA(setup->s.ifidx); + ih2->tag = LIO_DATA(port); - ih->raw = 1; - ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */ + ih2->raw = 1; + ih2->qos = (port & 3) + 4; /* map qos based on interface */ if (!setup->s.gather) { - ih->dlengsz = setup->s.u.datasize; + ih2->dlengsz = setup->s.u.datasize; } else { - ih->gather = 1; - ih->dlengsz = setup->s.u.gatherptrs; + ih2->gather = 1; + ih2->dlengsz = setup->s.u.gatherptrs; } - irh = (struct octeon_instr_irh *)&cmd->irh; + irh = (struct octeon_instr_irh *)&cmd->cmd2.irh; irh->opcode = OPCODE_NIC; irh->subcode = OPCODE_NIC_NW_DATA; packet_params.u32 = 0; - if (setup->s.cksum_offset) { - packet_params.s.csoffset = setup->s.cksum_offset; - packet_params.s.ipv4opts_ipv6exthdr = - setup->s.ipv4opts_ipv6exthdr; + packet_params.s.ip_csum = setup->s.ip_csum; + packet_params.s.transport_csum = setup->s.transport_csum; + packet_params.s.tnl_csum = setup->s.tnl_csum; + packet_params.s.tsflag = setup->s.timestamp; + + irh->ossp = packet_params.u32; +} + +static inline void +octnet_prepare_pci_cmd_o3(struct octeon_device *oct, + union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) +{ + struct 
octeon_instr_irh *irh; + struct octeon_instr_ih3 *ih3; + struct octeon_instr_pki_ih3 *pki_ih3; + union octnic_packet_params packet_params; + int port; + + memset(cmd, 0, sizeof(union octeon_instr_64B)); + + ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3; + pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3; + + /* assume that rflag is cleared so therefore front data will only have + * irh and ossp[1] and ossp[2] for a total of 24 bytes + */ + ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind; + /*PKI IH*/ + ih3->fsz = 24 + 8; + + if (!setup->s.gather) { + ih3->dlengsz = setup->s.u.datasize; + } else { + ih3->gather = 1; + ih3->dlengsz = setup->s.u.gatherptrs; } + pki_ih3->w = 1; + pki_ih3->raw = 1; + pki_ih3->utag = 1; + pki_ih3->utt = 1; + pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg; + + port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port; + + if (tag) + pki_ih3->tag = tag; + else + pki_ih3->tag = LIO_DATA(port); + + pki_ih3->tagtype = ORDERED_TAG; + pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg; + pki_ih3->pm = 0x7; /*0x7 - meant for Parse nothing, uninterpreted*/ + pki_ih3->sl = 8; /* sl will be sizeof(pki_ih3)*/ + + irh = (struct octeon_instr_irh *)&cmd->cmd3.irh; + + irh->opcode = OPCODE_NIC; + irh->subcode = OPCODE_NIC_NW_DATA; + + packet_params.u32 = 0; + packet_params.s.ip_csum = setup->s.ip_csum; + packet_params.s.transport_csum = setup->s.transport_csum; packet_params.s.tnl_csum = setup->s.tnl_csum; - packet_params.s.ifidx = setup->s.ifidx; packet_params.s.tsflag = setup->s.timestamp; irh->ossp = packet_params.u32; } +/** Utility function to prepare a 64B NIC instruction based on a setup command + * @param cmd - pointer to instruction to be filled in. + * @param setup - pointer to the setup structure + * @param q_no - which queue for back pressure + * + * Assumes the cmd instruction is pre-allocated, but no fields are filled in. + */ +static inline void +octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) +{ + if (OCTEON_CN6XXX(oct)) + octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag); + else + octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag); +} + /** Allocate and a soft command with space for a response immediately following * the commnad. * @param oct - octeon device pointer @@ -198,8 +267,8 @@ octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd, */ void * octeon_alloc_soft_command_resp(struct octeon_device *oct, - struct octeon_instr_64B *cmd, - size_t rdatasize); + union octeon_instr_64B *cmd, + u32 rdatasize); /** Send a NIC data packet to the device * @param oct - octeon device pointer @@ -214,14 +283,11 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct, /** Send a NIC control packet to the device * @param oct - octeon device pointer * @param nctrl - control structure with command, timout, and callback info - * @param nparams - response control structure - * * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the * queue should be stopped, and IQ_SEND_OK if it sent okay. 
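The wrapper above is the only place that knows two command formats exist; every caller hands it the same arguments and the chip generation picks the layout. The shape of that dispatch, reduced to a standalone sketch with stand-in types:

#include <stdbool.h>
#include <string.h>

union cmd64 { unsigned long long word[8]; };
struct dev_info { bool is_gen2; };

static void build_cmd_gen2(union cmd64 *cmd)
{
	memset(cmd, 0, sizeof(*cmd));	/* fill the ih2/irh view here */
}

static void build_cmd_gen3(union cmd64 *cmd)
{
	memset(cmd, 0, sizeof(*cmd));	/* fill the ih3/pki_ih3 view here */
}

/* One entry point; the device, not the caller, selects the format. */
static void build_cmd(const struct dev_info *dev, union cmd64 *cmd)
{
	if (dev->is_gen2)
		build_cmd_gen2(cmd);
	else
		build_cmd_gen3(cmd);
}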
*/ int octnet_send_nic_ctrl_pkt(struct octeon_device *oct, - struct octnic_ctrl_pkt *nctrl, - struct octnic_ctrl_params nparams); + struct octnic_ctrl_pkt *nctrl); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index a2a24652c..d32492f18 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -19,28 +19,17 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. for more information **********************************************************************/ -#include -#include -#include -#include #include -#include #include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" #include "octeon_network.h" -#include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" #define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count) @@ -51,7 +40,7 @@ struct iq_post_status { }; static void check_db_timeout(struct work_struct *work); -static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no); +static void __check_db_timeout(struct octeon_device *oct, u64 iq_no); static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *); @@ -69,12 +58,16 @@ static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no) /* Return 0 on success, 1 on failure */ int octeon_init_instr_queue(struct octeon_device *oct, - u32 iq_no, u32 num_descs) + union oct_txpciq txpciq, + u32 num_descs) { struct octeon_instr_queue *iq; struct octeon_iq_config *conf = NULL; + u32 iq_no = (u32)txpciq.s.q_no; u32 q_size; struct cavium_wq *db_wq; + int orig_node = dev_to_node(&oct->pci_dev->dev); + int numa_node = cpu_to_node(iq_no % num_online_cpus()); if (OCTEON_CN6XXX(oct)) conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf))); @@ -95,9 +88,15 @@ int octeon_init_instr_queue(struct octeon_device *oct, q_size = (u32)conf->instr_type * num_descs; iq = oct->instr_queue[iq_no]; + iq->oct_dev = oct; + set_dev_node(&oct->pci_dev->dev, numa_node); iq->base_addr = lio_dma_alloc(oct, q_size, (dma_addr_t *)&iq->base_addr_dma); + set_dev_node(&oct->pci_dev->dev, orig_node); + if (!iq->base_addr) + iq->base_addr = lio_dma_alloc(oct, q_size, + (dma_addr_t *)&iq->base_addr_dma); if (!iq->base_addr) { dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n", iq_no); @@ -109,7 +108,11 @@ int octeon_init_instr_queue(struct octeon_device *oct, /* Initialize a list to holds requests that have been posted to Octeon * but has yet to be fetched by octeon */ - iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs); + iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs), + numa_node); + if (!iq->request_list) + iq->request_list = vmalloc(sizeof(*iq->request_list) * + num_descs); if (!iq->request_list) { lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n", @@ -122,7 +125,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n", iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count); - iq->iq_no = iq_no; + 
iq->txpciq.u64 = txpciq.u64; iq->fill_threshold = (u32)conf->db_min; iq->fill_cnt = 0; iq->host_write_index = 0; @@ -135,8 +138,11 @@ int octeon_init_instr_queue(struct octeon_device *oct, /* Initialize the spinlock for this instruction queue */ spin_lock_init(&iq->lock); + spin_lock_init(&iq->post_lock); - oct->io_qmask.iq |= (1 << iq_no); + spin_lock_init(&iq->iq_flush_running_lock); + + oct->io_qmask.iq |= (1ULL << iq_no); /* Set the 32B/64B mode for each input queue */ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); @@ -144,7 +150,9 @@ int octeon_init_instr_queue(struct octeon_device *oct, oct->fn_list.setup_iq_regs(oct, iq_no); - oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db"); + oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db", + WQ_MEM_RECLAIM, + 0); if (!oct->check_db_wq[iq_no].wq) { lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n", @@ -168,7 +176,6 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work); - flush_workqueue(oct->check_db_wq[iq_no].wq); destroy_workqueue(oct->check_db_wq[iq_no].wq); if (OCTEON_CN6XXX(oct)) @@ -188,26 +195,38 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) /* Return 0 on success, 1 on failure */ int octeon_setup_iq(struct octeon_device *oct, - u32 iq_no, + int ifidx, + int q_index, + union oct_txpciq txpciq, u32 num_descs, void *app_ctx) { + u32 iq_no = (u32)txpciq.s.q_no; + int numa_node = cpu_to_node(iq_no % num_online_cpus()); + if (oct->instr_queue[iq_no]) { dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n", iq_no); + oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64; oct->instr_queue[iq_no]->app_ctx = app_ctx; return 0; } oct->instr_queue[iq_no] = - vmalloc(sizeof(struct octeon_instr_queue)); + vmalloc_node(sizeof(struct octeon_instr_queue), numa_node); + if (!oct->instr_queue[iq_no]) + oct->instr_queue[iq_no] = + vmalloc(sizeof(struct octeon_instr_queue)); if (!oct->instr_queue[iq_no]) return 1; memset(oct->instr_queue[iq_no], 0, sizeof(struct octeon_instr_queue)); + oct->instr_queue[iq_no]->q_index = q_index; oct->instr_queue[iq_no]->app_ctx = app_ctx; - if (octeon_init_instr_queue(oct, iq_no, num_descs)) { + oct->instr_queue[iq_no]->ifidx = ifidx; + + if (octeon_init_instr_queue(oct, txpciq, num_descs)) { vfree(oct->instr_queue[iq_no]); oct->instr_queue[iq_no] = NULL; return 1; @@ -226,8 +245,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct) instr_cnt = 0; /*for (i = 0; i < oct->num_iqs; i++) {*/ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; pending = atomic_read(&oct-> @@ -271,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq, memcpy(iqptr, cmd, cmdsize); } -static inline int -__post_command(struct octeon_device *octeon_dev __attribute__((unused)), - struct octeon_instr_queue *iq, - u32 force_db __attribute__((unused)), u8 *cmd) -{ - u32 index = -1; - - /* This ensures that the read index does not wrap around to the same - * position if queue gets full before Octeon could fetch any instr. - */ - if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) - return -1; - - __copy_cmd_into_iq(iq, cmd); - - /* "index" is returned, host_write_index is modified. 
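The queue-mask updates above quietly fix a width bug: io_qmask.iq is 64 bits wide, and shifting a plain int (1 << iq_no) is undefined once iq_no reaches 32, so any device exposing more than 32 queues would corrupt the mask. Demonstrated standalone:

#include <stdint.h>

static uint64_t queue_bit(unsigned int q_no)
{
	/* (1 << q_no) is a 32-bit int shift: undefined for q_no >= 32.
	 * 1ULL keeps the shift in 64 bits, valid for q_no up to 63.
	 */
	return 1ULL << q_no;
}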
*/ - index = iq->host_write_index; - INCR_INDEX_BY1(iq->host_write_index, iq->max_count); - iq->fill_cnt++; - - /* Flush the command into memory. We need to be sure the data is in - * memory before indicating that the instruction is pending. - */ - wmb(); - - atomic_inc(&iq->instr_pending); - - return index; -} - static inline struct iq_post_status -__post_command2(struct octeon_device *octeon_dev __attribute__((unused)), - struct octeon_instr_queue *iq, - u32 force_db __attribute__((unused)), u8 *cmd) +__post_command2(struct octeon_instr_queue *iq, u8 *cmd) { struct iq_post_status st; @@ -362,17 +349,19 @@ __add_to_request_list(struct octeon_instr_queue *iq, iq->request_list[idx].reqtype = reqtype; } +/* Can only run in process context */ int lio_process_iq_request_list(struct octeon_device *oct, - struct octeon_instr_queue *iq) + struct octeon_instr_queue *iq, u32 napi_budget) { int reqtype; void *buf; u32 old = iq->flush_index; u32 inst_count = 0; - unsigned pkts_compl = 0, bytes_compl = 0; + unsigned int pkts_compl = 0, bytes_compl = 0; struct octeon_soft_command *sc; struct octeon_instr_irh *irh; + unsigned long flags; while (old != iq->octeon_read_index) { reqtype = iq->request_list[old].reqtype; @@ -394,7 +383,7 @@ lio_process_iq_request_list(struct octeon_device *oct, case REQTYPE_SOFT_COMMAND: sc = buf; - irh = (struct octeon_instr_irh *)&sc->cmd.irh; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; if (irh->rflag) { /* We're expecting a response from Octeon. * It's up to lio_process_ordered_list() to @@ -402,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct, * command response list because we expect * a response from Octeon. */ - spin_lock_bh(&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock); + spin_lock_irqsave + (&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); atomic_inc(&oct->response_list [OCTEON_ORDERED_SC_LIST]. pending_req_count); list_add_tail(&sc->node, &oct->response_list [OCTEON_ORDERED_SC_LIST].head); - spin_unlock_bh(&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock); + spin_unlock_irqrestore + (&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); } else { if (sc->callback) { + /* This callback must not sleep */ sc->callback(oct, OCTEON_REQUEST_DONE, sc->callback_arg); } @@ -430,6 +424,9 @@ lio_process_iq_request_list(struct octeon_device *oct, skip_this: inst_count++; INCR_INDEX_BY1(old, iq->max_count); + + if ((napi_budget) && (inst_count >= napi_budget)) + break; } if (bytes_compl) octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl, @@ -439,38 +436,63 @@ lio_process_iq_request_list(struct octeon_device *oct, return inst_count; } -static inline void -update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq) +/* Can only be called from process context */ +int +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 pending_thresh, u32 napi_budget) { u32 inst_processed = 0; + u32 tot_inst_processed = 0; + int tx_done = 1; - /* Calculate how many commands Octeon has read and move the read index - * accordingly. - */ - iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq); + if (!spin_trylock(&iq->iq_flush_running_lock)) + return tx_done; - /* Move the NORESPONSE requests to the per-device completion list. 
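The napi_budget parameter threaded through above turns the completion walk into a bounded loop: under NAPI at most budget entries are retired per poll, while a budget of zero keeps the old drain-everything behaviour for non-NAPI callers. Skeleton of that loop:

static unsigned int drain_completions(unsigned int head, unsigned int tail,
				      unsigned int ring_size,
				      unsigned int budget)
{
	unsigned int done = 0;

	while (head != tail) {
		/* ...retire the request at 'head' here... */
		head = (head + 1) % ring_size;
		done++;

		/* budget == 0 means unbounded; otherwise yield to NAPI. */
		if (budget && done >= budget)
			break;
	}

	return done;
}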
*/ - if (iq->flush_index != iq->octeon_read_index) - inst_processed = lio_process_iq_request_list(oct, iq); + spin_lock_bh(&iq->lock); - if (inst_processed) { - atomic_sub(inst_processed, &iq->instr_pending); - iq->stats.instr_processed += inst_processed; - } -} + iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); -static void -octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh) -{ if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { - spin_lock_bh(&iq->lock); - update_iq_indices(oct, iq); - spin_unlock_bh(&iq->lock); + do { + /* Process any outstanding IQ packets. */ + if (iq->flush_index == iq->octeon_read_index) + break; + + if (napi_budget) + inst_processed = lio_process_iq_request_list + (oct, iq, + napi_budget - tot_inst_processed); + else + inst_processed = + lio_process_iq_request_list(oct, iq, 0); + + if (inst_processed) { + atomic_sub(inst_processed, &iq->instr_pending); + iq->stats.instr_processed += inst_processed; + } + + tot_inst_processed += inst_processed; + inst_processed = 0; + + } while (tot_inst_processed < napi_budget); + + if (napi_budget && (tot_inst_processed >= napi_budget)) + tx_done = 0; } + + iq->last_db_time = jiffies; + + spin_unlock_bh(&iq->lock); + + spin_unlock(&iq->iq_flush_running_lock); + + return tx_done; } -static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no) +/* Process instruction queue after timeout. + * This routine gets called from a workqueue or when removing the module. + */ +static void __check_db_timeout(struct octeon_device *oct, u64 iq_no) { struct octeon_instr_queue *iq; u64 next_time; @@ -481,24 +503,17 @@ static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no) if (!iq) return; + /* return immediately, if no work pending */ + if (!atomic_read(&iq->instr_pending)) + return; /* If jiffies - last_db_time < db_timeout do nothing */ next_time = iq->last_db_time + iq->db_timeout; if (!time_after(jiffies, (unsigned long)next_time)) return; iq->last_db_time = jiffies; - /* Get the lock and prevent tasklets. This routine gets called from - * the poll thread. Instructions can now be posted in tasklet context - */ - spin_lock_bh(&iq->lock); - if (iq->fill_cnt != 0) - ring_doorbell(oct, iq); - - spin_unlock_bh(&iq->lock); - /* Flush the instruction queue */ - if (iq->do_auto_flush) - octeon_flush_iq(oct, iq, 1); + octeon_flush_iq(oct, iq, 1, 0); } /* Called by the Poll thread at regular intervals to check the instruction @@ -508,11 +523,12 @@ static void check_db_timeout(struct work_struct *work) { struct cavium_wk *wk = (struct cavium_wk *)work; struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; - unsigned long iq_no = wk->ctxul; + u64 iq_no = wk->ctxul; struct cavium_wq *db_wq = &oct->check_db_wq[iq_no]; + u32 delay = 10; __check_db_timeout(oct, iq_no); - queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); + queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay)); } int @@ -523,9 +539,12 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, struct iq_post_status st; struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; - spin_lock_bh(&iq->lock); + /* Get the lock and prevent other tasks and tx interrupt handler from + * running. 
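octeon_flush_iq above guards itself with a trylock rather than a blocking lock: a second context arriving mid-flush gains nothing by waiting, because the first flusher is already doing the work it came for. The guard in miniature, with illustrative names:

#include <linux/spinlock.h>

static int flush_once(spinlock_t *flush_running_lock)
{
	/* Someone else is flushing; their pass covers our work too. */
	if (!spin_trylock(flush_running_lock))
		return 1;

	/* ...walk the ring and retire completed entries here... */

	spin_unlock(flush_running_lock);
	return 1;	/* tx_done-style result for the NAPI caller */
}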
+ */ + spin_lock_bh(&iq->post_lock); - st = __post_command2(oct, iq, force_db, cmd); + st = __post_command2(iq, cmd); if (st.status != IQ_SEND_FAILED) { octeon_report_sent_bytes_to_bql(buf, reqtype); @@ -533,16 +552,19 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); - if (iq->fill_cnt >= iq->fill_threshold || force_db) + if (force_db) ring_doorbell(oct, iq); } else { INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); } - spin_unlock_bh(&iq->lock); + spin_unlock_bh(&iq->post_lock); - if (iq->do_auto_flush) - octeon_flush_iq(oct, iq, 2); + /* This is only done here to expedite packets being flushed + * for cases where there are no IQ completion interrupts. + */ + /*if (iq->do_auto_flush)*/ + /* octeon_flush_iq(oct, iq, 2, 0);*/ return st.status; } @@ -557,82 +579,75 @@ octeon_prepare_soft_command(struct octeon_device *oct, u64 ossp1) { struct octeon_config *oct_cfg; - struct octeon_instr_ih *ih; + struct octeon_instr_ih2 *ih2; struct octeon_instr_irh *irh; struct octeon_instr_rdp *rdp; - BUG_ON(opcode > 15); - BUG_ON(subcode > 127); + WARN_ON(opcode > 15); + WARN_ON(subcode > 127); oct_cfg = octeon_get_conf(oct); - ih = (struct octeon_instr_ih *)&sc->cmd.ih; - ih->tagtype = ATOMIC_TAG; - ih->tag = LIO_CONTROL; - ih->raw = 1; - ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg); + ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; + ih2->tagtype = ATOMIC_TAG; + ih2->tag = LIO_CONTROL; + ih2->raw = 1; + ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg); if (sc->datasize) { - ih->dlengsz = sc->datasize; - ih->rs = 1; + ih2->dlengsz = sc->datasize; + ih2->rs = 1; } - irh = (struct octeon_instr_irh *)&sc->cmd.irh; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; irh->opcode = opcode; irh->subcode = subcode; /* opcode/subcode specific parameters (ossp) */ irh->ossp = irh_ossp; - sc->cmd.ossp[0] = ossp0; - sc->cmd.ossp[1] = ossp1; + sc->cmd.cmd2.ossp[0] = ossp0; + sc->cmd.cmd2.ossp[1] = ossp1; if (sc->rdatasize) { - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; rdp->pcie_port = oct->pcie_port; rdp->rlen = sc->rdatasize; irh->rflag = 1; - irh->len = 4; - ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */ + ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */ } else { irh->rflag = 0; - irh->len = 2; - ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */ + ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */ } - - while (!(oct->io_qmask.iq & (1 << sc->iq_no))) - sc->iq_no++; } int octeon_send_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc) { - struct octeon_instr_ih *ih; + struct octeon_instr_ih2 *ih2; struct octeon_instr_irh *irh; - struct octeon_instr_rdp *rdp; + u32 len; - ih = (struct octeon_instr_ih *)&sc->cmd.ih; - if (ih->dlengsz) { - BUG_ON(!sc->dmadptr); - sc->cmd.dptr = sc->dmadptr; + ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; + if (ih2->dlengsz) { + WARN_ON(!sc->dmadptr); + sc->cmd.cmd2.dptr = sc->dmadptr; } - - irh = (struct octeon_instr_irh *)&sc->cmd.irh; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; if (irh->rflag) { - BUG_ON(!sc->dmarptr); - BUG_ON(!sc->status_word); + WARN_ON(!sc->dmarptr); + WARN_ON(!sc->status_word); *sc->status_word = COMPLETION_WORD_INIT; - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; - - sc->cmd.rptr = sc->dmarptr; + sc->cmd.cmd2.rptr = sc->dmarptr; } + len = (u32)ih2->dlengsz; if (sc->wait_time) sc->timeout = jiffies + 
sc->wait_time; - return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, - (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND); + return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, + len, REQTYPE_SOFT_COMMAND)); } int octeon_setup_sc_buffer_pool(struct octeon_device *oct) @@ -667,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct) struct list_head *tmp, *tmp2; struct octeon_soft_command *sc; - spin_lock(&oct->sc_buf_pool.lock); + spin_lock_bh(&oct->sc_buf_pool.lock); list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { list_del(tmp); @@ -679,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct) INIT_LIST_HEAD(&oct->sc_buf_pool.head); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); return 0; } @@ -695,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc = NULL; struct list_head *tmp; - BUG_ON((offset + datasize + rdatasize + ctxsize) > + WARN_ON((offset + datasize + rdatasize + ctxsize) > SOFT_COMMAND_BUFFER_SIZE); - spin_lock(&oct->sc_buf_pool.lock); + spin_lock_bh(&oct->sc_buf_pool.lock); if (list_empty(&oct->sc_buf_pool.head)) { - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); return NULL; } @@ -712,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, atomic_inc(&oct->sc_buf_pool.alloc_buf_count); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); sc = (struct octeon_soft_command *)tmp; @@ -742,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, offset = (offset + datasize + 127) & 0xffffff80; if (rdatasize) { - BUG_ON(rdatasize < 16); + WARN_ON(rdatasize < 16); sc->virtrptr = (u8 *)sc + offset; sc->dmarptr = dma_addr + offset; sc->rdatasize = rdatasize; @@ -755,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, void octeon_free_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc) { - spin_lock(&oct->sc_buf_pool.lock); + spin_lock_bh(&oct->sc_buf_pool.lock); list_add_tail(&sc->node, &oct->sc_buf_pool.head); atomic_dec(&oct->sc_buf_pool.alloc_buf_count); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); } diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index 091f537a9..709049e36 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -19,28 +19,14 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include -#include -#include -#include -#include #include -#include #include -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" -#include "cn66xx_regs.h" -#include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" static void oct_poll_req_completion(struct work_struct *work); @@ -54,8 +40,9 @@ int octeon_setup_response_list(struct octeon_device *oct) spin_lock_init(&oct->response_list[i].lock); atomic_set(&oct->response_list[i].pending_req_count, 0); } + spin_lock_init(&oct->cmd_resp_wqlock); - oct->dma_comp_wq.wq = create_workqueue("dma-comp"); + oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0); if (!oct->dma_comp_wq.wq) { dev_err(&oct->pci_dev->dev, "failed to create wq thread\n"); return -ENOMEM; @@ -64,7 +51,8 @@ int octeon_setup_response_list(struct octeon_device *oct) cwq = &oct->dma_comp_wq; INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion); cwq->wk.ctxptr = oct; - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); + oct->cmd_resp_state = OCT_DRV_ONLINE; + queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50)); return ret; } @@ -72,7 +60,6 @@ int octeon_setup_response_list(struct octeon_device *oct) void octeon_delete_response_list(struct octeon_device *oct) { cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work); - flush_workqueue(oct->dma_comp_wq.wq); destroy_workqueue(oct->dma_comp_wq.wq); } @@ -86,6 +73,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, u32 status; u64 status64; struct octeon_instr_rdp *rdp; + u64 rptr; ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; @@ -103,7 +91,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, sc = (struct octeon_soft_command *)ordered_sc_list-> head.next; - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; + rptr = sc->cmd.cmd2.rptr; status = OCTEON_REQUEST_PENDING; @@ -111,7 +100,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, * to where rptr is pointing to */ dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev, - sc->cmd.rptr, rdp->rlen, + rptr, rdp->rlen, DMA_FROM_DEVICE); status64 = *sc->status_word; @@ -173,6 +162,5 @@ static void oct_poll_req_completion(struct work_struct *work) struct cavium_wq *cwq = &oct->dma_comp_wq; lio_process_ordered_list(oct, 0); - - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); + queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50)); } diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 388cd799d..4ab404f45 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -146,7 +146,6 @@ struct octeon_mgmt { struct device *dev; struct napi_struct napi; struct tasklet_struct tx_clean_tasklet; - struct phy_device *phydev; struct device_node *phy_np; resource_size_t mix_phys; resource_size_t mix_size; @@ -787,14 +786,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, static int octeon_mgmt_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { - struct octeon_mgmt *p = netdev_priv(netdev); - switch (cmd) { case SIOCSHWTSTAMP: return 
octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); default: - if (p->phydev) - return phy_mii_ioctl(p->phydev, rq, cmd); + if (netdev->phydev) + return phy_mii_ioctl(netdev->phydev, rq, cmd); return -EINVAL; } } @@ -836,16 +833,18 @@ static void octeon_mgmt_enable_link(struct octeon_mgmt *p) static void octeon_mgmt_update_link(struct octeon_mgmt *p) { + struct net_device *ndev = p->netdev; + struct phy_device *phydev = ndev->phydev; union cvmx_agl_gmx_prtx_cfg prtx_cfg; prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - if (!p->phydev->link) + if (!phydev->link) prtx_cfg.s.duplex = 1; else - prtx_cfg.s.duplex = p->phydev->duplex; + prtx_cfg.s.duplex = phydev->duplex; - switch (p->phydev->speed) { + switch (phydev->speed) { case 10: prtx_cfg.s.speed = 0; prtx_cfg.s.slottime = 0; @@ -871,7 +870,7 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p) prtx_cfg.s.speed_msb = 0; /* Only matters for half-duplex */ prtx_cfg.s.slottime = 1; - prtx_cfg.s.burst = p->phydev->duplex; + prtx_cfg.s.burst = phydev->duplex; } break; case 0: /* No link */ @@ -894,9 +893,9 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p) /* MII (both speeds) and RGMII 1000 speed. */ agl_clk.s.clk_cnt = 1; if (prtx_ctl.s.mode == 0) { /* RGMII mode */ - if (p->phydev->speed == 10) + if (phydev->speed == 10) agl_clk.s.clk_cnt = 50; - else if (p->phydev->speed == 100) + else if (phydev->speed == 100) agl_clk.s.clk_cnt = 5; } cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); @@ -906,39 +905,40 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p) static void octeon_mgmt_adjust_link(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; unsigned long flags; int link_changed = 0; - if (!p->phydev) + if (!phydev) return; spin_lock_irqsave(&p->lock, flags); - if (!p->phydev->link && p->last_link) + if (!phydev->link && p->last_link) link_changed = -1; - if (p->phydev->link - && (p->last_duplex != p->phydev->duplex - || p->last_link != p->phydev->link - || p->last_speed != p->phydev->speed)) { + if (phydev->link && + (p->last_duplex != phydev->duplex || + p->last_link != phydev->link || + p->last_speed != phydev->speed)) { octeon_mgmt_disable_link(p); link_changed = 1; octeon_mgmt_update_link(p); octeon_mgmt_enable_link(p); } - p->last_link = p->phydev->link; - p->last_speed = p->phydev->speed; - p->last_duplex = p->phydev->duplex; + p->last_link = phydev->link; + p->last_speed = phydev->speed; + p->last_duplex = phydev->duplex; spin_unlock_irqrestore(&p->lock, flags); if (link_changed != 0) { if (link_changed > 0) { pr_info("%s: Link is up - %d/%s\n", netdev->name, - p->phydev->speed, - DUPLEX_FULL == p->phydev->duplex ? + phydev->speed, + phydev->duplex == DUPLEX_FULL ? "Full" : "Half"); } else { pr_info("%s: Link is down\n", netdev->name); @@ -949,6 +949,7 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev) static int octeon_mgmt_init_phy(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); + struct phy_device *phydev = NULL; if (octeon_is_simulation() || p->phy_np == NULL) { /* No PHYs in the simulator. 
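The conversion above works because phylib already owns the canonical PHY pointer: of_phy_connect() attaches the PHY to the net_device, after which netdev->phydev is valid and a duplicate pointer in the driver's private struct is redundant. A sketch of the resulting init pattern:

#include <linux/errno.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_init_phy(struct net_device *netdev,
			    struct device_node *phy_np,
			    void (*adjust_link)(struct net_device *))
{
	struct phy_device *phydev;

	phydev = of_phy_connect(netdev, phy_np, adjust_link, 0,
				PHY_INTERFACE_MODE_MII);
	if (!phydev)
		return -ENODEV;

	/* Nothing to cache: the core stored phydev in netdev->phydev. */
	return 0;
}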
*/ @@ -956,11 +957,11 @@ static int octeon_mgmt_init_phy(struct net_device *netdev) return 0; } - p->phydev = of_phy_connect(netdev, p->phy_np, - octeon_mgmt_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + phydev = of_phy_connect(netdev, p->phy_np, + octeon_mgmt_adjust_link, 0, + PHY_INTERFACE_MODE_MII); - if (!p->phydev) + if (!phydev) return -ENODEV; return 0; @@ -1080,9 +1081,9 @@ static int octeon_mgmt_open(struct net_device *netdev) } /* Set the mode of the interface, RGMII/MII. */ - if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { + if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) { union cvmx_agl_prtx_ctl agl_prtx_ctl; - int rgmii_mode = (p->phydev->supported & + int rgmii_mode = (netdev->phydev->supported & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); @@ -1205,7 +1206,7 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Configure the port duplex, speed and enables */ octeon_mgmt_disable_link(p); - if (p->phydev) + if (netdev->phydev) octeon_mgmt_update_link(p); octeon_mgmt_enable_link(p); @@ -1214,9 +1215,9 @@ static int octeon_mgmt_open(struct net_device *netdev) /* PHY is not present in simulator. The carrier is enabled * while initializing the phy for simulator, leave it enabled. */ - if (p->phydev) { + if (netdev->phydev) { netif_carrier_off(netdev); - phy_start_aneg(p->phydev); + phy_start_aneg(netdev->phydev); } netif_wake_queue(netdev); @@ -1244,9 +1245,8 @@ static int octeon_mgmt_stop(struct net_device *netdev) napi_disable(&p->napi); netif_stop_queue(netdev); - if (p->phydev) - phy_disconnect(p->phydev); - p->phydev = NULL; + if (netdev->phydev) + phy_disconnect(netdev->phydev); netif_carrier_off(netdev); @@ -1346,50 +1346,23 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); } -static int octeon_mgmt_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - if (p->phydev) - return phy_ethtool_gset(p->phydev, cmd); - - return -EOPNOTSUPP; -} - -static int octeon_mgmt_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (p->phydev) - return phy_ethtool_sset(p->phydev, cmd); - - return -EOPNOTSUPP; -} - static int octeon_mgmt_nway_reset(struct net_device *dev) { - struct octeon_mgmt *p = netdev_priv(dev); - if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (p->phydev) - return phy_start_aneg(p->phydev); + if (dev->phydev) + return phy_start_aneg(dev->phydev); return -EOPNOTSUPP; } static const struct ethtool_ops octeon_mgmt_ethtool_ops = { .get_drvinfo = octeon_mgmt_get_drvinfo, - .get_settings = octeon_mgmt_get_settings, - .set_settings = octeon_mgmt_set_settings, .nway_reset = octeon_mgmt_nway_reset, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops octeon_mgmt_ops = { @@ -1540,6 +1513,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) return 0; err: + of_node_put(p->phy_np); free_netdev(netdev); return result; } @@ -1547,8 +1521,10 @@ err: static int octeon_mgmt_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); + struct octeon_mgmt *p = netdev_priv(netdev); unregister_netdev(netdev); + of_node_put(p->phy_np); free_netdev(netdev); return 0; } diff --git 
a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 83025bb47..e29815d9e 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -279,6 +279,7 @@ struct nicvf { u8 sqs_id; bool sqs_mode; bool hw_tso; + bool t88; /* Receive buffer alloc */ u32 rb_page_offset; diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 16ed20357..85cc782b9 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size) int lmac; u64 lmac_cfg; - /* Max value that can be set is 60 */ - if (size > 60) - size = 60; + /* There is a issue in HW where-in while sending GSO sized + * pkts as part of TSO, if pkt len falls below this size + * NIC will zero PAD packet and also updates IP total length. + * Hence set this value to lessthan min pkt size of MAC+IP+TCP + * headers, BGX will do the padding to transmit 64 byte pkt. + */ + if (size > 52) + size = 52; for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a19e73f11..324034961 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, struct nicvf *nic = netdev_priv(netdev); struct snd_queue *sq; struct sq_hdr_subdesc *hdr; + struct sq_hdr_subdesc *tso_sqe; sq = &nic->qs->sq[cqe_tx->sq_idx]; @@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; - /* For TSO offloaded packets only one SQE will have a valid SKB */ if (skb) { + /* Check for dummy descriptor used for HW TSO offload on 88xx */ + if (hdr->dont_send) { + /* Get actual TSO descriptors and free them */ + tso_sqe = + (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); + nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1); + } nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); prefetch(skb); dev_consume_skb_any(skb); sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; } else { - /* In case of HW TSO, HW sends a CQE for each segment of a TSO - * packet instead of a single CQE for the whole TSO packet - * transmitted. Each of this CQE points to the same SQE, so - * avoid freeing same SQE multiple times. + /* In case of SW TSO on 88xx, only last segment will have + * a SKB attached, so just free SQEs here. 
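The 52-byte clamp above follows from simple header arithmetic: the smallest Ethernet + IPv4 + TCP header stack is 14 + 20 + 20 = 54 bytes, so capping the NIC-level pad threshold strictly below that keeps the hardware from zero-padding (and rewriting the IP total length of) real TSO segments, while BGX still pads frames to 64 bytes on the wire. The numbers as constants:

#include <linux/if_ether.h>	/* ETH_HLEN == 14 */

#define MIN_IPV4_HDR_LEN	20
#define MIN_TCP_HDR_LEN		20
#define MIN_HDR_STACK	(ETH_HLEN + MIN_IPV4_HDR_LEN + MIN_TCP_HDR_LEN)	/* 54 */
#define SAFE_PAD_LIMIT	52	/* strictly below MIN_HDR_STACK */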
*/ if (!nic->hw_tso) nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); @@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *netdev; struct nicvf *nic; int err, qcount; + u16 sdevid; err = pci_enable_device(pdev); if (err) { @@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pass1_silicon(nic->pdev)) nic->hw_tso = true; + pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); + if (sdevid == 0xA134) + nic->t88 = true; + /* Check if this VF is in QS only mode */ if (nic->sqs_mode) return 0; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 0ff8e60de..dda3ea3f3 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb) return num_edescs + sh->gso_segs; } +#define POST_CQE_DESC_COUNT 2 + /* Get the number of SQ descriptors needed to xmit this skb */ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) { @@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) return subdesc_cnt; } + /* Dummy descriptors to get TSO pkt completion notification */ + if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) + subdesc_cnt += POST_CQE_DESC_COUNT; + if (skb_shinfo(skb)->nr_frags) subdesc_cnt += skb_shinfo(skb)->nr_frags; @@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, struct sq_hdr_subdesc *hdr; hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); - sq->skbuff[qentry] = (u64)skb; - memset(hdr, 0, SND_QUEUE_DESC_SIZE); hdr->subdesc_type = SQ_DESC_TYPE_HEADER; - /* Enable notification via CQE after processing SQE */ - hdr->post_cqe = 1; - /* No of subdescriptors following this */ - hdr->subdesc_cnt = subdesc_cnt; + + if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) { + /* post_cqe = 0, to avoid HW posting a CQE for every TSO + * segment transmitted on 88xx. + */ + hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT; + } else { + sq->skbuff[qentry] = (u64)skb; + /* Enable notification via CQE after processing SQE */ + hdr->post_cqe = 1; + /* No of subdescriptors following this */ + hdr->subdesc_cnt = subdesc_cnt; + } hdr->tot_len = len; /* Offload checksum calculation to HW */ @@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, gather->addr = data; } +/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO + * packet so that a CQE is posted as a notifation for transmission of + * TSO packet. 
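The probe hunk above keys the workaround off the board identity rather than a feature flag: the PCI subsystem ID (0xA134 for the 88xx boards handled here) is read once and cached so the hot path only tests a bool. The same check in isolation:

#include <linux/pci.h>

static bool is_t88_board(struct pci_dev *pdev)
{
	u16 sdevid;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);

	return sdevid == 0xA134;	/* 88xx subsystem ID per the probe above */
}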
+ */ +static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry, + int tso_sqe, struct sk_buff *skb) +{ + struct sq_imm_subdesc *imm; + struct sq_hdr_subdesc *hdr; + + sq->skbuff[qentry] = (u64)skb; + + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); + memset(hdr, 0, SND_QUEUE_DESC_SIZE); + hdr->subdesc_type = SQ_DESC_TYPE_HEADER; + /* Enable notification via CQE after processing SQE */ + hdr->post_cqe = 1; + /* There is no packet to transmit here */ + hdr->dont_send = 1; + hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1; + hdr->tot_len = 1; + /* Actual TSO header SQE index, needed for cleanup */ + hdr->rsvd2 = tso_sqe; + + qentry = nicvf_get_nxt_sqentry(sq, qentry); + imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry); + memset(imm, 0, SND_QUEUE_DESC_SIZE); + imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE; + imm->len = 1; +} + /* Segment a TSO packet into 'gso_size' segments and append * them to SQ for transfer */ @@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) { int i, size; - int subdesc_cnt; + int subdesc_cnt, tso_sqe = 0; int sq_num, qentry; struct queue_set *qs; struct snd_queue *sq; @@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) /* Add SQ header subdesc */ nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, skb, skb->len); + tso_sqe = qentry; /* Add SQ gather subdescs */ qentry = nicvf_get_nxt_sqentry(sq, qentry); @@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) } doorbell: + if (nic->t88 && skb_shinfo(skb)->gso_size) { + qentry = nicvf_get_nxt_sqentry(sq, qentry); + nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb); + } + /* make sure all memory stores are done before ringing doorbell */ smp_wmb(); diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index 4686a85a8..5713e83be 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -96,17 +96,6 @@ config CHELSIO_T4_DCB If unsure, say N. -config CHELSIO_T4_UWIRE - bool "Unified Wire Support for Chelsio T5 cards" - default n - depends on CHELSIO_T4 - ---help--- - Enable unified-wire offload features. - Say Y here if you want to enable unified-wire over Ethernet - in the driver. - - If unsure, say N. - config CHELSIO_T4_FCOE bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards" default n @@ -137,4 +126,9 @@ config CHELSIO_T4VF To compile this driver as a module choose M here; the module will be called cxgb4vf. +config CHELSIO_LIB + tristate + ---help--- + Common library for Chelsio drivers. 
+ endif # NET_VENDOR_CHELSIO diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile index 390510b5e..b6a5eec6e 100644 --- a/drivers/net/ethernet/chelsio/Makefile +++ b/drivers/net/ethernet/chelsio/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T1) += cxgb/ obj-$(CONFIG_CHELSIO_T3) += cxgb3/ obj-$(CONFIG_CHELSIO_T4) += cxgb4/ obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/ +obj-$(CONFIG_CHELSIO_LIB) += libcxgb/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 85c92821b..ace0ab98d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -7,5 +7,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o -cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) += cxgb4_ppm.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index b4fceb924..edd23386b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -418,8 +418,9 @@ struct trace_params { struct link_config { unsigned short supported; /* link capabilities */ unsigned short advertising; /* advertised capabilities */ - unsigned short requested_speed; /* speed user has requested */ - unsigned short speed; /* actual link speed */ + unsigned short lp_advertising; /* peer advertised capabilities */ + unsigned int requested_speed; /* speed user has requested */ + unsigned int speed; /* actual link speed */ unsigned char requested_fc; /* flow control user has requested */ unsigned char fc; /* actual link flow control */ unsigned char autoneg; /* autonegotiating? */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 7a0b92b2f..02f80febe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -480,178 +480,293 @@ static int identify_port(struct net_device *dev, return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val); } -static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps) +/** + * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool + * @port_type: Firmware Port Type + * @mod_type: Firmware Module Type + * + * Translate Firmware Port/Module type to Ethtool Port Type. 
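Stepping back to the struct link_config change just above (mirrored for the VF driver in t4vf_common.h further down): the actual link speed is tracked in Mb/s, and with 100G support the value 100000 no longer fits in an unsigned short. A two-line demonstration in plain C, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t old_speed = (uint16_t)100000;  /* 100G in Mb/s */
            uint32_t new_speed = 100000;

            /* 100000 mod 65536 == 34464: the old 16-bit field silently
             * corrupts anything above 65535 Mb/s.
             */
            printf("u16: %u  u32: %u\n", (unsigned int)old_speed,
                   (unsigned int)new_speed);
            return 0;
    }
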
+ */ +static int from_fw_port_mod_type(enum fw_port_type port_type, + enum fw_port_module_type mod_type) { - unsigned int v = 0; - - if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || - type == FW_PORT_TYPE_BT_XAUI) { - v |= SUPPORTED_TP; - if (caps & FW_PORT_CAP_SPEED_100M) - v |= SUPPORTED_100baseT_Full; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { - v |= SUPPORTED_Backplane; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseKX_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseKX4_Full; - } else if (type == FW_PORT_TYPE_KR) { - v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; - } else if (type == FW_PORT_TYPE_BP_AP) { - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; - } else if (type == FW_PORT_TYPE_BP4_AP) { - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | - SUPPORTED_10000baseKX4_Full; - } else if (type == FW_PORT_TYPE_FIBER_XFI || - type == FW_PORT_TYPE_FIBER_XAUI || - type == FW_PORT_TYPE_SFP || - type == FW_PORT_TYPE_QSFP_10G || - type == FW_PORT_TYPE_QSA) { - v |= SUPPORTED_FIBRE; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_BP40_BA || - type == FW_PORT_TYPE_QSFP) { - v |= SUPPORTED_40000baseSR4_Full; - v |= SUPPORTED_FIBRE; + if (port_type == FW_PORT_TYPE_BT_SGMII || + port_type == FW_PORT_TYPE_BT_XFI || + port_type == FW_PORT_TYPE_BT_XAUI) { + return PORT_TP; + } else if (port_type == FW_PORT_TYPE_FIBER_XFI || + port_type == FW_PORT_TYPE_FIBER_XAUI) { + return PORT_FIBRE; + } else if (port_type == FW_PORT_TYPE_SFP || + port_type == FW_PORT_TYPE_QSFP_10G || + port_type == FW_PORT_TYPE_QSA || + port_type == FW_PORT_TYPE_QSFP) { + if (mod_type == FW_PORT_MOD_TYPE_LR || + mod_type == FW_PORT_MOD_TYPE_SR || + mod_type == FW_PORT_MOD_TYPE_ER || + mod_type == FW_PORT_MOD_TYPE_LRM) + return PORT_FIBRE; + else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + return PORT_DA; + else + return PORT_OTHER; } - if (caps & FW_PORT_CAP_ANEG) - v |= SUPPORTED_Autoneg; - return v; + return PORT_OTHER; } -static unsigned int to_fw_linkcaps(unsigned int caps) +/** + * speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities + * @speed: speed in Mb/s + * + * Translates a specific Port Speed into a Firmware Port Capabilities + * value.
+ */ +static unsigned int speed_to_fw_caps(int speed) { - unsigned int v = 0; - - if (caps & ADVERTISED_100baseT_Full) - v |= FW_PORT_CAP_SPEED_100M; - if (caps & ADVERTISED_1000baseT_Full) - v |= FW_PORT_CAP_SPEED_1G; - if (caps & ADVERTISED_10000baseT_Full) - v |= FW_PORT_CAP_SPEED_10G; - if (caps & ADVERTISED_40000baseSR4_Full) - v |= FW_PORT_CAP_SPEED_40G; - return v; + if (speed == 100) + return FW_PORT_CAP_SPEED_100M; + if (speed == 1000) + return FW_PORT_CAP_SPEED_1G; + if (speed == 10000) + return FW_PORT_CAP_SPEED_10G; + if (speed == 25000) + return FW_PORT_CAP_SPEED_25G; + if (speed == 40000) + return FW_PORT_CAP_SPEED_40G; + if (speed == 100000) + return FW_PORT_CAP_SPEED_100G; + return 0; } -static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +/** + * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask + * @port_type: Firmware Port Type + * @fw_caps: Firmware Port Capabilities + * @link_mode_mask: ethtool Link Mode Mask + * + * Translate a Firmware Port Capabilities specification to an ethtool + * Link Mode Mask. + */ +static void fw_caps_to_lmm(enum fw_port_type port_type, + unsigned int fw_caps, + unsigned long *link_mode_mask) { - const struct port_info *p = netdev_priv(dev); - - if (p->port_type == FW_PORT_TYPE_BT_SGMII || - p->port_type == FW_PORT_TYPE_BT_XFI || - p->port_type == FW_PORT_TYPE_BT_XAUI) { - cmd->port = PORT_TP; - } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || - p->port_type == FW_PORT_TYPE_FIBER_XAUI) { - cmd->port = PORT_FIBRE; - } else if (p->port_type == FW_PORT_TYPE_SFP || - p->port_type == FW_PORT_TYPE_QSFP_10G || - p->port_type == FW_PORT_TYPE_QSA || - p->port_type == FW_PORT_TYPE_QSFP) { - if (p->mod_type == FW_PORT_MOD_TYPE_LR || - p->mod_type == FW_PORT_MOD_TYPE_SR || - p->mod_type == FW_PORT_MOD_TYPE_ER || - p->mod_type == FW_PORT_MOD_TYPE_LRM) - cmd->port = PORT_FIBRE; - else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || - p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) - cmd->port = PORT_DA; - else - cmd->port = PORT_OTHER; + #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name \ + ## _BIT, link_mode_mask) + + #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ + do { \ + if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + SET_LMM(__lmm_name); \ + } while (0) + + switch (port_type) { + case FW_PORT_TYPE_BT_SGMII: + case FW_PORT_TYPE_BT_XFI: + case FW_PORT_TYPE_BT_XAUI: + SET_LMM(TP); + FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + break; + + case FW_PORT_TYPE_KX4: + case FW_PORT_TYPE_KX: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); + break; + + case FW_PORT_TYPE_KR: + SET_LMM(Backplane); + SET_LMM(10000baseKR_Full); + break; + + case FW_PORT_TYPE_BP_AP: + SET_LMM(Backplane); + SET_LMM(10000baseR_FEC); + SET_LMM(10000baseKR_Full); + SET_LMM(1000baseKX_Full); + break; + + case FW_PORT_TYPE_BP4_AP: + SET_LMM(Backplane); + SET_LMM(10000baseR_FEC); + SET_LMM(10000baseKR_Full); + SET_LMM(1000baseKX_Full); + SET_LMM(10000baseKX4_Full); + break; + + case FW_PORT_TYPE_FIBER_XFI: + case FW_PORT_TYPE_FIBER_XAUI: + case FW_PORT_TYPE_SFP: + case FW_PORT_TYPE_QSFP_10G: + case FW_PORT_TYPE_QSA: + SET_LMM(FIBRE); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + break; + + case FW_PORT_TYPE_BP40_BA: + case FW_PORT_TYPE_QSFP: + SET_LMM(FIBRE); + SET_LMM(40000baseSR4_Full); + break; + + case FW_PORT_TYPE_CR_QSFP: + 
case FW_PORT_TYPE_SFP28: + SET_LMM(FIBRE); + SET_LMM(25000baseCR_Full); + break; + + case FW_PORT_TYPE_KR4_100G: + case FW_PORT_TYPE_CR4_QSFP: + SET_LMM(FIBRE); + SET_LMM(100000baseCR4_Full); + break; + + default: + break; + } + + FW_CAPS_TO_LMM(ANEG, Autoneg); + FW_CAPS_TO_LMM(802_3_PAUSE, Pause); + FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); + + #undef FW_CAPS_TO_LMM + #undef SET_LMM +} + +/** + * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware + * capabilities + * + * @link_mode_mask: ethtool Link Mode Mask + * + * Translate ethtool Link Mode Mask into a Firmware Port capabilities + * value. + */ +static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask) +{ + unsigned int fw_caps = 0; + + #define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \ + do { \ + if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ + link_mode_mask)) \ + fw_caps |= FW_PORT_CAP_ ## __fw_name; \ + } while (0) + + LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M); + LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G); + LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G); + LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G); + LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G); + LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G); + + #undef LMM_TO_FW_CAPS + + return fw_caps; +} + +static int get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings) +{ + const struct port_info *pi = netdev_priv(dev); + struct ethtool_link_settings *base = &link_ksettings->base; + + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + + base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); + + if (pi->mdio_addr >= 0) { + base->phy_address = pi->mdio_addr; + base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII + ? ETH_MDIO_SUPPORTS_C22 + : ETH_MDIO_SUPPORTS_C45); } else { - cmd->port = PORT_OTHER; + base->phy_address = 255; + base->mdio_support = 0; } - if (p->mdio_addr >= 0) { - cmd->phy_address = p->mdio_addr; - cmd->transceiver = XCVR_EXTERNAL; - cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? - MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, + link_ksettings->link_modes.supported); + fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, + link_ksettings->link_modes.advertising); + fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, + link_ksettings->link_modes.lp_advertising); + + if (netif_carrier_ok(dev)) { + base->speed = pi->link_cfg.speed; + base->duplex = DUPLEX_FULL; } else { - cmd->phy_address = 0; /* not really, but no better option */ - cmd->transceiver = XCVR_INTERNAL; - cmd->mdio_support = 0; + base->speed = SPEED_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; } - cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); - cmd->advertising = from_fw_linkcaps(p->port_type, - p->link_cfg.advertising); - ethtool_cmd_speed_set(cmd, - netif_carrier_ok(dev) ? 
p->link_cfg.speed : 0); - cmd->duplex = DUPLEX_FULL; - cmd->autoneg = p->link_cfg.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} + base->autoneg = pi->link_cfg.autoneg; + if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, Autoneg); + if (pi->link_cfg.autoneg) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); -static unsigned int speed_to_caps(int speed) -{ - if (speed == 100) - return FW_PORT_CAP_SPEED_100M; - if (speed == 1000) - return FW_PORT_CAP_SPEED_1G; - if (speed == 10000) - return FW_PORT_CAP_SPEED_10G; - if (speed == 40000) - return FW_PORT_CAP_SPEED_40G; return 0; } -static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings + *link_ksettings) { - unsigned int cap; - struct port_info *p = netdev_priv(dev); - struct link_config *lc = &p->link_cfg; - u32 speed = ethtool_cmd_speed(cmd); + struct port_info *pi = netdev_priv(dev); + struct link_config *lc = &pi->link_cfg; + const struct ethtool_link_settings *base = &link_ksettings->base; struct link_config old_lc; - int ret; + unsigned int fw_caps; + int ret = 0; - if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ + /* only full-duplex supported */ + if (base->duplex != DUPLEX_FULL) return -EINVAL; if (!(lc->supported & FW_PORT_CAP_ANEG)) { /* PHY offers a single speed. See if that's what's * being requested. */ - if (cmd->autoneg == AUTONEG_DISABLE && - (lc->supported & speed_to_caps(speed))) + if (base->autoneg == AUTONEG_DISABLE && + (lc->supported & speed_to_fw_caps(base->speed))) return 0; return -EINVAL; } old_lc = *lc; - if (cmd->autoneg == AUTONEG_DISABLE) { - cap = speed_to_caps(speed); + if (base->autoneg == AUTONEG_DISABLE) { + fw_caps = speed_to_fw_caps(base->speed); - if (!(lc->supported & cap)) + if (!(lc->supported & fw_caps)) return -EINVAL; - lc->requested_speed = cap; + lc->requested_speed = fw_caps; lc->advertising = 0; } else { - cap = to_fw_linkcaps(cmd->advertising); - if (!(lc->supported & cap)) + fw_caps = + lmm_to_fw_caps(link_ksettings->link_modes.advertising); + + if (!(lc->supported & fw_caps)) return -EINVAL; lc->requested_speed = 0; - lc->advertising = cap | FW_PORT_CAP_ANEG; + lc->advertising = fw_caps | FW_PORT_CAP_ANEG; } - lc->autoneg = cmd->autoneg; + lc->autoneg = base->autoneg; /* If the firmware rejects the Link Configuration request, back out * the changes and report the error. 
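The SET_LMM()/FW_CAPS_TO_LMM() helpers above rely on token pasting to build ethtool link-mode bit names from short speed names. Stripped of kernel plumbing, the mechanism looks like the sketch below; the two _BIT values match the ethtool uapi numbering to the best of my knowledge, but treat them as illustrative:

    #include <stdio.h>

    enum {
            ETHTOOL_LINK_MODE_1000baseT_Full_BIT  = 5,
            ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12,
    };

    #define SET_LMM(name, mask) \
            (*(mask) |= 1UL << ETHTOOL_LINK_MODE_ ## name ## _BIT)

    int main(void)
    {
            unsigned long lmm = 0;

            SET_LMM(1000baseT_Full, &lmm);   /* expands to bit 5 */
            SET_LMM(10000baseT_Full, &lmm);  /* expands to bit 12 */
            printf("mask 0x%lx\n", lmm);     /* prints 0x1020 */
            return 0;
    }
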
*/ - ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc); + ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc); if (ret) *lc = old_lc; @@ -1093,8 +1208,8 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, } static const struct ethtool_ops cxgb_ethtool_ops = { - .get_settings = get_settings, - .set_settings = set_settings, + .get_link_ksettings = get_link_ksettings, + .set_link_ksettings = set_link_ksettings, .get_drvinfo = get_drvinfo, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6eea06a31..6694233e2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -64,6 +64,7 @@ #include #include #include +#include #include "cxgb4.h" #include "t4_regs.h" @@ -204,7 +205,7 @@ static int rx_dma_offset = 2; static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV]; module_param_array(num_vf, uint, NULL, 0644); -MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); +MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface."); #endif /* TX Queue select used to determine what algorithm to use for selecting TX @@ -458,11 +459,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - if (!(dev->flags & IFF_PROMISC)) { - __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); - if (!(dev->flags & IFF_ALLMULTI)) - __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); - } + __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); + __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, @@ -3733,7 +3731,8 @@ static int adap_init0(struct adapter *adap) return ret; /* Contact FW, advertising Master capability */ - ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); + ret = t4_fw_hello(adap, adap->mbox, adap->mbox, + is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state); if (ret < 0) { dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", ret); @@ -4304,10 +4303,17 @@ static const struct pci_error_handlers cxgb4_eeh = { .resume = eeh_resume, }; +/* Return true if the Link Configuration supports "High Speeds" (those greater + * than 1Gb/s). + */ static inline bool is_x_10g_port(const struct link_config *lc) { - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || - (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; + unsigned int speeds, high_speeds; + + speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); + high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); + + return high_speeds != 0; } static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, @@ -4334,6 +4340,11 @@ static void cfg_queues(struct adapter *adap) #endif int ciq_size; + /* Reduce memory usage in kdump environment, disable all offload. 
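The is_x_10g_port() rewrite above defines "high speed" as any speed capability bit other than 100M and 1G, so newly added speeds (25G here, faster ones later) qualify automatically instead of needing another explicit test. A worked example using the capability bit values defined later in this patch (100M=0x1, 1G=0x2, 25G=0x4, 10G=0x8, 40G=0x10, 100G=0x20); the helper name is hypothetical:

    #include <stdio.h>

    #define SPEED_MASK      0x3f            /* FW_PORT_CAP_SPEED_M */
    #define LOW_SPEEDS      (0x1 | 0x2)     /* 100M | 1G */

    static int is_high_speed(unsigned int supported)
    {
            unsigned int speeds = supported & SPEED_MASK;

            return (speeds & ~LOW_SPEEDS) != 0;
    }

    int main(void)
    {
            /* 1G-only port -> 0, 25G port -> 1, 10G+40G port -> 1 */
            printf("%d %d %d\n", is_high_speed(0x2),
                   is_high_speed(0x4), is_high_speed(0x18));
            return 0;
    }
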
+ */ + if (is_kdump_kernel()) + adap->params.offload = 0; + for_each_port(adap, i) n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); #ifdef CONFIG_CHELSIO_T4_DCB
@@ -4750,8 +4761,12 @@ static void print_port_info(const struct net_device *dev) bufp += sprintf(bufp, "1000/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) bufp += sprintf(bufp, "10G/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) + bufp += sprintf(bufp, "25G/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) bufp += sprintf(bufp, "40G/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) + bufp += sprintf(bufp, "100G/"); if (bufp != buf) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
@@ -4827,6 +4842,60 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) return -EINVAL; } +#ifdef CONFIG_PCI_IOV +static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) +{ + int err = 0; + int current_vfs = pci_num_vf(pdev); + u32 pcie_fw; + void __iomem *regs; + + regs = pci_ioremap_bar(pdev, 0); + if (!regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + return -ENOMEM; + } + + pcie_fw = readl(regs + PCIE_FW_A); + iounmap(regs); + /* Check if cxgb4 is the MASTER and fw is initialized */ + if (!(pcie_fw & PCIE_FW_INIT_F) || + !(pcie_fw & PCIE_FW_MASTER_VLD_F) || + PCIE_FW_MASTER_G(pcie_fw) != 4) { + dev_warn(&pdev->dev, + "cxgb4 driver needs to be MASTER to support SRIOV\n"); + return -EOPNOTSUPP; + } + + /* If any of the VFs is already assigned to a Guest OS, then + * SRIOV for this device cannot be modified + */ + if (current_vfs && pci_vfs_assigned(pdev)) { + dev_err(&pdev->dev, + "Cannot modify SR-IOV while VFs are assigned\n"); + num_vfs = current_vfs; + return num_vfs; + } + + /* Disable SRIOV when zero is passed. + * One needs to disable SRIOV before modifying it, else the + * stack throws the below warning: + * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
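For context on the callback being added here: the PCI core invokes .sriov_configure when userspace writes a VF count to the device's sriov_numvfs sysfs attribute, and it expects back the number of VFs actually enabled or a negative errno. A minimal sketch of that contract (hypothetical demo driver, shown only to mirror the flow cxgb4_iov_configure() implements above):

    #include <linux/pci.h>

    static int demo_sriov_configure(struct pci_dev *pdev, int num_vfs)
    {
            int err;

            if (!num_vfs) {         /* zero always means "disable" */
                    pci_disable_sriov(pdev);
                    return 0;
            }

            err = pci_enable_sriov(pdev, num_vfs);
            return err ? err : num_vfs;
    }
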
+ */ + if (!num_vfs) { + pci_disable_sriov(pdev); + return num_vfs; + } + + if (num_vfs != current_vfs) { + err = pci_enable_sriov(pdev, num_vfs); + if (err) + return err; + } + return num_vfs; +} +#endif + static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int func, i, err, s_qpp, qpp, num_seg; @@ -5160,11 +5229,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) sriov: #ifdef CONFIG_PCI_IOV - if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) + if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) { + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the num_vf module " + "parameter is deprecated - please use the pci sysfs " + "interface instead.\n"); if (pci_enable_sriov(pdev, num_vf[func]) == 0) dev_info(&pdev->dev, "instantiated %u virtual functions\n", num_vf[func]); + } #endif return 0; @@ -5257,6 +5331,9 @@ static struct pci_driver cxgb4_driver = { .probe = init_one, .remove = remove_one, .shutdown = remove_one, +#ifdef CONFIG_PCI_IOV + .sriov_configure = cxgb4_iov_configure, +#endif .err_handler = &cxgb4_eeh, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index bad253beb..ad3552df0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1192,7 +1192,7 @@ out_free: dev_kfree_skb_any(skb); /* Discard the packet if the length is greater than mtu */ max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tagged(skb)) max_pkt_len += VLAN_HLEN; if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) goto out_free; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a63addb4e..660204bff 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ + FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ FW_PORT_CAP_ANEG) /** @@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) speed = 1000; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + speed = 25000; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + speed = 100000; lc = &pi->link_cfg; @@ -7219,6 +7224,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc->speed = speed; lc->fc = fc; lc->supported = be16_to_cpu(p->u.info.pcap); + lc->lp_advertising = be16_to_cpu(p->u.info.lpacap); t4_os_link_changed(adap, pi->port_id, link_ok); } } @@ -7284,6 +7290,7 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p) static void init_link_config(struct link_config *lc, unsigned int caps) { lc->supported = caps; + lc->lp_advertising = 0; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 4705e2dea..e0ebe1378 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -104,6 +104,8 @@ enum { enum CPL_error { CPL_ERR_NONE = 0, + CPL_ERR_TCAM_PARITY = 1, + 
CPL_ERR_TCAM_MISS = 2, CPL_ERR_TCAM_FULL = 3, CPL_ERR_BAD_LENGTH = 15, CPL_ERR_BAD_ROUTE = 18, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 392d6644f..30507d444 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2249,22 +2249,28 @@ struct fw_acl_vlan_cmd { enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, FW_PORT_CAP_SPEED_1G = 0x0002, - FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_25G = 0x0004, FW_PORT_CAP_SPEED_10G = 0x0008, FW_PORT_CAP_SPEED_40G = 0x0010, FW_PORT_CAP_SPEED_100G = 0x0020, FW_PORT_CAP_FC_RX = 0x0040, FW_PORT_CAP_FC_TX = 0x0080, FW_PORT_CAP_ANEG = 0x0100, - FW_PORT_CAP_MDI_0 = 0x0200, - FW_PORT_CAP_MDI_1 = 0x0400, - FW_PORT_CAP_BEAN = 0x0800, - FW_PORT_CAP_PMA_LPBK = 0x1000, - FW_PORT_CAP_PCS_LPBK = 0x2000, - FW_PORT_CAP_PHYXS_LPBK = 0x4000, - FW_PORT_CAP_FAR_END_LPBK = 0x8000, + FW_PORT_CAP_MDIX = 0x0200, + FW_PORT_CAP_MDIAUTO = 0x0400, + FW_PORT_CAP_FEC = 0x0800, + FW_PORT_CAP_TECHKR = 0x1000, + FW_PORT_CAP_TECHKX4 = 0x2000, + FW_PORT_CAP_802_3_PAUSE = 0x4000, + FW_PORT_CAP_802_3_ASM_DIR = 0x8000, }; +#define FW_PORT_CAP_SPEED_S 0 +#define FW_PORT_CAP_SPEED_M 0x3f +#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S) +#define FW_PORT_CAP_SPEED_G(x) \ + (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M) + enum fw_port_mdi { FW_PORT_CAP_MDI_UNCHANGED, FW_PORT_CAP_MDI_AUTO, @@ -2376,7 +2382,8 @@ struct fw_port_cmd { __u8 cbllen; __u8 auxlinfo; __u8 dcbxdis_pkd; - __u8 r8_lo[3]; + __u8 r8_lo; + __be16 lpacap; __be64 r9; } info; struct fw_port_diags { @@ -2555,6 +2562,11 @@ enum fw_port_type { FW_PORT_TYPE_QSA, FW_PORT_TYPE_QSFP, FW_PORT_TYPE_BP40_BA, + FW_PORT_TYPE_KR4_100G, + FW_PORT_TYPE_CR4_QSFP, + FW_PORT_TYPE_CR_QSFP, + FW_PORT_TYPE_CR2_QSFP, + FW_PORT_TYPE_SFP28, FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 734dd776c..109bc6304 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -353,6 +353,10 @@ struct hash_mac_addr { u8 addr[ETH_ALEN]; }; +struct mbox_list { + struct list_head list; +}; + /* * Per-"adapter" (Virtual Function) information. 
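The FW_PORT_CAP_SPEED_S/M/V/G quartet added above follows the usual cxgb4 shift/mask/insert/extract naming for the speed field, which now spans bits 5:0 of the port capabilities word. Pulled out into a standalone demo (illustrative only):

    #include <stdio.h>

    #define FW_PORT_CAP_SPEED_S     0
    #define FW_PORT_CAP_SPEED_M     0x3f
    #define FW_PORT_CAP_SPEED_V(x)  ((x) << FW_PORT_CAP_SPEED_S)
    #define FW_PORT_CAP_SPEED_G(x) \
            (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)

    int main(void)
    {
            /* ANEG (0x100) | 100G (0x20) | 10G (0x8) */
            unsigned int caps = 0x100 | 0x20 | 0x8;

            /* extraction strips the non-speed bits: prints 0x28 */
            printf("speeds 0x%x\n", FW_PORT_CAP_SPEED_G(caps));
            return 0;
    }
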
*/ @@ -387,6 +391,10 @@ struct adapter { /* various locks */ spinlock_t stats_lock; + /* lock for mailbox cmd list */ + spinlock_t mbox_lock; + struct mbox_list mlist; + /* support for mailbox command/reply logging */ #define T4VF_OS_LOG_MBOX_CMDS 256 struct mbox_cmd_log *mbox_log; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 04fc6f6d1..e116bb8d1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -937,12 +937,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) { struct port_info *pi = netdev_priv(dev); - if (!(dev->flags & IFF_PROMISC)) { - __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); - if (!(dev->flags & IFF_ALLMULTI)) - __dev_mc_sync(dev, cxgb4vf_mac_sync, - cxgb4vf_mac_unsync); - } + __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); + __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); return t4vf_set_rxmode(pi->adapter, pi->viid, -1, (dev->flags & IFF_PROMISC) != 0, (dev->flags & IFF_ALLMULTI) != 0, @@ -1205,105 +1201,187 @@ static void cxgb4vf_poll_controller(struct net_device *dev) * state of the port to which we're linked. */ -static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type, - unsigned int caps) -{ - unsigned int v = 0; - - if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || - type == FW_PORT_TYPE_BT_XAUI) { - v |= SUPPORTED_TP; - if (caps & FW_PORT_CAP_SPEED_100M) - v |= SUPPORTED_100baseT_Full; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { - v |= SUPPORTED_Backplane; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseKX_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseKX4_Full; - } else if (type == FW_PORT_TYPE_KR) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; - else if (type == FW_PORT_TYPE_BP_AP) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; - else if (type == FW_PORT_TYPE_BP4_AP) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | - SUPPORTED_10000baseKX4_Full; - else if (type == FW_PORT_TYPE_FIBER_XFI || - type == FW_PORT_TYPE_FIBER_XAUI || - type == FW_PORT_TYPE_SFP || - type == FW_PORT_TYPE_QSFP_10G || - type == FW_PORT_TYPE_QSA) { - v |= SUPPORTED_FIBRE; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_BP40_BA || - type == FW_PORT_TYPE_QSFP) { - v |= SUPPORTED_40000baseSR4_Full; - v |= SUPPORTED_FIBRE; - } - - if (caps & FW_PORT_CAP_ANEG) - v |= SUPPORTED_Autoneg; - return v; -} - -static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - const struct port_info *p = netdev_priv(dev); - - if (p->port_type == FW_PORT_TYPE_BT_SGMII || - p->port_type == FW_PORT_TYPE_BT_XFI || - p->port_type == FW_PORT_TYPE_BT_XAUI) - cmd->port = PORT_TP; - else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || - p->port_type == FW_PORT_TYPE_FIBER_XAUI) - cmd->port = PORT_FIBRE; - else if (p->port_type == FW_PORT_TYPE_SFP || - p->port_type == FW_PORT_TYPE_QSFP_10G || - p->port_type == FW_PORT_TYPE_QSA || - p->port_type == FW_PORT_TYPE_QSFP) { - if (p->mod_type == FW_PORT_MOD_TYPE_LR || - 
p->mod_type == FW_PORT_MOD_TYPE_SR || - p->mod_type == FW_PORT_MOD_TYPE_ER || - p->mod_type == FW_PORT_MOD_TYPE_LRM) - cmd->port = PORT_FIBRE; - else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || - p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) - cmd->port = PORT_DA; +/** + * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool + * @port_type: Firmware Port Type + * @mod_type: Firmware Module Type + * + * Translate Firmware Port/Module type to Ethtool Port Type. + */ +static int from_fw_port_mod_type(enum fw_port_type port_type, + enum fw_port_module_type mod_type) +{ + if (port_type == FW_PORT_TYPE_BT_SGMII || + port_type == FW_PORT_TYPE_BT_XFI || + port_type == FW_PORT_TYPE_BT_XAUI) { + return PORT_TP; + } else if (port_type == FW_PORT_TYPE_FIBER_XFI || + port_type == FW_PORT_TYPE_FIBER_XAUI) { + return PORT_FIBRE; + } else if (port_type == FW_PORT_TYPE_SFP || + port_type == FW_PORT_TYPE_QSFP_10G || + port_type == FW_PORT_TYPE_QSA || + port_type == FW_PORT_TYPE_QSFP) { + if (mod_type == FW_PORT_MOD_TYPE_LR || + mod_type == FW_PORT_MOD_TYPE_SR || + mod_type == FW_PORT_MOD_TYPE_ER || + mod_type == FW_PORT_MOD_TYPE_LRM) + return PORT_FIBRE; + else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + return PORT_DA; else - cmd->port = PORT_OTHER; - } else - cmd->port = PORT_OTHER; + return PORT_OTHER; + } + + return PORT_OTHER; +} + +/** + * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask + * @port_type: Firmware Port Type + * @fw_caps: Firmware Port Capabilities + * @link_mode_mask: ethtool Link Mode Mask + * + * Translate a Firmware Port Capabilities specification to an ethtool + * Link Mode Mask. + */ +static void fw_caps_to_lmm(enum fw_port_type port_type, + unsigned int fw_caps, + unsigned long *link_mode_mask) +{ + #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\ + ## _BIT, link_mode_mask) + + #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ + do { \ + if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + SET_LMM(__lmm_name); \ + } while (0) + + switch (port_type) { + case FW_PORT_TYPE_BT_SGMII: + case FW_PORT_TYPE_BT_XFI: + case FW_PORT_TYPE_BT_XAUI: + SET_LMM(TP); + FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + break; + + case FW_PORT_TYPE_KX4: + case FW_PORT_TYPE_KX: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); + break; + + case FW_PORT_TYPE_KR: + SET_LMM(Backplane); + SET_LMM(10000baseKR_Full); + break; + + case FW_PORT_TYPE_BP_AP: + SET_LMM(Backplane); + SET_LMM(10000baseR_FEC); + SET_LMM(10000baseKR_Full); + SET_LMM(1000baseKX_Full); + break; + + case FW_PORT_TYPE_BP4_AP: + SET_LMM(Backplane); + SET_LMM(10000baseR_FEC); + SET_LMM(10000baseKR_Full); + SET_LMM(1000baseKX_Full); + SET_LMM(10000baseKX4_Full); + break; + + case FW_PORT_TYPE_FIBER_XFI: + case FW_PORT_TYPE_FIBER_XAUI: + case FW_PORT_TYPE_SFP: + case FW_PORT_TYPE_QSFP_10G: + case FW_PORT_TYPE_QSA: + SET_LMM(FIBRE); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + break; + + case FW_PORT_TYPE_BP40_BA: + case FW_PORT_TYPE_QSFP: + SET_LMM(FIBRE); + SET_LMM(40000baseSR4_Full); + break; + + case FW_PORT_TYPE_CR_QSFP: + case FW_PORT_TYPE_SFP28: + SET_LMM(FIBRE); + SET_LMM(25000baseCR_Full); + break; + + case FW_PORT_TYPE_KR4_100G: + case FW_PORT_TYPE_CR4_QSFP: + SET_LMM(FIBRE); + SET_LMM(100000baseCR4_Full); + break; + + default: + 
break; + } + + FW_CAPS_TO_LMM(ANEG, Autoneg); + FW_CAPS_TO_LMM(802_3_PAUSE, Pause); + FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); + + #undef FW_CAPS_TO_LMM + #undef SET_LMM +} + +static int cxgb4vf_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings + *link_ksettings) +{ + const struct port_info *pi = netdev_priv(dev); + struct ethtool_link_settings *base = &link_ksettings->base; - if (p->mdio_addr >= 0) { - cmd->phy_address = p->mdio_addr; - cmd->transceiver = XCVR_EXTERNAL; - cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? - MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + + base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); + + if (pi->mdio_addr >= 0) { + base->phy_address = pi->mdio_addr; + base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII + ? ETH_MDIO_SUPPORTS_C22 + : ETH_MDIO_SUPPORTS_C45); } else { - cmd->phy_address = 0; /* not really, but no better option */ - cmd->transceiver = XCVR_INTERNAL; - cmd->mdio_support = 0; - } - - cmd->supported = t4vf_from_fw_linkcaps(p->port_type, - p->link_cfg.supported); - cmd->advertising = t4vf_from_fw_linkcaps(p->port_type, - p->link_cfg.advertising); - ethtool_cmd_speed_set(cmd, - netif_carrier_ok(dev) ? p->link_cfg.speed : 0); - cmd->duplex = DUPLEX_FULL; - cmd->autoneg = p->link_cfg.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + base->phy_address = 255; + base->mdio_support = 0; + } + + fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, + link_ksettings->link_modes.supported); + fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, + link_ksettings->link_modes.advertising); + fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, + link_ksettings->link_modes.lp_advertising); + + if (netif_carrier_ok(dev)) { + base->speed = pi->link_cfg.speed; + base->duplex = DUPLEX_FULL; + } else { + base->speed = SPEED_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; + } + + base->autoneg = pi->link_cfg.autoneg; + if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, Autoneg); + if (pi->link_cfg.autoneg) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); + return 0; } @@ -1679,7 +1757,7 @@ static void cxgb4vf_get_wol(struct net_device *dev, #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) static const struct ethtool_ops cxgb4vf_ethtool_ops = { - .get_settings = cxgb4vf_get_settings, + .get_link_ksettings = cxgb4vf_get_link_ksettings, .get_drvinfo = cxgb4vf_get_drvinfo, .get_msglevel = cxgb4vf_get_msglevel, .set_msglevel = cxgb4vf_set_msglevel, @@ -2778,6 +2856,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Initialize SMP data synchronization resources. */ spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->mbox_lock); + INIT_LIST_HEAD(&adapter->mlist.list); /* * Map our I/O registers in BAR0. 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 1bb57d3fb..c8fd4f8fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1188,7 +1188,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) /* Discard the packet if the length is greater than mtu */ max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tagged(skb)) max_pkt_len += VLAN_HLEN; if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) goto out_free; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 438374a05..17a2bbcf9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -107,8 +107,9 @@ struct t4vf_port_stats { struct link_config { unsigned int supported; /* link capabilities */ unsigned int advertising; /* advertised capabilities */ - unsigned short requested_speed; /* speed user has requested */ - unsigned short speed; /* actual link speed */ + unsigned short lp_advertising; /* peer advertised capabilities */ + unsigned int requested_speed; /* speed user has requested */ + unsigned int speed; /* actual link speed */ unsigned char requested_fc; /* flow control user has requested */ unsigned char fc; /* actual link flow control */ unsigned char autoneg; /* autonegotiating? */ @@ -270,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc) return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; } +/* Return true if the Link Configuration supports "High Speeds" (those greater + * than 1Gb/s). + */ static inline bool is_x_10g_port(const struct link_config *lc) { - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || - (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; + unsigned int speeds, high_speeds; + + speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); + high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); + + return high_speeds != 0; } static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 955ff7c61..b5622b168 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -139,6 +139,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi)); __be64 cmd_rpl[MBOX_LEN / 8]; + struct mbox_list entry; /* In T6, mailbox size is changed to 128 bytes to avoid * invalidating the entire prefetch buffer. @@ -156,6 +157,51 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) return -EINVAL; + /* Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... + */ + spin_lock(&adapter->mbox_lock); + list_add_tail(&entry.list, &adapter->mlist.list); + spin_unlock(&adapter->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. 
We very rarely + * contend on access to the mailbox ... + */ + if (i > FW_CMD_MAX_TIMEOUT) { + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); + ret = -EBUSY; + t4vf_record_mbox(adapter, cmd, size, access, ret); + return ret; + } + + /* If we're at the head, break out and start the mailbox + * protocol. + */ + if (list_first_entry(&adapter->mlist.list, struct mbox_list, + list) == &entry) + break; + + /* Delay for a bit before checking again ... */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + mdelay(ms); + } + } + /* * Loop trying to get ownership of the mailbox. Return an error * if we can't gain ownership.
@@ -164,6 +210,9 @@ for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); if (v != MBOX_OWNER_DRV) { + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; t4vf_record_mbox(adapter, cmd, size, access, ret); return ret;
@@ -248,6 +297,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, if (cmd_op != FW_VI_STATS_CMD) t4vf_record_mbox(adapter, cmd_rpl, size, access, execute); + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); return -FW_CMD_RETVAL_G(v); } }
@@ -255,12 +307,16 @@ /* We timed out. Return the error ... */ ret = -ETIMEDOUT; t4vf_record_mbox(adapter, cmd, size, access, ret); + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); return ret; } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ - FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ + FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ + FW_PORT_CAP_ANEG) /** * init_link_config - initialize a link's SW state
@@ -273,6 +329,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, static void init_link_config(struct link_config *lc, unsigned int caps) { lc->supported = caps; + lc->lp_advertising = 0; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
@@ -1656,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) speed = 1000; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + speed = 25000; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + speed = 100000; /* * Scan all of our "ports" (Virtual Interfaces) looking for
@@ -1688,6 +1749,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) lc->fc = fc; lc->supported = be16_to_cpu(port_cmd->u.info.pcap); + lc->lp_advertising = + be16_to_cpu(port_cmd->u.info.lpacap); t4vf_os_link_changed(adapter, pidx, link_ok); } }
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile new file mode 100644 index 000000000..2362230ef --- /dev/null +++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -0,0 +1,3 @@ +obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o + +libcxgb-y := libcxgb_ppm.o
diff --git
a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c new file mode 100644 index 000000000..0ed161642 --- /dev/null +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -0,0 +1,498 @@ +/* + * libcxgb_ppm.c: Chelsio common library for T3/T4/T5 iSCSI PagePod Manager + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Written by: Karen Xie (kxie@chelsio.com) + */ + +#define DRV_NAME "libcxgb" +#define DRV_VERSION "1.0.0-ko" +#define pr_fmt(fmt) DRV_NAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libcxgb_ppm.h" + +/* Direct Data Placement - + * Directly place the iSCSI Data-In or Data-Out PDU's payload into + * pre-posted final destination host-memory buffers based on the + * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT) + * in Data-Out PDUs. The host memory address is programmed into + * h/w in the format of pagepod entries. The location of the + * pagepod entry is encoded into ddp tag which is used as the base + * for ITT/TTT. 
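The tag layout described in the comment above can be made concrete: with PPOD_IDX_SHIFT == 6 (from libcxgb_ppm.h later in this patch), a ddp tag packs a 6-bit color under the pagepod index, leaving bits 31:30 free for the page-size selector / non-ddp marker. An illustrative round-trip with example values; the composition mirrors what cxgbi_ppm_ppods_reserve() builds via cxgbi_ppm_make_ddp_tag():

    #include <stdio.h>

    #define PPOD_IDX_SHIFT  6
    #define PPOD_COLOR_MASK 0x3f

    int main(void)
    {
            unsigned int hwidx = 0x123, color = 0x2a;
            unsigned int tag = (hwidx << PPOD_IDX_SHIFT) | color;

            /* prints: tag 0x48ea (idx 0x123, color 0x2a) */
            printf("tag 0x%x (idx 0x%x, color 0x%x)\n", tag,
                   tag >> PPOD_IDX_SHIFT, tag & PPOD_COLOR_MASK);
            return 0;
    }
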
+ */ + +/* Direct-Data Placement page size adjustment + */ +int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz) +{ + struct cxgbi_tag_format *tformat = &ppm->tformat; + int i; + + for (i = 0; i < DDP_PGIDX_MAX; i++) { + if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT + + tformat->pgsz_order[i])) { + pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n", + __func__, ppm->ndev->name, pgsz, i); + return i; + } + } + pr_info("ippm: ddp page size %lu not supported.\n", pgsz); + return DDP_PGIDX_MAX; +} + +/* DDP setup & teardown + */ +static int ppm_find_unused_entries(unsigned long *bmap, + unsigned int max_ppods, + unsigned int start, + unsigned int nr, + unsigned int align_mask) +{ + unsigned long i; + + i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask); + + if (unlikely(i >= max_ppods) && (start > nr)) + i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1, + align_mask); + if (unlikely(i >= max_ppods)) + return -ENOSPC; + + bitmap_set(bmap, i, nr); + return (int)i; +} + +static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count, + unsigned long caller_data) +{ + struct cxgbi_ppod_data *pdata = ppm->ppod_data + i; + + pdata->caller_data = caller_data; + pdata->npods = count; + + if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1)) + pdata->color = 0; + else + pdata->color++; +} + +static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count, + unsigned long caller_data) +{ + struct cxgbi_ppm_pool *pool; + unsigned int cpu; + int i; + + cpu = get_cpu(); + pool = per_cpu_ptr(ppm->pool, cpu); + spin_lock_bh(&pool->lock); + put_cpu(); + + i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max, + pool->next, count, 0); + if (i < 0) { + pool->next = 0; + spin_unlock_bh(&pool->lock); + return -ENOSPC; + } + + pool->next = i + count; + if (pool->next >= ppm->pool_index_max) + pool->next = 0; + + spin_unlock_bh(&pool->lock); + + pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n", + __func__, cpu, i, count, i + cpu * ppm->pool_index_max, + pool->next); + + i += cpu * ppm->pool_index_max; + ppm_mark_entries(ppm, i, count, caller_data); + + return i; +} + +static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count, + unsigned long caller_data) +{ + int i; + + spin_lock_bh(&ppm->map_lock); + i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max, + ppm->next, count, 0); + if (i < 0) { + ppm->next = 0; + spin_unlock_bh(&ppm->map_lock); + pr_debug("ippm: NO suitable entries %u available.\n", + count); + return -ENOSPC; + } + + ppm->next = i + count; + if (ppm->next >= ppm->bmap_index_max) + ppm->next = 0; + + spin_unlock_bh(&ppm->map_lock); + + pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n", + __func__, i, count, i + ppm->pool_rsvd, ppm->next, + caller_data); + + i += ppm->pool_rsvd; + ppm_mark_entries(ppm, i, count, caller_data); + + return i; +} + +static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count) +{ + pr_debug("%s: idx %d + %d.\n", __func__, i, count); + + if (i < ppm->pool_rsvd) { + unsigned int cpu; + struct cxgbi_ppm_pool *pool; + + cpu = i / ppm->pool_index_max; + i %= ppm->pool_index_max; + + pool = per_cpu_ptr(ppm->pool, cpu); + spin_lock_bh(&pool->lock); + bitmap_clear(pool->bmap, i, count); + + if (i < pool->next) + pool->next = i; + spin_unlock_bh(&pool->lock); + + pr_debug("%s: cpu %u, idx %d, next %u.\n", + __func__, cpu, i, pool->next); + } else { + spin_lock_bh(&ppm->map_lock); + + i -= ppm->pool_rsvd; + bitmap_clear(ppm->ppod_bmap, i, count); + + if (i < ppm->next) + 
ppm->next = i; + spin_unlock_bh(&ppm->map_lock); + + pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next); + } +} + +void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx) +{ + struct cxgbi_ppod_data *pdata; + + if (idx >= ppm->ppmax) { + pr_warn("ippm: idx too big %u > %u.\n", idx, ppm->ppmax); + return; + } + + pdata = ppm->ppod_data + idx; + if (!pdata->npods) { + pr_warn("ippm: idx %u, npods 0.\n", idx); + return; + } + + pr_debug("release idx %u, npods %u.\n", idx, pdata->npods); + ppm_unmark_entries(ppm, idx, pdata->npods); +} +EXPORT_SYMBOL(cxgbi_ppm_ppod_release); + +int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages, + u32 per_tag_pg_idx, u32 *ppod_idx, + u32 *ddp_tag, unsigned long caller_data) +{ + struct cxgbi_ppod_data *pdata; + unsigned int npods; + int idx = -1; + unsigned int hwidx; + u32 tag; + + npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; + if (!npods) { + pr_warn("%s: pages %u -> npods %u, full.\n", + __func__, nr_pages, npods); + return -EINVAL; + } + + /* grab from cpu pool first */ + idx = ppm_get_cpu_entries(ppm, npods, caller_data); + /* try the general pool */ + if (idx < 0) + idx = ppm_get_entries(ppm, npods, caller_data); + if (idx < 0) { + pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n", + nr_pages, npods, ppm->next, caller_data); + return idx; + } + + pdata = ppm->ppod_data + idx; + hwidx = ppm->base_idx + idx; + + tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color); + + if (per_tag_pg_idx) + tag |= (per_tag_pg_idx << 30) & 0xC0000000; + + *ppod_idx = idx; + *ddp_tag = tag; + + pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n", + nr_pages, tag, idx, npods, caller_data); + + return npods; +} +EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve); + +void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag, + unsigned int tid, unsigned int offset, + unsigned int length, + struct cxgbi_pagepod_hdr *hdr) +{ + /* The ddp tag in pagepod should be with bit 31:30 set to 0. 
+ * The ddp Tag on the wire should be with non-zero 31:30 to the peer + */ + tag &= 0x3FFFFFFF; + + hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid)); + + hdr->rsvd = 0; + hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask); + hdr->max_offset = htonl(length); + hdr->page_offset = htonl(offset); + + pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n", + tag, tid, length, offset); +} +EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr); + +static void ppm_free(struct cxgbi_ppm *ppm) +{ + vfree(ppm); +} + +static void ppm_destroy(struct kref *kref) +{ + struct cxgbi_ppm *ppm = container_of(kref, + struct cxgbi_ppm, + refcnt); + pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n", + ppm->ndev->name, ppm); + + *ppm->ppm_pp = NULL; + + free_percpu(ppm->pool); + ppm_free(ppm); +} + +int cxgbi_ppm_release(struct cxgbi_ppm *ppm) +{ + if (ppm) { + int rv; + + rv = kref_put(&ppm->refcnt, ppm_destroy); + return rv; + } + return 1; +} +EXPORT_SYMBOL(cxgbi_ppm_release); + +static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, + unsigned int *pcpu_ppmax) +{ + struct cxgbi_ppm_pool *pools; + unsigned int ppmax = (*total) / num_possible_cpus(); + unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3; + unsigned int bmap; + unsigned int alloc_sz; + unsigned int count = 0; + unsigned int cpu; + + /* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */ + if (ppmax > max) + ppmax = max; + + /* pool size must be multiple of unsigned long */ + bmap = BITS_TO_LONGS(ppmax); + ppmax = (bmap * sizeof(unsigned long)) << 3; + + alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; + pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool)); + + if (!pools) + return NULL; + + for_each_possible_cpu(cpu) { + struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu); + + memset(ppool, 0, alloc_sz); + spin_lock_init(&ppool->lock); + count += ppmax; + } + + *total = count; + *pcpu_ppmax = ppmax; + + return pools; +} + +int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, + struct pci_dev *pdev, void *lldev, + struct cxgbi_tag_format *tformat, + unsigned int ppmax, + unsigned int llimit, + unsigned int start, + unsigned int reserve_factor) +{ + struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); + struct cxgbi_ppm_pool *pool = NULL; + unsigned int ppmax_pool = 0; + unsigned int pool_index_max = 0; + unsigned int alloc_sz; + unsigned int ppod_bmap_size; + + if (ppm) { + pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n", + ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax); + kref_get(&ppm->refcnt); + return 1; + } + + if (reserve_factor) { + ppmax_pool = ppmax / reserve_factor; + pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); + + pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", + ndev->name, ppmax, ppmax_pool, pool_index_max); + } + + ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool); + alloc_sz = sizeof(struct cxgbi_ppm) + + ppmax * (sizeof(struct cxgbi_ppod_data)) + + ppod_bmap_size * sizeof(unsigned long); + + ppm = vmalloc(alloc_sz); + if (!ppm) + goto release_ppm_pool; + + memset(ppm, 0, alloc_sz); + + ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]); + + if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) { + unsigned int start = ppmax - ppmax_pool; + unsigned int end = ppod_bmap_size >> 3; + + bitmap_set(ppm->ppod_bmap, ppmax, end - start); + pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n", + __func__, ppmax, ppmax_pool, ppod_bmap_size, start, + end); + } + + spin_lock_init(&ppm->map_lock); + kref_init(&ppm->refcnt); + + 
memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format)); + + ppm->ppm_pp = ppm_pp; + ppm->ndev = ndev; + ppm->pdev = pdev; + ppm->lldev = lldev; + ppm->ppmax = ppmax; + ppm->next = 0; + ppm->llimit = llimit; + ppm->base_idx = start > llimit ? + (start - llimit + 1) >> PPOD_SIZE_SHIFT : 0; + ppm->bmap_index_max = ppmax - ppmax_pool; + + ppm->pool = pool; + ppm->pool_rsvd = ppmax_pool; + ppm->pool_index_max = pool_index_max; + + /* check one more time */ + if (*ppm_pp) { + ppm_free(ppm); + ppm = (struct cxgbi_ppm *)(*ppm_pp); + + pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n", + ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax); + + kref_get(&ppm->refcnt); + return 1; + } + *ppm_pp = ppm; + + ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE); + + pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n", + ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE, + ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd, + ppm->pool_index_max); + + return 0; + +release_ppm_pool: + free_percpu(pool); + return -ENOMEM; +} +EXPORT_SYMBOL(cxgbi_ppm_init); + +unsigned int cxgbi_tagmask_set(unsigned int ppmax) +{ + unsigned int bits = fls(ppmax); + + if (bits > PPOD_IDX_MAX_SIZE) + bits = PPOD_IDX_MAX_SIZE; + + pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n", + ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT)); + + return 1 << (bits + PPOD_IDX_SHIFT); +} +EXPORT_SYMBOL(cxgbi_tagmask_set); + +MODULE_AUTHOR("Chelsio Communications"); +MODULE_DESCRIPTION("Chelsio common library"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h new file mode 100644 index 000000000..e995a1a38 --- /dev/null +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h @@ -0,0 +1,334 @@ +/* + * libcxgb_ppm.h: Chelsio common library for T3/T4/T5 iSCSI ddp operation + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Written by: Karen Xie (kxie@chelsio.com) + */ + +#ifndef __LIBCXGB_PPM_H__ +#define __LIBCXGB_PPM_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct cxgbi_pagepod_hdr { + u32 vld_tid; + u32 pgsz_tag_clr; + u32 max_offset; + u32 page_offset; + u64 rsvd; +}; + +#define PPOD_PAGES_MAX 4 +struct cxgbi_pagepod { + struct cxgbi_pagepod_hdr hdr; + u64 addr[PPOD_PAGES_MAX + 1]; +}; + +/* ddp tag format + * for a 32-bit tag: + * bit # + * 31 ..... ..... 0 + * X Y...Y Z...Z, where + * ^ ^^^^^ ^^^^ + * | | |____ when ddp bit = 0: color bits + * | | + * | |____ when ddp bit = 0: idx into the ddp memory region + * | + * |____ ddp bit: 0 - ddp tag, 1 - non-ddp tag + * + * [page selector:2] [sw/free bits] [0] [idx] [color:6] + */ + +#define DDP_PGIDX_MAX 4 +#define DDP_PGSZ_BASE_SHIFT 12 /* base page 4K */ + +struct cxgbi_task_tag_info { + unsigned char flags; +#define CXGBI_PPOD_INFO_FLAG_VALID 0x1 +#define CXGBI_PPOD_INFO_FLAG_MAPPED 0x2 + unsigned char cid; + unsigned short pg_shift; + unsigned int npods; + unsigned int idx; + unsigned int tag; + struct cxgbi_pagepod_hdr hdr; + int nents; + int nr_pages; + struct scatterlist *sgl; +}; + +struct cxgbi_tag_format { + unsigned char pgsz_order[DDP_PGIDX_MAX]; + unsigned char pgsz_idx_dflt; + unsigned char free_bits:4; + unsigned char color_bits:4; + unsigned char idx_bits; + unsigned char rsvd_bits; + unsigned int no_ddp_mask; + unsigned int idx_mask; + unsigned int color_mask; + unsigned int idx_clr_mask; + unsigned int rsvd_mask; +}; + +struct cxgbi_ppod_data { + unsigned char pg_idx:2; + unsigned char color:6; + unsigned char chan_id; + unsigned short npods; + unsigned long caller_data; +}; + +/* per cpu ppm pool */ +struct cxgbi_ppm_pool { + unsigned int base; /* base index */ + unsigned int next; /* next possible free index */ + spinlock_t lock; /* ppm pool lock */ + unsigned long bmap[0]; +} ____cacheline_aligned_in_smp; + +struct cxgbi_ppm { + struct kref refcnt; + struct net_device *ndev; /* net_device, 1st port */ + struct pci_dev *pdev; + void *lldev; + void **ppm_pp; + struct cxgbi_tag_format tformat; + unsigned int ppmax; + unsigned int llimit; + unsigned int base_idx; + + unsigned int pool_rsvd; + unsigned int pool_index_max; + struct cxgbi_ppm_pool __percpu *pool; + /* map lock */ + spinlock_t map_lock; /* ppm map lock */ + unsigned int bmap_index_max; + unsigned int next; + unsigned long *ppod_bmap; + struct cxgbi_ppod_data ppod_data[0]; +}; + +#define DDP_THRESHOLD 512 + +#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ + +#define IPPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */ +#define PPOD_SIZE_SHIFT 6 + +/* page pods are allocated in groups of this size (must be power of 2) */ +#define PPOD_CLUSTER_SIZE 16U + +#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ +#define ULPMEM_IDATA_MAX_NPPODS 3 /* (PPOD_SIZE * 3 + ulptx hdr) < 256B */ +#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ + +#define PPOD_COLOR_SHIFT 0 +#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT) + +#define PPOD_IDX_SHIFT 6 +#define PPOD_IDX_MAX_SIZE 24 + +#define PPOD_TID_SHIFT 0 +#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT) + +#define PPOD_TAG_SHIFT 6 +#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT) + +#define PPOD_VALID_SHIFT 24 +#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) +#define PPOD_VALID_FLAG PPOD_VALID(1U) + +#define PPOD_PI_EXTRACT_CTL_SHIFT 31 +#define PPOD_PI_EXTRACT_CTL(x) ((x) << PPOD_PI_EXTRACT_CTL_SHIFT) +#define PPOD_PI_EXTRACT_CTL_FLAG PPOD_PI_EXTRACT_CTL(1U) + +#define 
PPOD_PI_TYPE_SHIFT 29 +#define PPOD_PI_TYPE_MASK 0x3 +#define PPOD_PI_TYPE(x) ((x) << PPOD_PI_TYPE_SHIFT) + +#define PPOD_PI_CHECK_CTL_SHIFT 27 +#define PPOD_PI_CHECK_CTL_MASK 0x3 +#define PPOD_PI_CHECK_CTL(x) ((x) << PPOD_PI_CHECK_CTL_SHIFT) + +#define PPOD_PI_REPORT_CTL_SHIFT 25 +#define PPOD_PI_REPORT_CTL_MASK 0x3 +#define PPOD_PI_REPORT_CTL(x) ((x) << PPOD_PI_REPORT_CTL_SHIFT) + +static inline int cxgbi_ppm_is_ddp_tag(struct cxgbi_ppm *ppm, u32 tag) +{ + return !(tag & ppm->tformat.no_ddp_mask); +} + +static inline int cxgbi_ppm_sw_tag_is_usable(struct cxgbi_ppm *ppm, + u32 tag) +{ + /* the sw tag must be using <= 31 bits */ + return !(tag & 0x80000000U); +} + +static inline int cxgbi_ppm_make_non_ddp_tag(struct cxgbi_ppm *ppm, + u32 sw_tag, + u32 *final_tag) +{ + struct cxgbi_tag_format *tformat = &ppm->tformat; + + if (!cxgbi_ppm_sw_tag_is_usable(ppm, sw_tag)) { + pr_info("sw_tag 0x%x NOT usable.\n", sw_tag); + return -EINVAL; + } + + if (!sw_tag) { + *final_tag = tformat->no_ddp_mask; + } else { + unsigned int shift = tformat->idx_bits + tformat->color_bits; + u32 lower = sw_tag & tformat->idx_clr_mask; + u32 upper = (sw_tag >> shift) << (shift + 1); + + *final_tag = upper | tformat->no_ddp_mask | lower; + } + return 0; +} + +static inline u32 cxgbi_ppm_decode_non_ddp_tag(struct cxgbi_ppm *ppm, + u32 tag) +{ + struct cxgbi_tag_format *tformat = &ppm->tformat; + unsigned int shift = tformat->idx_bits + tformat->color_bits; + u32 lower = tag & tformat->idx_clr_mask; + u32 upper = (tag >> tformat->rsvd_bits) << shift; + + return upper | lower; +} + +static inline u32 cxgbi_ppm_ddp_tag_get_idx(struct cxgbi_ppm *ppm, + u32 ddp_tag) +{ + u32 hw_idx = (ddp_tag >> PPOD_IDX_SHIFT) & + ppm->tformat.idx_mask; + + return hw_idx - ppm->base_idx; +} + +static inline u32 cxgbi_ppm_make_ddp_tag(unsigned int hw_idx, + unsigned char color) +{ + return (hw_idx << PPOD_IDX_SHIFT) | ((u32)color); +} + +static inline unsigned long +cxgbi_ppm_get_tag_caller_data(struct cxgbi_ppm *ppm, + u32 ddp_tag) +{ + u32 idx = cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag); + + return ppm->ppod_data[idx].caller_data; +} + +/* sw bits are the free bits */ +static inline int cxgbi_ppm_ddp_tag_update_sw_bits(struct cxgbi_ppm *ppm, + u32 val, u32 orig_tag, + u32 *final_tag) +{ + struct cxgbi_tag_format *tformat = &ppm->tformat; + u32 v = val >> tformat->free_bits; + + if (v) { + pr_info("sw_bits 0x%x too large, avail bits %u.\n", + val, tformat->free_bits); + return -EINVAL; + } + if (!cxgbi_ppm_is_ddp_tag(ppm, orig_tag)) + return -EINVAL; + + *final_tag = (val << tformat->rsvd_bits) | + (orig_tag & ppm->tformat.rsvd_mask); + return 0; +} + +static inline void cxgbi_ppm_ppod_clear(struct cxgbi_pagepod *ppod) +{ + ppod->hdr.vld_tid = 0U; +} + +static inline void cxgbi_tagmask_check(unsigned int tagmask, + struct cxgbi_tag_format *tformat) +{ + unsigned int bits = fls(tagmask); + + /* reserve top most 2 bits for page selector */ + tformat->free_bits = 32 - 2 - bits; + tformat->rsvd_bits = bits; + tformat->color_bits = PPOD_IDX_SHIFT; + tformat->idx_bits = bits - 1 - PPOD_IDX_SHIFT; + tformat->no_ddp_mask = 1 << (bits - 1); + tformat->idx_mask = (1 << tformat->idx_bits) - 1; + tformat->color_mask = (1 << PPOD_IDX_SHIFT) - 1; + tformat->idx_clr_mask = (1 << (bits - 1)) - 1; + tformat->rsvd_mask = (1 << bits) - 1; + + pr_info("ippm: tagmask 0x%x, rsvd %u=%u+%u+1, mask 0x%x,0x%x, " + "pg %u,%u,%u,%u.\n", + tagmask, tformat->rsvd_bits, tformat->idx_bits, + tformat->color_bits, tformat->no_ddp_mask, tformat->rsvd_mask, + 
tformat->pgsz_order[0], tformat->pgsz_order[1], + tformat->pgsz_order[2], tformat->pgsz_order[3]); +} + +int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz); +void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag, + unsigned int tid, unsigned int offset, + unsigned int length, + struct cxgbi_pagepod_hdr *hdr); +void cxgbi_ppm_ppod_release(struct cxgbi_ppm *, u32 idx); +int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages, + u32 per_tag_pg_idx, u32 *ppod_idx, u32 *ddp_tag, + unsigned long caller_data); +int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *, + void *lldev, struct cxgbi_tag_format *, + unsigned int ppmax, unsigned int llimit, + unsigned int start, + unsigned int reserve_factor); +int cxgbi_ppm_release(struct cxgbi_ppm *ppm); +void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *); +unsigned int cxgbi_tagmask_set(unsigned int ppmax); + +#endif /*__LIBCXGB_PPM_H__*/ diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 60383040d..c363b5855 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -53,6 +53,8 @@ #include #include #include +#include +#include #include #include #include @@ -1895,9 +1897,17 @@ static int cs89x0_platform_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id __maybe_unused cs89x0_match[] = { + { .compatible = "cirrus,cs8900", }, + { .compatible = "cirrus,cs8920", }, + { }, +}; +MODULE_DEVICE_TABLE(of, cs89x0_match); + static struct platform_driver cs89x0_driver = { .driver = { - .name = DRV_NAME, + .name = DRV_NAME, + .of_match_table = of_match_ptr(cs89x0_match), }, .remove = cs89x0_platform_remove, }; diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index f44a39c40..fd3980cc1 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -103,25 +103,29 @@ static void enic_intr_coal_set_rx(struct enic *enic, u32 timer) } } -static int enic_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int enic_get_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { struct enic *enic = netdev_priv(netdev); + struct ethtool_link_settings *base = &ecmd->base; - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE); + base->port = PORT_FIBRE; if (netif_carrier_ok(netdev)) { - ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); - ecmd->duplex = DUPLEX_FULL; + base->speed = vnic_dev_port_speed(enic->vdev); + base->duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + base->speed = SPEED_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; } - ecmd->autoneg = AUTONEG_DISABLE; + base->autoneg = AUTONEG_DISABLE; return 0; } @@ -500,7 +504,6 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir, } static const struct ethtool_ops enic_ethtool_ops = { - .get_settings = enic_get_settings, .get_drvinfo = enic_get_drvinfo,
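+ /* The legacy .get_settings hook is dropped above; its replacement, + * .get_link_ksettings, is added at the end of this table. The conversion + * above follows this mapping (sketch): supported/advertising bits become + * ethtool_link_ksettings_add_link_mode() calls, ethtool_cmd_speed_set() + * becomes a write to base->speed, and duplex/port/autoneg move into + * ecmd->base. + */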
.get_msglevel = enic_get_msglevel, .set_msglevel = enic_set_msglevel, @@ -516,6 +519,7 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_rxfh_key_size = enic_get_rxfh_key_size, .get_rxfh = enic_get_rxfh, .set_rxfh = enic_set_rxfh, + .get_link_ksettings = enic_get_ksettings, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index f15560a06..48f82ab6c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1566,7 +1566,7 @@ static int enic_request_intr(struct enic *enic) intr = enic_msix_rq_intr(enic, i); snprintf(enic->msix[intr].devname, sizeof(enic->msix[intr].devname), - "%.11s-rx-%d", netdev->name, i); + "%.11s-rx-%u", netdev->name, i); enic->msix[intr].isr = enic_isr_msix; enic->msix[intr].devid = &enic->napi[i]; } @@ -1577,7 +1577,7 @@ static int enic_request_intr(struct enic *enic) intr = enic_msix_wq_intr(enic, i); snprintf(enic->msix[intr].devname, sizeof(enic->msix[intr].devname), - "%.11s-tx-%d", netdev->name, i); + "%.11s-tx-%u", netdev->name, i); enic->msix[intr].isr = enic_isr_msix; enic->msix[intr].devid = &enic->napi[wq]; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 1471e16ba..f45385f5c 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1299,6 +1299,7 @@ static int dm9000_open(struct net_device *dev) { struct board_info *db = netdev_priv(dev); + unsigned int irq_flags = irq_get_trigger_type(dev->irq); if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); @@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev) /* If there is no IRQ type specified, tell the user that this is a * problem */ - if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) + if (irq_flags == IRQF_TRIGGER_NONE) dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); + irq_flags |= IRQF_SHARED; + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ mdelay(1); /* delay needs by DM9000B */ @@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev) /* Initialize DM9000 board */ dm9000_init_dm9000(dev); - if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, - dev->name, dev)) + if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev)) return -EAGAIN; /* Now that we have an interrupt handler hooked up we can unmask * our interrupts diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index cbe84972f..f0e9e2ef6 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1319,7 +1319,7 @@ de4x5_open(struct net_device *dev) if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, lp->adapter_name, dev)) { - printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); + printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq); if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, lp->adapter_name, dev)) { printk("\n Cannot get IRQ- reconfigure your hardware.\n"); diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index b69a9eacc..c3b64cdd0 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -173,7 +173,7 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, static void dnet_handle_link_change(struct net_device *dev) { struct dnet *bp = 
netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; u32 mode_reg, ctl_reg; @@ -295,7 +295,6 @@ static int dnet_mii_probe(struct net_device *dev) bp->link = 0; bp->speed = 0; bp->duplex = -1; - bp->phy_dev = phydev; return 0; } @@ -629,16 +628,16 @@ static int dnet_open(struct net_device *dev) struct dnet *bp = netdev_priv(dev); /* if the phy is not yet register, retry later */ - if (!bp->phy_dev) + if (!dev->phydev) return -EAGAIN; napi_enable(&bp->napi); dnet_init_hw(bp); - phy_start_aneg(bp->phy_dev); + phy_start_aneg(dev->phydev); /* schedule a link state check */ - phy_start(bp->phy_dev); + phy_start(dev->phydev); netif_start_queue(dev); @@ -652,8 +651,8 @@ static int dnet_close(struct net_device *dev) netif_stop_queue(dev); napi_disable(&bp->napi); - if (bp->phy_dev) - phy_stop(bp->phy_dev); + if (dev->phydev) + phy_stop(dev->phydev); dnet_reset_hw(bp); netif_carrier_off(dev); @@ -731,32 +730,9 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev) return nstat; } -static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct dnet *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct dnet *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct dnet *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -776,11 +752,11 @@ static void dnet_get_drvinfo(struct net_device *dev, } static const struct ethtool_ops dnet_ethtool_ops = { - .get_settings = dnet_get_settings, - .set_settings = dnet_set_settings, .get_drvinfo = dnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops dnet_netdev_ops = { @@ -875,7 +851,7 @@ static int dnet_probe(struct platform_device *pdev) (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", (bp->capabilities & DNET_HAS_DMA) ? 
"" : "no "); - phydev = bp->phy_dev; + phydev = dev->phydev; phy_attached_info(phydev); return 0; @@ -899,8 +875,8 @@ static int dnet_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); - if (bp->phy_dev) - phy_disconnect(bp->phy_dev); + if (dev->phydev) + phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); unregister_netdev(dev); diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h index 37f5b30fa..d985080bb 100644 --- a/drivers/net/ethernet/dnet.h +++ b/drivers/net/ethernet/dnet.h @@ -216,7 +216,6 @@ struct dnet { /* PHY stuff */ struct mii_bus *mii_bus; - struct phy_device *phy_dev; unsigned int link; unsigned int speed; unsigned int duplex; diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig index 710856326..b4853ec9d 100644 --- a/drivers/net/ethernet/emulex/benet/Kconfig +++ b/drivers/net/ethernet/emulex/benet/Kconfig @@ -13,11 +13,3 @@ config BE2NET_HWMON ---help--- Say Y here if you want to expose thermal sensor data on be2net network adapter. - -config BE2NET_VXLAN - bool "VXLAN offload support on be2net driver" - default y - depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m) - ---help--- - Say Y here if you want to enable VXLAN offload support on - be2net driver. diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index fe3763df3..4555e041e 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -97,7 +97,8 @@ * SURF/DPDK */ -#define MAX_RSS_IFACES 15 +#define MAX_PORT_RSS_TABLES 15 +#define MAX_NIC_FUNCS 16 #define MAX_RX_QS 32 #define MAX_EVT_QS 32 #define MAX_TX_QS 32 @@ -442,8 +443,20 @@ struct be_resources { u16 max_iface_count; u16 max_mcc_count; u16 max_evt_qs; + u16 max_nic_evt_qs; /* NIC's share of evt qs */ u32 if_cap_flags; u32 vf_if_cap_flags; /* VF if capability flags */ + u32 flags; + /* Calculated PF Pool's share of RSS Tables. This is not enforced by + * the FW, but is a self-imposed driver limitation. 
+ */ + u16 max_rss_tables; +}; + +/* These are port-wide values */ +struct be_port_resources { + u16 max_vfs; + u16 nic_pfs; }; #define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC) @@ -513,7 +526,8 @@ struct be_adapter { spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_cq_lock; - u16 cfg_num_qs; /* configured via set-channels */ + u16 cfg_num_rx_irqs; /* configured via set-channels */ + u16 cfg_num_tx_irqs; /* configured via set-channels */ u16 num_evt_qs; u16 num_msix_vec; struct be_eq_obj eq_obj[MAX_EVT_QS]; @@ -632,16 +646,42 @@ struct be_adapter { #define be_max_txqs(adapter) (adapter->res.max_tx_qs) #define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs) #define be_max_rxqs(adapter) (adapter->res.max_rx_qs) -#define be_max_eqs(adapter) (adapter->res.max_evt_qs) +/* Max number of EQs available for the function (NIC + RoCE, if enabled) */ +#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs) +/* Max number of EQs available only for NIC */ +#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs) #define be_if_cap_flags(adapter) (adapter->res.if_cap_flags) - -static inline u16 be_max_qs(struct be_adapter *adapter) +#define be_max_pf_pool_rss_tables(adapter) \ + (adapter->pool_res.max_rss_tables) +/* Max irqs available for NIC */ +#define be_max_irqs(adapter) \ + (min_t(u16, be_max_nic_eqs(adapter), num_online_cpus())) + +/* Max irqs *needed* for RX queues */ +static inline u16 be_max_rx_irqs(struct be_adapter *adapter) { - /* If no RSS, need atleast the one def RXQ */ + /* If no RSS, need at least one irq for def-RXQ */ u16 num = max_t(u16, be_max_rss(adapter), 1); - num = min(num, be_max_eqs(adapter)); - return min_t(u16, num, num_online_cpus()); + return min_t(u16, num, be_max_irqs(adapter)); +} + +/* Max irqs *needed* for TX queues */ +static inline u16 be_max_tx_irqs(struct be_adapter *adapter) +{ + return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter)); +} + +/* Max irqs *needed* for combined queues */ +static inline u16 be_max_qp_irqs(struct be_adapter *adapter) +{ + return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter)); +} + +/* Max irqs *needed* for RX and TX queues together */ +static inline u16 be_max_any_irqs(struct be_adapter *adapter) +{ + return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter)); } /* Is BE in pvid_tagging mode */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 22402db27..2cc117568 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved.
* * This program is free software; you can redistribute it and/or @@ -87,6 +87,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = { CMD_SUBSYSTEM_LOWLEVEL, BE_PRIV_DEVCFG | BE_PRIV_DEVSEC }, + { + OPCODE_COMMON_SET_HSW_CONFIG, + CMD_SUBSYSTEM_COMMON, + BE_PRIV_DEVCFG | BE_PRIV_VHADM + }, }; static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) @@ -3850,6 +3855,10 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, void *ctxt; int status; + if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG, + CMD_SUBSYSTEM_COMMON)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3871,7 +3880,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); } - if (!BEx_chip(adapter) && hsw_mode) { + if (hsw_mode) { AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, adapter->hba_port_num); AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); @@ -4023,7 +4032,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; adapter->wol_cap = resp->wol_settings; - if (adapter->wol_cap & BE_WOL_CAP) + + /* Non-zero macaddr indicates WOL is enabled */ + if (adapter->wol_cap & BE_WOL_CAP && + !is_zero_ether_addr(resp->magic_mac)) adapter->wol_en = true; } err: @@ -4360,9 +4372,35 @@ err: return status; } +/* This routine returns a list of all the NIC PF_nums in the adapter */ +u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums) +{ + struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; + struct be_pcie_res_desc *pcie = NULL; + int i; + u16 nic_pf_count = 0; + + for (i = 0; i < desc_count; i++) { + if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || + hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) { + pcie = (struct be_pcie_res_desc *)hdr; + if (pcie->pf_state && (pcie->pf_type == MISSION_NIC || + pcie->pf_type == MISSION_RDMA)) { + nic_pf_nums[nic_pf_count++] = pcie->pf_num; + } + } + + hdr->desc_len = hdr->desc_len ? 
: RESOURCE_DESC_SIZE_V0; + hdr = (void *)hdr + hdr->desc_len; + } + return nic_pf_count; +} + /* Will use MBOX only if MCCQ has not been created */ int be_cmd_get_profile_config(struct be_adapter *adapter, - struct be_resources *res, u8 query, u8 domain) + struct be_resources *res, + struct be_port_resources *port_res, + u8 profile_type, u8 query, u8 domain) { struct be_cmd_resp_get_profile_config *resp; struct be_cmd_req_get_profile_config *req; @@ -4389,7 +4427,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, if (!lancer_chip(adapter)) req->hdr.version = 1; - req->type = ACTIVE_PROFILE_TYPE; + req->type = profile_type; req->hdr.domain = domain; /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the @@ -4406,6 +4444,28 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, resp = cmd.va; desc_count = le16_to_cpu(resp->desc_count); + if (port_res) { + u16 nic_pf_cnt = 0, i; + u16 nic_pf_num_list[MAX_NIC_FUNCS]; + + nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param, + desc_count, + nic_pf_num_list); + + for (i = 0; i < nic_pf_cnt; i++) { + nic = be_get_func_nic_desc(resp->func_param, desc_count, + nic_pf_num_list[i]); + if (nic->link_param == adapter->port_num) { + port_res->nic_pfs++; + pcie = be_get_pcie_desc(resp->func_param, + desc_count, + nic_pf_num_list[i]); + port_res->max_vfs += le16_to_cpu(pcie->num_vfs); + } + } + return status; + } + pcie = be_get_pcie_desc(resp->func_param, desc_count, adapter->pf_num); if (pcie) @@ -4465,7 +4525,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, } /* Mark all fields invalid */ -static void be_reset_nic_desc(struct be_nic_res_desc *nic) +void be_reset_nic_desc(struct be_nic_res_desc *nic) { memset(nic, 0, sizeof(*nic)); nic->unicast_mac_count = 0xFFFF; @@ -4534,73 +4594,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 1, version, domain); } -static void be_fill_vf_res_template(struct be_adapter *adapter, - struct be_resources pool_res, - u16 num_vfs, u16 num_vf_qs, - struct be_nic_res_desc *nic_vft) -{ - u32 vf_if_cap_flags = pool_res.vf_if_cap_flags; - struct be_resources res_mod = {0}; - - /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, - * which are modifiable using SET_PROFILE_CONFIG cmd. - */ - be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0); - - /* If RSS IFACE capability flags are modifiable for a VF, set the - * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if - * more than 1 RSSQ is available for a VF. - * Otherwise, provision only 1 queue pair for VF. 
- */ - if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { - nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); - if (num_vf_qs > 1) { - vf_if_cap_flags |= BE_IF_FLAGS_RSS; - if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) - vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; - } else { - vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | - BE_IF_FLAGS_DEFQ_RSS); - } - } else { - num_vf_qs = 1; - } - - if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { - nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); - vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; - } - - nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); - nic_vft->rq_count = cpu_to_le16(num_vf_qs); - nic_vft->txq_count = cpu_to_le16(num_vf_qs); - nic_vft->rssq_count = cpu_to_le16(num_vf_qs); - nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count / - (num_vfs + 1)); - - /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally - * among the PF and it's VFs, if the fields are changeable - */ - if (res_mod.max_uc_mac == FIELD_MODIFIABLE) - nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac / - (num_vfs + 1)); - - if (res_mod.max_vlans == FIELD_MODIFIABLE) - nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans / - (num_vfs + 1)); - - if (res_mod.max_iface_count == FIELD_MODIFIABLE) - nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count / - (num_vfs + 1)); - - if (res_mod.max_mcc_count == FIELD_MODIFIABLE) - nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count / - (num_vfs + 1)); -} - int be_cmd_set_sriov_config(struct be_adapter *adapter, struct be_resources pool_res, u16 num_vfs, - u16 num_vf_qs) + struct be_resources *vft_res) { struct { struct be_pcie_res_desc pcie; @@ -4620,12 +4616,26 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter, be_reset_nic_desc(&desc.nic_vft); desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; - desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); + desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) | + BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); desc.nic_vft.pf_num = adapter->pdev->devfn; desc.nic_vft.vf_num = 0; - - be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, - &desc.nic_vft); + desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags); + desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs); + desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs); + desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs); + desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count); + + if (vft_res->max_uc_mac) + desc.nic_vft.unicast_mac_count = + cpu_to_le16(vft_res->max_uc_mac); + if (vft_res->max_vlans) + desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans); + if (vft_res->max_iface_count) + desc.nic_vft.iface_count = + cpu_to_le16(vft_res->max_iface_count); + if (vft_res->max_mcc_count) + desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count); return be_cmd_set_profile_config(adapter, &desc, 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index d8540ae95..0d6be224a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -1556,7 +1556,9 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 { u8 rsvd0[2]; u8 wol_settings; u8 rsvd1[5]; - u32 rsvd2[295]; + u32 rsvd2[288]; + u8 magic_mac[6]; + u8 rsvd3[22]; } __packed; #define BE_GET_WOL_CAP 2 @@ -2128,6 +2130,9 @@ struct be_cmd_req_set_ext_fat_caps { #define IMM_SHIFT 6 /* Immediate */ #define NOSV_SHIFT 7 /* No save */ +#define MISSION_NIC 1 +#define MISSION_RDMA 8 + struct be_res_desc_hdr { u8 desc_type; u8 desc_len; @@ -2244,6 +2249,7 @@ struct be_cmd_req_get_profile_config { struct be_cmd_req_hdr hdr; u8 rsvd; #define ACTIVE_PROFILE_TYPE 0x2 +#define SAVED_PROFILE_TYPE 0x0 #define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3) u8 type; u16 rsvd1; @@ -2449,7 +2455,9 @@ int be_cmd_query_port_name(struct be_adapter *adapter); int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res); int be_cmd_get_profile_config(struct be_adapter *adapter, - struct be_resources *res, u8 query, u8 domain); + struct be_resources *res, + struct be_port_resources *port_res, + u8 profile_type, u8 query, u8 domain); int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num); @@ -2461,4 +2469,4 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port); int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op); int be_cmd_set_sriov_config(struct be_adapter *adapter, struct be_resources res, u16 num_vfs, - u16 num_vf_qs); + struct be_resources *vft_res); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 2ff691636..50e7be5da 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -793,6 +793,11 @@ static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + struct be_dma_mem cmd; + u8 mac[ETH_ALEN]; + bool enable; + int status; if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; @@ -802,12 +807,32 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; } - if (wol->wolopts & WAKE_MAGIC) - adapter->wol_en = true; - else - adapter->wol_en = false; + cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); + cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); + if (!cmd.va) + return -ENOMEM; - return 0; + eth_zero_addr(mac); + + enable = wol->wolopts & WAKE_MAGIC; + if (enable) + ether_addr_copy(mac, adapter->netdev->dev_addr); + + status = be_cmd_enable_magic_wol(adapter, mac, &cmd); + if (status) { + dev_err(dev, "Could not set Wake-on-lan mac address\n"); + status = be_cmd_status(status); + goto err; + } + + pci_enable_wake(adapter->pdev, PCI_D3hot, enable); + pci_enable_wake(adapter->pdev, PCI_D3cold, enable); + + adapter->wol_en = enable ? 
true : false; + +err: + dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma); + return status; } static int be_test_ddr_dma(struct be_adapter *adapter) @@ -1171,9 +1196,17 @@ static void be_get_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct be_adapter *adapter = netdev_priv(netdev); + u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1); - ch->combined_count = adapter->num_evt_qs; - ch->max_combined = be_max_qs(adapter); + /* num_tx_qs is always the same as the number of irqs used for TX */ + ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs); + ch->rx_count = num_rx_irqs - ch->combined_count; + ch->tx_count = adapter->num_tx_qs - ch->combined_count; + + ch->max_combined = be_max_qp_irqs(adapter); + /* The user must create at least one combined channel */ + ch->max_rx = be_max_rx_irqs(adapter) - 1; + ch->max_tx = be_max_tx_irqs(adapter) - 1; } static int be_set_channels(struct net_device *netdev, @@ -1182,11 +1215,22 @@ static int be_set_channels(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); int status; - if (ch->rx_count || ch->tx_count || ch->other_count || - !ch->combined_count || ch->combined_count > be_max_qs(adapter)) + /* we support either only combined channels or a combination of + * combined and either RX-only or TX-only channels. + */ + if (ch->other_count || !ch->combined_count || + (ch->rx_count && ch->tx_count)) + return -EINVAL; + + if (ch->combined_count > be_max_qp_irqs(adapter) || + (ch->rx_count && + (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) || + (ch->tx_count && + (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter))) return -EINVAL; - adapter->cfg_num_qs = ch->combined_count; + adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count; + adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count; status = be_update_queues(adapter); return be_cmd_status(status); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index ed98ef1ec..874c7539a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -2620,8 +2620,10 @@ static int be_evt_queues_create(struct be_adapter *adapter) struct be_aic_obj *aic; int i, rc; + /* need enough EQs to service both RX and TX queues */ adapter->num_evt_qs = min_t(u16, num_irqs(adapter), - adapter->cfg_num_qs); + max(adapter->cfg_num_rx_irqs, + adapter->cfg_num_tx_irqs)); for_all_evt_queues(adapter, eqo, i) { int numa_node = dev_to_node(&adapter->pdev->dev); @@ -2726,7 +2728,7 @@ static int be_tx_qs_create(struct be_adapter *adapter) struct be_eq_obj *eqo; int status, i; - adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); + adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs); for_all_tx_queues(adapter, txo, i) { cq = &txo->cq; @@ -2784,11 +2786,11 @@ static int be_rx_cqs_create(struct be_adapter *adapter) struct be_rx_obj *rxo; int rc, i; - /* We can create as many RSS rings as there are EQs. */ - adapter->num_rss_qs = adapter->num_evt_qs; + adapter->num_rss_qs = + min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs); /* We'll use RSS only if atleast 2 RSS rings are supported. 
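A single RSS ring would buy nothing over the default RXQ, so RSS is disabled below in that case. 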
*/ - if (adapter->num_rss_qs <= 1) + if (adapter->num_rss_qs < 2) adapter->num_rss_qs = 0; adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq; @@ -3249,18 +3251,23 @@ static void be_msix_disable(struct be_adapter *adapter) static int be_msix_enable(struct be_adapter *adapter) { - int i, num_vec; + unsigned int i, max_roce_eqs; struct device *dev = &adapter->pdev->dev; + int num_vec; - /* If RoCE is supported, program the max number of NIC vectors that - * may be configured via set-channels, along with vectors needed for - * RoCe. Else, just program the number we'll use initially. + /* If RoCE is supported, program the max number of vectors that + * could be used for NIC and RoCE, else, just program the number + * we'll use initially. */ - if (be_roce_supported(adapter)) - num_vec = min_t(int, 2 * be_max_eqs(adapter), - 2 * num_online_cpus()); - else - num_vec = adapter->cfg_num_qs; + if (be_roce_supported(adapter)) { + max_roce_eqs = + be_max_func_eqs(adapter) - be_max_nic_eqs(adapter); + max_roce_eqs = min(max_roce_eqs, num_online_cpus()); + num_vec = be_max_any_irqs(adapter) + max_roce_eqs; + } else { + num_vec = max(adapter->cfg_num_rx_irqs, + adapter->cfg_num_tx_irqs); + } for (i = 0; i < num_vec; i++) adapter->msix_entries[i].entry = i; @@ -3625,10 +3632,8 @@ static int be_open(struct net_device *netdev) be_link_status_update(adapter, link_status); netif_tx_start_all_queues(netdev); -#ifdef CONFIG_BE2NET_VXLAN if (skyhawk_chip(adapter)) - vxlan_get_rx_port(netdev); -#endif + udp_tunnel_get_rx_info(netdev); return 0; err: @@ -3636,40 +3641,6 @@ err: return -EIO; } -static int be_setup_wol(struct be_adapter *adapter, bool enable) -{ - struct device *dev = &adapter->pdev->dev; - struct be_dma_mem cmd; - u8 mac[ETH_ALEN]; - int status; - - eth_zero_addr(mac); - - cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); - if (!cmd.va) - return -ENOMEM; - - if (enable) { - status = pci_write_config_dword(adapter->pdev, - PCICFG_PM_CONTROL_OFFSET, - PCICFG_PM_CONTROL_MASK); - if (status) { - dev_err(dev, "Could not enable Wake-on-lan\n"); - goto err; - } - } else { - ether_addr_copy(mac, adapter->netdev->dev_addr); - } - - status = be_cmd_enable_magic_wol(adapter, mac, &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, enable); - pci_enable_wake(adapter->pdev, PCI_D3cold, enable); -err: - dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma); - return status; -} - static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) { u32 addr; @@ -3759,6 +3730,11 @@ static void be_vf_clear(struct be_adapter *adapter) be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } + + if (BE3_chip(adapter)) + be_cmd_set_hsw_config(adapter, 0, 0, + adapter->if_handle, + PORT_FWD_TYPE_PASSTHRU, 0); done: kfree(adapter->vf_cfg); adapter->num_vfs = 0; @@ -3789,7 +3765,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter) } } -#ifdef CONFIG_BE2NET_VXLAN static void be_disable_vxlan_offloads(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3808,37 +3783,87 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter) netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL); } -#endif -static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs) +static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs, + struct be_resources *vft_res) { struct be_resources res = adapter->pool_res; + u32 vf_if_cap_flags = 
res.vf_if_cap_flags; + struct be_resources res_mod = {0}; u16 num_vf_qs = 1; - /* Distribute the queue resources among the PF and it's VFs - * Do not distribute queue resources in multi-channel configuration. - */ - if (num_vfs && !be_is_mc(adapter)) { - /* Divide the qpairs evenly among the VFs and the PF, capped - * at VF-EQ-count. Any remainder qpairs belong to the PF. - */ + /* Distribute the queue resources among the PF and its VFs */ + if (num_vfs) { + /* Divide the rx queues evenly among the VFs and the PF, capped + * at VF-EQ-count. Any remainder queues belong to the PF. + */ num_vf_qs = min(SH_VF_MAX_NIC_EQS, res.max_rss_qs / (num_vfs + 1)); - /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable - * interfaces per port. Provide RSS on VFs, only if number - * of VFs requested is less than MAX_RSS_IFACES limit. + /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES + * RSS Tables per port. Provide RSS on VFs, only if number of + * VFs requested is less than its PF Pool's RSS Tables limit. */ - if (num_vfs >= MAX_RSS_IFACES) + if (num_vfs >= be_max_pf_pool_rss_tables(adapter)) num_vf_qs = 1; } - return num_vf_qs; + + /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, + * which are modifiable using SET_PROFILE_CONFIG cmd. + */ + be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE, + RESOURCE_MODIFIABLE, 0); + + /* If RSS IFACE capability flags are modifiable for a VF, set the + * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if + * more than 1 RSSQ is available for a VF. + * Otherwise, provision only 1 queue pair for VF. + */ + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { + vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + if (num_vf_qs > 1) { + vf_if_cap_flags |= BE_IF_FLAGS_RSS; + if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) + vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; + } else { + vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | + BE_IF_FLAGS_DEFQ_RSS); + } + } else { + num_vf_qs = 1; + } + + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } + + vft_res->vf_if_cap_flags = vf_if_cap_flags; + vft_res->max_rx_qs = num_vf_qs; + vft_res->max_rss_qs = num_vf_qs; + vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1); + vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1); + + /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally + * among the PF and its VFs, if the fields are changeable + */ + if (res_mod.max_uc_mac == FIELD_MODIFIABLE) + vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1); + + if (res_mod.max_vlans == FIELD_MODIFIABLE) + vft_res->max_vlans = res.max_vlans / (num_vfs + 1); + + if (res_mod.max_iface_count == FIELD_MODIFIABLE) + vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1); + + if (res_mod.max_mcc_count == FIELD_MODIFIABLE) + vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1); } static int be_clear(struct be_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; - u16 num_vf_qs; + struct be_resources vft_res = {0}; be_cancel_worker(adapter); @@ -3850,16 +3875,15 @@ static int be_clear(struct be_adapter *adapter) */ if (skyhawk_chip(adapter) && be_physfn(adapter) && !pci_vfs_assigned(pdev)) { - num_vf_qs = be_calculate_vf_qs(adapter, - pci_sriov_get_totalvfs(pdev)); + be_calculate_vf_res(adapter, + pci_sriov_get_totalvfs(pdev), + &vft_res); be_cmd_set_sriov_config(adapter, adapter->pool_res, pci_sriov_get_totalvfs(pdev), - num_vf_qs); + &vft_res); } -#ifdef 
CONFIG_BE2NET_VXLAN be_disable_vxlan_offloads(adapter); -#endif kfree(adapter->pmac_id); adapter->pmac_id = NULL; @@ -3884,7 +3908,8 @@ static int be_vfs_if_create(struct be_adapter *adapter) for_all_vfs(adapter, vf_cfg, vf) { if (!BE3_chip(adapter)) { - status = be_cmd_get_profile_config(adapter, &res, + status = be_cmd_get_profile_config(adapter, &res, NULL, + ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS, vf + 1); if (!status) { @@ -4000,6 +4025,15 @@ static int be_vf_setup(struct be_adapter *adapter) } } + if (BE3_chip(adapter)) { + /* On BE3, enable VEB only when SRIOV is enabled */ + status = be_cmd_set_hsw_config(adapter, 0, 0, + adapter->if_handle, + PORT_FWD_TYPE_VEB, 0); + if (status) + goto err; + } + adapter->flags |= BE_FLAGS_SRIOV_ENABLED; return 0; err: @@ -4069,8 +4103,9 @@ static void BEx_get_resources(struct be_adapter *adapter, /* On a SuperNIC profile, the driver needs to use the * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits */ - be_cmd_get_profile_config(adapter, &super_nic_res, - RESOURCE_LIMITS, 0); + be_cmd_get_profile_config(adapter, &super_nic_res, NULL, + ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS, + 0); /* Some old versions of BE3 FW don't report max_tx_qs value */ res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS; } else { @@ -4109,12 +4144,38 @@ static void be_setup_init(struct be_adapter *adapter) adapter->cmd_privileges = MIN_PRIVILEGES; } +/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port. + * However, this HW limitation is not exposed to the host via any SLI cmd. + * As a result, in the case of SRIOV and in particular multi-partition configs, + * the driver needs to calculate a proportional share of RSS Tables per PF-pool + * for distribution between the VFs. This self-imposed limit will determine the + * number of VFs for which RSS can be enabled. + */ +void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter) +{ + struct be_port_resources port_res = {0}; + u8 rss_tables_on_port; + u16 max_vfs = be_max_vfs(adapter); + + be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE, + RESOURCE_LIMITS, 0); + + rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs; + + /* Each PF Pool's RSS Tables limit = + * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port + */ + adapter->pool_res.max_rss_tables = + max_vfs * rss_tables_on_port / port_res.max_vfs; +} + static int be_get_sriov_config(struct be_adapter *adapter) { struct be_resources res = {0}; int max_vfs, old_vfs; - be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0); + be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE, + RESOURCE_LIMITS, 0); /* Some old versions of BE3 FW don't report max_vfs value */ if (BE3_chip(adapter) && !res.max_vfs) { @@ -4138,13 +4199,19 @@ static int be_get_sriov_config(struct be_adapter *adapter) adapter->num_vfs = old_vfs; } + if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) { + be_calculate_pf_pool_rss_tables(adapter); + dev_info(&adapter->pdev->dev, + "RSS can be enabled for all VFs if num_vfs <= %d\n", + be_max_pf_pool_rss_tables(adapter)); + } return 0; } static void be_alloc_sriov_res(struct be_adapter *adapter) { int old_vfs = pci_num_vf(adapter->pdev); - u16 num_vf_qs; + struct be_resources vft_res = {0}; int status; be_get_sriov_config(adapter); @@ -4158,9 +4225,9 @@ * Also, this is done by FW in Lancer chip. 
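* As an illustration with made-up numbers: if pool_res.max_rss_qs were 32 and * num_vfs were 7, be_calculate_vf_res() above would give the PF and each VF * min(SH_VF_MAX_NIC_EQS, 32 / (7 + 1)) RSS queues. 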
*/ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) { - num_vf_qs = be_calculate_vf_qs(adapter, 0); + be_calculate_vf_res(adapter, 0, &vft_res); status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0, - num_vf_qs); + &vft_res); if (status) dev_err(&adapter->pdev->dev, "Failed to optimize SRIOV resources\n"); @@ -4173,16 +4240,13 @@ static int be_get_resources(struct be_adapter *adapter) struct be_resources res = {0}; int status; - if (BEx_chip(adapter)) { - BEx_get_resources(adapter, &res); - adapter->res = res; - } - /* For Lancer, SH etc read per-function resource limits from FW. * GET_FUNC_CONFIG returns per function guaranteed limits. * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits */ - if (!BEx_chip(adapter)) { + if (BEx_chip(adapter)) { + BEx_get_resources(adapter, &res); + } else { status = be_cmd_get_func_config(adapter, &res); if (status) return status; @@ -4191,13 +4255,13 @@ static int be_get_resources(struct be_adapter *adapter) if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs && !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)) res.max_rss_qs -= 1; - - /* If RoCE may be enabled stash away half the EQs for RoCE */ - if (be_roce_supported(adapter)) - res.max_evt_qs /= 2; - adapter->res = res; } + /* If RoCE is supported stash away half the EQs for RoCE */ + res.max_nic_evt_qs = be_roce_supported(adapter) ? + res.max_evt_qs / 2 : res.max_evt_qs; + adapter->res = res; + /* If FW supports RSS default queue, then skip creating non-RSS * queue for non-IP traffic. */ @@ -4206,15 +4270,17 @@ static int be_get_resources(struct be_adapter *adapter) dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", be_max_txqs(adapter), be_max_rxqs(adapter), - be_max_rss(adapter), be_max_eqs(adapter), + be_max_rss(adapter), be_max_nic_eqs(adapter), be_max_vfs(adapter)); dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", be_max_uc(adapter), be_max_mc(adapter), be_max_vlans(adapter)); - /* Sanitize cfg_num_qs based on HW and platform limits */ - adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(), - be_max_qs(adapter)); + /* Ensure RX and TX queues are created in pairs at init time */ + adapter->cfg_num_rx_irqs = + min_t(u16, netif_get_num_default_rss_queues(), + be_max_qp_irqs(adapter)); + adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs; return 0; } @@ -4241,6 +4307,8 @@ static int be_get_config(struct be_adapter *adapter) } be_cmd_get_acpi_wol_cap(adapter); + pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en); + pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en); be_cmd_query_port_name(adapter); @@ -4251,15 +4319,6 @@ static int be_get_config(struct be_adapter *adapter) "Using profile 0x%x\n", profile_id); } - status = be_get_resources(adapter); - if (status) - return status; - - adapter->pmac_id = kcalloc(be_max_uc(adapter), - sizeof(*adapter->pmac_id), GFP_KERNEL); - if (!adapter->pmac_id) - return -ENOMEM; - return 0; } @@ -4334,7 +4393,7 @@ static int be_if_create(struct be_adapter *adapter) u32 cap_flags = be_if_cap_flags(adapter); int status; - if (adapter->cfg_num_qs == 1) + if (adapter->cfg_num_rx_irqs == 1) cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS); en_flags &= cap_flags; @@ -4460,13 +4519,22 @@ static int be_setup(struct be_adapter *adapter) return status; } + status = be_get_config(adapter); + if (status) + goto err; + if (!BE2_chip(adapter) && be_physfn(adapter)) be_alloc_sriov_res(adapter); - status = be_get_config(adapter); + status = be_get_resources(adapter); if (status) goto err; + 
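/* One entry is needed per unicast MAC filter the function may own, hence + * the be_max_uc() sizing; the table can only be allocated at this point, + * after be_get_resources() has filled in the limits. + */ +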
adapter->pmac_id = kcalloc(be_max_uc(adapter), + sizeof(*adapter->pmac_id), GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; + status = be_msix_enable(adapter); if (status) goto err; @@ -4511,6 +4579,15 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_logical_link_config(adapter, IFLA_VF_LINK_STATE_AUTO, 0); + /* BE3 EVB echoes broadcast/multicast packets back to PF's vport + * confusing a linux bridge or OVS that it might be connected to. + * Set the EVB to PASSTHRU mode which effectively disables the EVB + * when SRIOV is not enabled. + */ + if (BE3_chip(adapter)) + be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle, + PORT_FWD_TYPE_PASSTHRU, 0); + if (adapter->num_vfs) be_vf_setup(adapter); @@ -4651,7 +4728,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 0, 0, nlflags, filter_mask, NULL); } -#ifdef CONFIG_BE2NET_VXLAN /* VxLAN offload Notes: * * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't @@ -4666,13 +4742,17 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, * adds more than one port, disable offloads and don't re-enable them again * until after all the tunnels are removed. */ -static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, - __be16 port) +static void be_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct be_adapter *adapter = netdev_priv(netdev); struct device *dev = &adapter->pdev->dev; + __be16 port = ti->port; int status; + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) return; @@ -4720,10 +4800,14 @@ err: be_disable_vxlan_offloads(adapter); } -static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, - __be16 port) +static void be_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct be_adapter *adapter = netdev_priv(netdev); + __be16 port = ti->port; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) return; @@ -4785,7 +4869,6 @@ static netdev_features_t be_features_check(struct sk_buff *skb, return features; } -#endif static int be_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid) @@ -4833,11 +4916,9 @@ static const struct net_device_ops be_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = be_busy_poll, #endif -#ifdef CONFIG_BE2NET_VXLAN - .ndo_add_vxlan_port = be_add_vxlan_port, - .ndo_del_vxlan_port = be_del_vxlan_port, + .ndo_udp_tunnel_add = be_add_vxlan_port, + .ndo_udp_tunnel_del = be_del_vxlan_port, .ndo_features_check = be_features_check, -#endif .ndo_get_phys_port_id = be_get_phys_port_id, }; @@ -4996,6 +5077,10 @@ static void be_worker(struct work_struct *work) struct be_rx_obj *rxo; int i; + if (be_physfn(adapter) && + MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) + be_cmd_get_die_temperature(adapter); + /* when interrupts are not yet enabled, just reap any pending * mcc completions */ @@ -5014,10 +5099,6 @@ static void be_worker(struct work_struct *work) be_cmd_get_stats(adapter, &adapter->stats_cmd); } - if (be_physfn(adapter) && - MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) - be_cmd_get_die_temperature(adapter); - for_all_rx_queues(adapter, rxo, i) { /* Replenish RX-queues starved due to memory * allocation failures. 
@@ -5410,9 +5491,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); - if (adapter->wol_en) - be_setup_wol(adapter, true); - be_intr_set(adapter, false); be_cancel_err_detection(adapter); @@ -5441,9 +5519,6 @@ static int be_pci_resume(struct pci_dev *pdev) be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); - if (adapter->wol_en) - be_setup_wol(adapter, false); - return 0; } @@ -5552,7 +5627,7 @@ err: static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct be_adapter *adapter = pci_get_drvdata(pdev); - u16 num_vf_qs; + struct be_resources vft_res = {0}; int status; if (!num_vfs) @@ -5575,9 +5650,10 @@ static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) * Also, this is done by FW in Lancer chip. */ if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) { - num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs); + be_calculate_vf_res(adapter, adapter->num_vfs, + &vft_res); status = be_cmd_set_sriov_config(adapter, adapter->pool_res, - adapter->num_vfs, num_vf_qs); + adapter->num_vfs, &vft_res); if (status) dev_err(&pdev->dev, "Failed to optimize SR-IOV resources\n"); diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 4089156a7..2b62841c4 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index fde609789..e51719a73 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. 
* * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 4466a1187..c044667a0 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -192,7 +192,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); * @napi: NAPI structure * @msg_enable: device state flags * @lock: device lock - * @phy: attached PHY * @mdio: MDIO bus for PHY access * @phy_id: address of attached PHY */ @@ -219,7 +218,6 @@ struct ethoc { spinlock_t lock; - struct phy_device *phy; struct mii_bus *mdio; struct clk *clk; s8 phy_id; @@ -694,7 +692,6 @@ static int ethoc_mdio_probe(struct net_device *dev) return err; } - priv->phy = phy; phy->advertising &= ~(ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half); phy->supported &= ~(SUPPORTED_1000baseT_Full | @@ -724,7 +721,7 @@ static int ethoc_open(struct net_device *dev) netif_start_queue(dev); } - phy_start(priv->phy); + phy_start(dev->phydev); napi_enable(&priv->napi); if (netif_msg_ifup(priv)) { @@ -741,8 +738,8 @@ static int ethoc_stop(struct net_device *dev) napi_disable(&priv->napi); - if (priv->phy) - phy_stop(priv->phy); + if (dev->phydev) + phy_stop(dev->phydev); ethoc_disable_rx_and_tx(priv); free_irq(dev->irq, dev); @@ -770,7 +767,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!phy) return -ENODEV; } else { - phy = priv->phy; + phy = dev->phydev; } return phy_mii_ioctl(phy, ifr, cmd); @@ -903,28 +900,6 @@ out_no_free: return NETDEV_TX_OK; } -static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ethoc *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phy; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_gset(phydev, cmd); -} - -static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ethoc *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phy; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_sset(phydev, cmd); -} - static int ethoc_get_regs_len(struct net_device *netdev) { return ETH_END; @@ -989,14 +964,14 @@ static int ethoc_set_ringparam(struct net_device *dev, } const struct ethtool_ops ethoc_ethtool_ops = { - .get_settings = ethoc_get_settings, - .set_settings = ethoc_set_settings, .get_regs_len = ethoc_get_regs_len, .get_regs = ethoc_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = ethoc_get_ringparam, .set_ringparam = ethoc_set_ringparam, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops ethoc_netdev_ops = { @@ -1267,8 +1242,7 @@ static int ethoc_remove(struct platform_device *pdev) if (netdev) { netif_napi_del(&priv->napi); - phy_disconnect(priv->phy); - priv->phy = NULL; + phy_disconnect(netdev->phydev); if (priv->mdio) { mdiobus_unregister(priv->mdio); diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 9b7a3f5a2..f928e6f79 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -24,6 +24,14 @@ #define DRV_NAME "nps_mgt_enet" +static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv) +{ + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; + + return (!tx_ctrl_ct && priv->tx_skb); +} + static void nps_enet_clean_rx_fifo(struct net_device *ndev, 
u32 frame_len) { struct nps_enet_priv *priv = netdev_priv(ndev); @@ -46,16 +54,17 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev, if (dst_is_aligned) { ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len); reg += len; - } - else { /* !dst_is_aligned */ + } else { /* !dst_is_aligned */ for (i = 0; i < len; i++, reg++) { u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); + put_unaligned_be32(buf, reg); } } /* copy last bytes (if any) */ if (last) { u32 buf; + ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1); memcpy((u8 *)reg, &buf, last); } @@ -140,12 +149,11 @@ static void nps_enet_tx_handler(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT; u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; /* Check if we got TX */ - if (!priv->tx_skb || tx_ctrl_ct) + if (!nps_enet_is_tx_pending(priv)) return; /* Ack Tx ctrl register */ @@ -183,9 +191,6 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) work_done = nps_enet_rx_handler(ndev); if (work_done < budget) { u32 buf_int_enable_value = 0; - u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = - (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; napi_complete(napi); @@ -204,8 +209,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) * the two code lines below will solve this situation by * re-adding ourselves to the poll list. */ - - if (priv->tx_skb && !tx_ctrl_ct) { + if (nps_enet_is_tx_pending(priv)) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); napi_reschedule(napi); } @@ -230,11 +234,9 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance) struct net_device *ndev = dev_instance; struct nps_enet_priv *priv = netdev_priv(ndev); u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); - u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; - if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr) + if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr) if (likely(napi_schedule_prep(&priv->napi))) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); __napi_schedule(&priv->napi); @@ -460,7 +462,6 @@ static void nps_enet_set_rx_mode(struct net_device *ndev) | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT; ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT; - } nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index e7cf313e3..36361f8bf 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -31,6 +31,7 @@ #include <linux/netdevice.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <net/ncsi.h> #include "ftgmac100.h" @@ -68,10 +69,14 @@ struct ftgmac100 { struct net_device *netdev; struct device *dev; + struct ncsi_dev *ndev; struct napi_struct napi; struct mii_bus *mii_bus; int old_speed; + int int_mask_all; + bool use_ncsi; + bool enabled; }; static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv, @@ -80,14 +85,6 @@ static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv, /****************************************************************************** * internal
functions (hardware register access) *****************************************************************************/ -#define INT_MASK_ALL_ENABLED (FTGMAC100_INT_RPKT_LOST | \ - FTGMAC100_INT_XPKT_ETH | \ - FTGMAC100_INT_XPKT_LOST | \ - FTGMAC100_INT_AHB_ERR | \ - FTGMAC100_INT_PHYSTS_CHG | \ - FTGMAC100_INT_RPKT_BUF | \ - FTGMAC100_INT_NO_RXBUF) - static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr) { iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR); @@ -141,6 +138,55 @@ static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac) iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); } +static void ftgmac100_setup_mac(struct ftgmac100 *priv) +{ + u8 mac[ETH_ALEN]; + unsigned int m; + unsigned int l; + void *addr; + + addr = device_get_mac_address(priv->dev, mac, ETH_ALEN); + if (addr) { + ether_addr_copy(priv->netdev->dev_addr, mac); + dev_info(priv->dev, "Read MAC address %pM from device tree\n", + mac); + return; + } + + m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR); + l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR); + + mac[0] = (m >> 8) & 0xff; + mac[1] = m & 0xff; + mac[2] = (l >> 24) & 0xff; + mac[3] = (l >> 16) & 0xff; + mac[4] = (l >> 8) & 0xff; + mac[5] = l & 0xff; + + if (is_valid_ether_addr(mac)) { + ether_addr_copy(priv->netdev->dev_addr, mac); + dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); + } else { + eth_hw_addr_random(priv->netdev); + dev_info(priv->dev, "Generated random MAC address %pM\n", + priv->netdev->dev_addr); + } +} + +static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) +{ + int ret; + + ret = eth_prepare_mac_addr_change(dev, p); + if (ret < 0) + return ret; + + eth_commit_mac_addr_change(dev, p); + ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr); + + return 0; +} + static void ftgmac100_init_hw(struct ftgmac100 *priv) { /* setup ring buffer base registers */ @@ -952,7 +998,10 @@ static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id) struct net_device *netdev = dev_id; struct ftgmac100 *priv = netdev_priv(netdev); - if (likely(netif_running(netdev))) { + /* When running in NCSI mode, the interface should be ready for + * receiving or transmitting NCSI packets before it's opened. + */ + if (likely(priv->use_ncsi || netif_running(netdev))) { /* Disable interrupts for polling */ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); napi_schedule(&priv->napi); @@ -1005,8 +1054,9 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget) ftgmac100_tx_complete(priv); } - if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST | - FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) { + if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF | + FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR | + FTGMAC100_INT_PHYSTS_CHG)) { if (net_ratelimit()) netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status, status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "", @@ -1029,7 +1079,8 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* enable all interrupts */ - iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER); + iowrite32(priv->int_mask_all, + priv->base + FTGMAC100_OFFSET_IER); } return rx; @@ -1065,17 +1116,33 @@ static int ftgmac100_open(struct net_device *netdev) goto err_hw; ftgmac100_init_hw(priv); - ftgmac100_start_hw(priv, 10); - - phy_start(netdev->phydev); + ftgmac100_start_hw(priv, priv->use_ncsi ? 
100 : 10); + if (netdev->phydev) + phy_start(netdev->phydev); + else if (priv->use_ncsi) + netif_carrier_on(netdev); napi_enable(&priv->napi); netif_start_queue(netdev); /* enable all interrupts */ - iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER); + iowrite32(priv->int_mask_all, priv->base + FTGMAC100_OFFSET_IER); + + /* Start the NCSI device */ + if (priv->use_ncsi) { + err = ncsi_start_dev(priv->ndev); + if (err) + goto err_ncsi; + } + + priv->enabled = true; + return 0; +err_ncsi: + napi_disable(&priv->napi); + netif_stop_queue(netdev); + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); err_hw: free_irq(priv->irq, netdev); err_irq: @@ -1088,12 +1155,17 @@ static int ftgmac100_stop(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); + if (!priv->enabled) + return 0; + /* disable all interrupts */ + priv->enabled = false; iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); netif_stop_queue(netdev); napi_disable(&priv->napi); - phy_stop(netdev->phydev); + if (netdev->phydev) + phy_stop(netdev->phydev); ftgmac100_stop_hw(priv); free_irq(priv->irq, netdev); @@ -1134,6 +1206,9 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb, /* optional */ static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { + if (!netdev->phydev) + return -ENXIO; + return phy_mii_ioctl(netdev->phydev, ifr, cmd); } @@ -1141,11 +1216,74 @@ static const struct net_device_ops ftgmac100_netdev_ops = { .ndo_open = ftgmac100_open, .ndo_stop = ftgmac100_stop, .ndo_start_xmit = ftgmac100_hard_start_xmit, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = ftgmac100_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = ftgmac100_do_ioctl, }; +static int ftgmac100_setup_mdio(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + struct platform_device *pdev = to_platform_device(priv->dev); + int i, err = 0; + + /* initialize mdio bus */ + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) + return -EIO; + + priv->mii_bus->name = "ftgmac100_mdio"; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", + pdev->name, pdev->id); + priv->mii_bus->priv = priv->netdev; + priv->mii_bus->read = ftgmac100_mdiobus_read; + priv->mii_bus->write = ftgmac100_mdiobus_write; + + for (i = 0; i < PHY_MAX_ADDR; i++) + priv->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(priv->mii_bus); + if (err) { + dev_err(priv->dev, "Cannot register MDIO bus!\n"); + goto err_register_mdiobus; + } + + err = ftgmac100_mii_probe(priv); + if (err) { + dev_err(priv->dev, "MII Probe failed!\n"); + goto err_mii_probe; + } + + return 0; + +err_mii_probe: + mdiobus_unregister(priv->mii_bus); +err_register_mdiobus: + mdiobus_free(priv->mii_bus); + return err; +} + +static void ftgmac100_destroy_mdio(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + + if (!netdev->phydev) + return; + + phy_disconnect(netdev->phydev); + mdiobus_unregister(priv->mii_bus); + mdiobus_free(priv->mii_bus); +} + +static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) +{ + if (unlikely(nd->state != ncsi_dev_state_functional)) + return; + + netdev_info(nd->dev, "NCSI interface %s\n", + nd->link_up ? 
"up" : "down"); +} + /****************************************************************************** * struct platform_driver functions *****************************************************************************/ @@ -1155,7 +1293,7 @@ static int ftgmac100_probe(struct platform_device *pdev) int irq; struct net_device *netdev; struct ftgmac100 *priv; - int err; + int err = 0; if (!pdev) return -ENODEV; @@ -1179,7 +1317,6 @@ static int ftgmac100_probe(struct platform_device *pdev) netdev->ethtool_ops = &ftgmac100_ethtool_ops; netdev->netdev_ops = &ftgmac100_netdev_ops; - netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; platform_set_drvdata(pdev, netdev); @@ -1211,31 +1348,45 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->irq = irq; - /* initialize mdio bus */ - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - err = -EIO; - goto err_alloc_mdiobus; - } - - priv->mii_bus->name = "ftgmac100_mdio"; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii"); - - priv->mii_bus->priv = netdev; - priv->mii_bus->read = ftgmac100_mdiobus_read; - priv->mii_bus->write = ftgmac100_mdiobus_write; + /* MAC address from chip or random one */ + ftgmac100_setup_mac(priv); + + priv->int_mask_all = (FTGMAC100_INT_RPKT_LOST | + FTGMAC100_INT_XPKT_ETH | + FTGMAC100_INT_XPKT_LOST | + FTGMAC100_INT_AHB_ERR | + FTGMAC100_INT_PHYSTS_CHG | + FTGMAC100_INT_RPKT_BUF | + FTGMAC100_INT_NO_RXBUF); + if (pdev->dev.of_node && + of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) { + if (!IS_ENABLED(CONFIG_NET_NCSI)) { + dev_err(&pdev->dev, "NCSI stack not enabled\n"); + goto err_ncsi_dev; + } - err = mdiobus_register(priv->mii_bus); - if (err) { - dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); - goto err_register_mdiobus; + dev_info(&pdev->dev, "Using NCSI interface\n"); + priv->use_ncsi = true; + priv->int_mask_all &= ~FTGMAC100_INT_PHYSTS_CHG; + priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); + if (!priv->ndev) + goto err_ncsi_dev; + } else { + priv->use_ncsi = false; + err = ftgmac100_setup_mdio(netdev); + if (err) + goto err_setup_mdio; } - err = ftgmac100_mii_probe(priv); - if (err) { - dev_err(&pdev->dev, "MII Probe failed!\n"); - goto err_mii_probe; - } + /* We have to disable on-chip IP checksum functionality + * when NCSI is enabled on the interface. It doesn't work + * in that case. 
+ */ + netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; + if (priv->use_ncsi && + of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL)) + netdev->features &= ~NETIF_F_IP_CSUM; + /* register network device */ err = register_netdev(netdev); @@ -1246,21 +1397,12 @@ static int ftgmac100_probe(struct platform_device *pdev) netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); - if (!is_valid_ether_addr(netdev->dev_addr)) { - eth_hw_addr_random(netdev); - netdev_info(netdev, "generated random MAC address %pM\n", - netdev->dev_addr); - } - return 0; +err_ncsi_dev: err_register_netdev: - phy_disconnect(netdev->phydev); -err_mii_probe: - mdiobus_unregister(priv->mii_bus); -err_register_mdiobus: - mdiobus_free(priv->mii_bus); -err_alloc_mdiobus: + ftgmac100_destroy_mdio(netdev); +err_setup_mdio: iounmap(priv->base); err_ioremap: release_resource(priv->res); @@ -1280,10 +1422,7 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) priv = netdev_priv(netdev); unregister_netdev(netdev); - - phy_disconnect(netdev->phydev); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + ftgmac100_destroy_mdio(netdev); iounmap(priv->base); release_resource(priv->res); @@ -1293,14 +1432,20 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id ftgmac100_of_match[] = { + { .compatible = "faraday,ftgmac100" }, + { } +}; +MODULE_DEVICE_TABLE(of, ftgmac100_of_match); + static struct platform_driver ftgmac100_driver = { - .probe = ftgmac100_probe, - .remove = __exit_p(ftgmac100_remove), - .driver = { - .name = DRV_NAME, + .probe = ftgmac100_probe, + .remove = __exit_p(ftgmac100_remove), + .driver = { + .name = DRV_NAME, + .of_match_table = ftgmac100_of_match, }, }; - module_platform_driver(ftgmac100_driver); MODULE_AUTHOR("Po-Yu Chuang "); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index f58f9ea51..c865135f3 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -442,6 +442,10 @@ struct bufdesc_ex { #define FEC_QUIRK_SINGLE_MDIO (1 << 11) /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +/* Controller supports interrupt coalesce */ +#define FEC_QUIRK_HAS_COALESCE (1 << 13) +/* Interrupt doesn't wake CPU from deep idle */ +#define FEC_QUIRK_ERR006687 (1 << 14) struct bufdesc_prop { int qid; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index fea0f330d..692ee248e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -60,6 +60,7 @@ #include #include #include +#include <soc/imx/cpuidle.h> #include @@ -88,10 +89,10 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC, + .driver_data = FEC_QUIRK_USE_GASKET, }, { .name = "imx27-fec", - .driver_data = FEC_QUIRK_HAS_RACC, + .driver_data = 0, }, { .name = "imx28-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | @@ -111,7 +112,13 @@ static struct platform_device_id fec_devtype[] = { FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + }, { + .name = "imx6ul-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN |
FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, }, { /* sentinel */ } @@ -125,6 +132,7 @@ enum imx_fec_type { IMX6Q_FEC, MVF600_FEC, IMX6SX_FEC, + IMX6UL_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -134,6 +142,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -171,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); /* FEC receive acceleration */ #define FEC_RACC_IPDIS (1 << 1) #define FEC_RACC_PRODIS (1 << 2) +#define FEC_RACC_SHIFT16 BIT(7) #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) /* @@ -936,9 +946,11 @@ fec_restart(struct net_device *ndev) #if !defined(CONFIG_M5272) if (fep->quirks & FEC_QUIRK_HAS_RACC) { - /* set RX checksum */ val = readl(fep->hwp + FEC_RACC); + /* align IP header */ + val |= FEC_RACC_SHIFT16; if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) + /* set RX checksum */ val |= FEC_RACC_OPTIONS; else val &= ~FEC_RACC_OPTIONS; @@ -1419,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) prefetch(skb->data - NET_IP_ALIGN); skb_put(skb, pkt_len - 4); data = skb->data; + +#if !defined(CONFIG_M5272) + if (fep->quirks & FEC_QUIRK_HAS_RACC) + data = skb_pull_inline(skb, 2); +#endif + if (!is_copybreak && need_swap) swap_buffer(data, pkt_len); @@ -2358,9 +2376,6 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int rx_itr, tx_itr; - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) - return; - /* Must be greater than zero to avoid unpredictable behavior */ if (!fep->rx_time_itr || !fep->rx_pkts_itr || !fep->tx_time_itr || !fep->tx_pkts_itr) @@ -2383,10 +2398,12 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) writel(tx_itr, fep->hwp + FEC_TXIC0); writel(rx_itr, fep->hwp + FEC_RXIC0); - writel(tx_itr, fep->hwp + FEC_TXIC1); - writel(rx_itr, fep->hwp + FEC_RXIC1); - writel(tx_itr, fep->hwp + FEC_TXIC2); - writel(rx_itr, fep->hwp + FEC_RXIC2); + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); + } } static int @@ -2394,7 +2411,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) { struct fec_enet_private *fep = netdev_priv(ndev); - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP; ec->rx_coalesce_usecs = fep->rx_time_itr; @@ -2412,7 +2429,7 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) struct fec_enet_private *fep = netdev_priv(ndev); unsigned int cycle; - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP; if (ec->rx_max_coalesced_frames > 255) { @@ -2818,6 +2835,9 @@ fec_enet_open(struct net_device *ndev) if (ret) goto err_enet_mii_probe; + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); + napi_enable(&fep->napi); phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); @@ -2853,6 +2873,9 @@ fec_enet_close(struct net_device *ndev) phy_disconnect(ndev->phydev); + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_unused(); + 
fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); pm_runtime_mark_last_busy(&fep->pdev->dev); @@ -3191,7 +3214,12 @@ static void fec_reset_phy(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } - msleep(msec); + + if (msec > 20) + msleep(msec); + else + usleep_range(msec * 1000, msec * 1000 + 1000); + gpio_set_value_cansleep(phy_reset, !active_high); } #else /* CONFIG_OF */ @@ -3292,6 +3320,11 @@ fec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); + if ((of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6dl")) && + !of_property_read_bool(np, "fsl,err006687-workaround-present")) + fep->quirks |= FEC_QUIRK_ERR006687; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2e6785b6e..4b4f5bc0e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2275,7 +2275,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, fcb->flags = flags; } -void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) +static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) { fcb->flags |= TXFCB_VLN; fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); @@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, { unsigned int size = lstatus & BD_LENGTH_MASK; struct page *page = rxb->page; + bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); /* Remove the FCS from the packet length */ - if (likely(lstatus & BD_LFLAG(RXBD_LAST))) + if (last) size -= ETH_FCS_LEN; - if (likely(first)) + if (likely(first)) { skb_put(skb, size); - else - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rxb->page_offset + RXBUF_ALIGNMENT, - size, GFAR_RXB_TRUESIZE); + } else { + /* the last fragments' length contains the full frame length */ + if (last) + size -= skb->len; + + /* in case the last fragment consisted only of the FCS */ + if (size > 0) + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); + } /* try reuse page */ if (unlikely(page_count(page) != 1)) diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 373fd094f..6e8a9c846 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -100,7 +100,8 @@ extern const char gfar_driver_version[]; #define DEFAULT_RX_LFC_THR 16 #define DEFAULT_LFC_PTVVAL 4 -#define GFAR_RXB_SIZE 1536 +/* prevent fragmentation by HW in DSA environments */ +#define GFAR_RXB_SIZE roundup(1536 + 8, 64) #define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define GFAR_RXB_TRUESIZE 2048 diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 4ccc03263..d11287e11 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_HISILICON bool "Hisilicon devices" default y - depends on OF && HAS_DMA + depends on (OF || ACPI) && HAS_DMA depends on ARM || ARM64 || COMPILE_TEST ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -23,6 +23,18 @@ config HIX5HD2_GMAC help This selects the hix5hd2 mac family network device.
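The fec_reset_phy() hunk above encodes the kernel's usual delay-selection rule (Documentation/timers/timers-howto.txt): msleep() is jiffies-based and can oversleep considerably for periods under roughly 20 ms, so short PHY reset pulses switch to usleep_range(). The hisi_femac driver added below applies the same rule in hisi_femac_sleep_us(). A minimal stand-alone sketch of the pattern, with an illustrative helper name that is not part of the patch:

#include <linux/delay.h>

/* Illustrative only: choose the sleep primitive by delay length. */
static void demo_sleep_ms(unsigned int msec)
{
	if (msec > 20)
		msleep(msec);		/* long waits: jiffy resolution is fine */
	else if (msec)
		usleep_range(msec * 1000, msec * 1000 + 1000);	/* short waits: hrtimer-backed */
}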
+config HISI_FEMAC + tristate "Hisilicon Fast Ethernet MAC device support" + depends on HAS_IOMEM + select PHYLIB + select RESET_CONTROLLER + help + This selects the Hisilicon Fast Ethernet MAC device (FEMAC). + The FEMAC receives and transmits data over Ethernet + ports at 10/100 Mbps in full-duplex or half-duplex mode. + The FEMAC exchanges data with the CPU, and supports + energy-efficient Ethernet (EEE). + config HIP04_ETH tristate "HISILICON P04 Ethernet support" depends on HAS_IOMEM # For MFD_SYSCON diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile index 390b71fb3..866169502 100644 --- a/drivers/net/ethernet/hisilicon/Makefile +++ b/drivers/net/ethernet/hisilicon/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o obj-$(CONFIG_HIP04_ETH) += hip04_eth.o obj-$(CONFIG_HNS_MDIO) += hns_mdio.o obj-$(CONFIG_HNS) += hns/ +obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c new file mode 100644 index 000000000..b5d7ad025 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -0,0 +1,1007 @@ +/* + * Hisilicon Fast Ethernet MAC Driver + * + * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include <linux/circ_buf.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +/* MAC control register list */ +#define MAC_PORTSEL 0x0200 +#define MAC_PORTSEL_STAT_CPU BIT(0) +#define MAC_PORTSEL_RMII BIT(1) +#define MAC_PORTSET 0x0208 +#define MAC_PORTSET_DUPLEX_FULL BIT(0) +#define MAC_PORTSET_LINKED BIT(1) +#define MAC_PORTSET_SPEED_100M BIT(2) +#define MAC_SET 0x0210 +#define MAX_FRAME_SIZE 1600 +#define MAX_FRAME_SIZE_MASK GENMASK(10, 0) +#define BIT_PAUSE_EN BIT(18) +#define RX_COALESCE_SET 0x0340 +#define RX_COALESCED_FRAME_OFFSET 24 +#define RX_COALESCED_FRAMES 8 +#define RX_COALESCED_TIMER 0x74 +#define QLEN_SET 0x0344 +#define RX_DEPTH_OFFSET 8 +#define MAX_HW_FIFO_DEPTH 64 +#define HW_TX_FIFO_DEPTH 12 +#define HW_RX_FIFO_DEPTH (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH) +#define IQFRM_DES 0x0354 +#define RX_FRAME_LEN_MASK GENMASK(11, 0) +#define IQ_ADDR 0x0358 +#define EQ_ADDR 0x0360 +#define EQFRM_LEN 0x0364 +#define ADDRQ_STAT 0x036C +#define TX_CNT_INUSE_MASK GENMASK(5, 0) +#define BIT_TX_READY BIT(24) +#define BIT_RX_READY BIT(25) +/* global control register list */ +#define GLB_HOSTMAC_L32 0x0000 +#define GLB_HOSTMAC_H16 0x0004 +#define GLB_SOFT_RESET 0x0008 +#define SOFT_RESET_ALL BIT(0) +#define GLB_FWCTRL 0x0010 +#define FWCTRL_VLAN_ENABLE BIT(0) +#define FWCTRL_FW2CPU_ENA BIT(5) +#define FWCTRL_FWALL2CPU BIT(7) +#define GLB_MACTCTRL 0x0014 +#define MACTCTRL_UNI2CPU BIT(1) +#define MACTCTRL_MULTI2CPU BIT(3) +#define MACTCTRL_BROAD2CPU BIT(5) +#define MACTCTRL_MACT_ENA BIT(7) +#define GLB_IRQ_STAT 0x0030 +#define GLB_IRQ_ENA 0x0034 +#define IRQ_ENA_PORT0_MASK GENMASK(7, 0) +#define IRQ_ENA_PORT0 BIT(18) +#define IRQ_ENA_ALL BIT(19) +#define GLB_IRQ_RAW 0x0038 +#define IRQ_INT_RX_RDY BIT(0) +#define IRQ_INT_TX_PER_PACKET BIT(1) +#define IRQ_INT_TX_FIFO_EMPTY BIT(6) +#define IRQ_INT_MULTI_RXRDY BIT(7) +#define DEF_INT_MASK (IRQ_INT_MULTI_RXRDY | \ + IRQ_INT_TX_PER_PACKET | \ + IRQ_INT_TX_FIFO_EMPTY) +#define GLB_MAC_L32_BASE 0x0100 +#define GLB_MAC_H16_BASE 0x0104 +#define MACFLT_HI16_MASK GENMASK(15, 0) +#define BIT_MACFLT_ENA BIT(17) +#define BIT_MACFLT_FW2CPU BIT(21) +#define GLB_MAC_H16(reg) (GLB_MAC_H16_BASE + ((reg) * 0x8)) +#define GLB_MAC_L32(reg) (GLB_MAC_L32_BASE + ((reg) * 0x8)) +#define MAX_MAC_FILTER_NUM 8 +#define MAX_UNICAST_ADDRESSES 2 +#define MAX_MULTICAST_ADDRESSES (MAX_MAC_FILTER_NUM - \ + MAX_UNICAST_ADDRESSES) +/* software tx and rx queue number, should be power of 2 */ +#define TXQ_NUM 64 +#define RXQ_NUM 128 +#define FEMAC_POLL_WEIGHT 16 + +#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us" + +enum phy_reset_delays { + PRE_DELAY, + PULSE, + POST_DELAY, + DELAYS_NUM, +}; + +struct hisi_femac_queue { + struct sk_buff **skb; + dma_addr_t *dma_phys; + int num; + unsigned int head; + unsigned int tail; +}; + +struct hisi_femac_priv { + void __iomem *port_base; + void __iomem *glb_base; + struct clk *clk; + struct reset_control *mac_rst; + struct reset_control *phy_rst; + u32 phy_reset_delays[DELAYS_NUM]; + u32 link_status; + + struct device *dev; + struct net_device *ndev; + + struct hisi_femac_queue txq; + struct hisi_femac_queue rxq; + u32 tx_fifo_used_cnt; + struct napi_struct napi; +}; + +static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs) +{ + u32 val; + + val = readl(priv->glb_base + GLB_IRQ_ENA); + writel(val | irqs, priv->glb_base + GLB_IRQ_ENA); +} + +static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs) +{ + u32 val; + + val = readl(priv->glb_base + GLB_IRQ_ENA); +
writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA); +} + +static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv, + struct sk_buff *skb, unsigned int pos) +{ + dma_addr_t dma_addr; + + dma_addr = priv->txq.dma_phys[pos]; + dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE); +} + +static void hisi_femac_xmit_reclaim(struct net_device *dev) +{ + struct sk_buff *skb; + struct hisi_femac_priv *priv = netdev_priv(dev); + struct hisi_femac_queue *txq = &priv->txq; + unsigned int bytes_compl = 0, pkts_compl = 0; + u32 val; + + netif_tx_lock(dev); + + val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK; + while (val < priv->tx_fifo_used_cnt) { + skb = txq->skb[txq->tail]; + if (unlikely(!skb)) { + netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n", + val, priv->tx_fifo_used_cnt); + break; + } + hisi_femac_tx_dma_unmap(priv, skb, txq->tail); + pkts_compl++; + bytes_compl += skb->len; + dev_kfree_skb_any(skb); + + priv->tx_fifo_used_cnt--; + + val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK; + txq->skb[txq->tail] = NULL; + txq->tail = (txq->tail + 1) % txq->num; + } + + netdev_completed_queue(dev, pkts_compl, bytes_compl); + + if (unlikely(netif_queue_stopped(dev)) && pkts_compl) + netif_wake_queue(dev); + + netif_tx_unlock(dev); +} + +static void hisi_femac_adjust_link(struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + struct phy_device *phy = dev->phydev; + u32 status = 0; + + if (phy->link) + status |= MAC_PORTSET_LINKED; + if (phy->duplex == DUPLEX_FULL) + status |= MAC_PORTSET_DUPLEX_FULL; + if (phy->speed == SPEED_100) + status |= MAC_PORTSET_SPEED_100M; + + if ((status != priv->link_status) && + ((status | priv->link_status) & MAC_PORTSET_LINKED)) { + writel(status, priv->port_base + MAC_PORTSET); + priv->link_status = status; + phy_print_status(phy); + } +} + +static void hisi_femac_rx_refill(struct hisi_femac_priv *priv) +{ + struct hisi_femac_queue *rxq = &priv->rxq; + struct sk_buff *skb; + u32 pos; + u32 len = MAX_FRAME_SIZE; + dma_addr_t addr; + + pos = rxq->head; + while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) { + if (!CIRC_SPACE(pos, rxq->tail, rxq->num)) + break; + if (unlikely(rxq->skb[pos])) { + netdev_err(priv->ndev, "err skb[%d]=%p\n", + pos, rxq->skb[pos]); + break; + } + skb = netdev_alloc_skb_ip_align(priv->ndev, len); + if (unlikely(!skb)) + break; + + addr = dma_map_single(priv->dev, skb->data, len, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->dev, addr)) { + dev_kfree_skb_any(skb); + break; + } + rxq->dma_phys[pos] = addr; + rxq->skb[pos] = skb; + writel(addr, priv->port_base + IQ_ADDR); + pos = (pos + 1) % rxq->num; + } + rxq->head = pos; +} + +static int hisi_femac_rx(struct net_device *dev, int limit) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + struct hisi_femac_queue *rxq = &priv->rxq; + struct sk_buff *skb; + dma_addr_t addr; + u32 rx_pkt_info, pos, len, rx_pkts_num = 0; + + pos = rxq->tail; + while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) { + rx_pkt_info = readl(priv->port_base + IQFRM_DES); + len = rx_pkt_info & RX_FRAME_LEN_MASK; + len -= ETH_FCS_LEN; + + /* tell hardware we will deal with this packet */ + writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW); + + rx_pkts_num++; + + skb = rxq->skb[pos]; + if (unlikely(!skb)) { + netdev_err(dev, "rx skb NULL. 
pos=%d\n", pos); + break; + } + rxq->skb[pos] = NULL; + + addr = rxq->dma_phys[pos]; + dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE, + DMA_FROM_DEVICE); + skb_put(skb, len); + if (unlikely(skb->len > MAX_FRAME_SIZE)) { + netdev_err(dev, "rcv len err, len = %d\n", skb->len); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev_kfree_skb_any(skb); + goto next; + } + + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(&priv->napi, skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; +next: + pos = (pos + 1) % rxq->num; + if (rx_pkts_num >= limit) + break; + } + rxq->tail = pos; + + hisi_femac_rx_refill(priv); + + return rx_pkts_num; +} + +static int hisi_femac_poll(struct napi_struct *napi, int budget) +{ + struct hisi_femac_priv *priv = container_of(napi, + struct hisi_femac_priv, napi); + struct net_device *dev = priv->ndev; + int work_done = 0, task = budget; + int ints, num; + + do { + hisi_femac_xmit_reclaim(dev); + num = hisi_femac_rx(dev, task); + work_done += num; + task -= num; + if (work_done >= budget) + break; + + ints = readl(priv->glb_base + GLB_IRQ_RAW); + writel(ints & DEF_INT_MASK, + priv->glb_base + GLB_IRQ_RAW); + } while (ints & DEF_INT_MASK); + + if (work_done < budget) { + napi_complete(napi); + hisi_femac_irq_enable(priv, DEF_INT_MASK & + (~IRQ_INT_TX_PER_PACKET)); + } + + return work_done; +} + +static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id) +{ + int ints; + struct net_device *dev = (struct net_device *)dev_id; + struct hisi_femac_priv *priv = netdev_priv(dev); + + ints = readl(priv->glb_base + GLB_IRQ_RAW); + + if (likely(ints & DEF_INT_MASK)) { + writel(ints & DEF_INT_MASK, + priv->glb_base + GLB_IRQ_RAW); + hisi_femac_irq_disable(priv, DEF_INT_MASK); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +static int hisi_femac_init_queue(struct device *dev, + struct hisi_femac_queue *queue, + unsigned int num) +{ + queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *), + GFP_KERNEL); + if (!queue->skb) + return -ENOMEM; + + queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t), + GFP_KERNEL); + if (!queue->dma_phys) + return -ENOMEM; + + queue->num = num; + queue->head = 0; + queue->tail = 0; + + return 0; +} + +static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv) +{ + int ret; + + ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM); + if (ret) + return ret; + + ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM); + if (ret) + return ret; + + priv->tx_fifo_used_cnt = 0; + + return 0; +} + +static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv) +{ + struct hisi_femac_queue *txq = &priv->txq; + struct hisi_femac_queue *rxq = &priv->rxq; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 pos; + + pos = rxq->tail; + while (pos != rxq->head) { + skb = rxq->skb[pos]; + if (unlikely(!skb)) { + netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n", + pos, rxq->head); + continue; + } + + dma_addr = rxq->dma_phys[pos]; + dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(skb); + rxq->skb[pos] = NULL; + pos = (pos + 1) % rxq->num; + } + rxq->tail = pos; + + pos = txq->tail; + while (pos != txq->head) { + skb = txq->skb[pos]; + if (unlikely(!skb)) { + netdev_err(priv->ndev, "NULL tx skb. 
pos=%d, head=%d\n", + pos, txq->head); + continue; + } + hisi_femac_tx_dma_unmap(priv, skb, pos); + dev_kfree_skb_any(skb); + txq->skb[pos] = NULL; + pos = (pos + 1) % txq->num; + } + txq->tail = pos; + priv->tx_fifo_used_cnt = 0; +} + +static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv, + unsigned char *mac) +{ + u32 reg; + + reg = mac[1] | (mac[0] << 8); + writel(reg, priv->glb_base + GLB_HOSTMAC_H16); + + reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24); + writel(reg, priv->glb_base + GLB_HOSTMAC_L32); + + return 0; +} + +static int hisi_femac_port_reset(struct hisi_femac_priv *priv) +{ + u32 val; + + val = readl(priv->glb_base + GLB_SOFT_RESET); + val |= SOFT_RESET_ALL; + writel(val, priv->glb_base + GLB_SOFT_RESET); + + usleep_range(500, 800); + + val &= ~SOFT_RESET_ALL; + writel(val, priv->glb_base + GLB_SOFT_RESET); + + return 0; +} + +static int hisi_femac_net_open(struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + + hisi_femac_port_reset(priv); + hisi_femac_set_hw_mac_addr(priv, dev->dev_addr); + hisi_femac_rx_refill(priv); + + netif_carrier_off(dev); + netdev_reset_queue(dev); + netif_start_queue(dev); + napi_enable(&priv->napi); + + priv->link_status = 0; + if (dev->phydev) + phy_start(dev->phydev); + + writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW); + hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK); + + return 0; +} + +static int hisi_femac_net_close(struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + + hisi_femac_irq_disable(priv, IRQ_ENA_PORT0); + + if (dev->phydev) + phy_stop(dev->phydev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + hisi_femac_free_skb_rings(priv); + + return 0; +} + +static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + struct hisi_femac_queue *txq = &priv->txq; + dma_addr_t addr; + u32 val; + + val = readl(priv->port_base + ADDRQ_STAT); + val &= BIT_TX_READY; + if (!val) { + hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET); + dev->stats.tx_dropped++; + dev->stats.tx_fifo_errors++; + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + if (unlikely(!CIRC_SPACE(txq->head, txq->tail, + txq->num))) { + hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET); + dev->stats.tx_dropped++; + dev->stats.tx_fifo_errors++; + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + addr = dma_map_single(priv->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, addr))) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + txq->dma_phys[txq->head] = addr; + + txq->skb[txq->head] = skb; + txq->head = (txq->head + 1) % txq->num; + + writel(addr, priv->port_base + EQ_ADDR); + writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN); + + priv->tx_fifo_used_cnt++; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + netdev_sent_queue(dev, skb->len); + + return NETDEV_TX_OK; +} + +static int hisi_femac_set_mac_address(struct net_device *dev, void *p) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + struct sockaddr *skaddr = p; + + if (!is_valid_ether_addr(skaddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; + + hisi_femac_set_hw_mac_addr(priv, dev->dev_addr); + + return 0; +} + +static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv, + unsigned int reg_n, 
bool enable) +{ + u32 val; + + val = readl(priv->glb_base + GLB_MAC_H16(reg_n)); + if (enable) + val |= BIT_MACFLT_ENA; + else + val &= ~BIT_MACFLT_ENA; + writel(val, priv->glb_base + GLB_MAC_H16(reg_n)); +} + +static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv, + unsigned char *addr, + unsigned int reg_n) +{ + unsigned int high, low; + u32 val; + + high = GLB_MAC_H16(reg_n); + low = GLB_MAC_L32(reg_n); + + val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; + writel(val, priv->glb_base + low); + + val = readl(priv->glb_base + high); + val &= ~MACFLT_HI16_MASK; + val |= ((addr[0] << 8) | addr[1]); + val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU); + writel(val, priv->glb_base + high); +} + +static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv, + bool promisc_mode) +{ + u32 val; + + val = readl(priv->glb_base + GLB_FWCTRL); + if (promisc_mode) + val |= FWCTRL_FWALL2CPU; + else + val &= ~FWCTRL_FWALL2CPU; + writel(val, priv->glb_base + GLB_FWCTRL); +} + +/* Handle multiple multicast addresses (perfect filtering)*/ +static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv) +{ + struct net_device *dev = priv->ndev; + u32 val; + + val = readl(priv->glb_base + GLB_MACTCTRL); + if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) || + (dev->flags & IFF_ALLMULTI)) { + val |= MACTCTRL_MULTI2CPU; + } else { + int reg = MAX_UNICAST_ADDRESSES; + int i; + struct netdev_hw_addr *ha; + + for (i = reg; i < MAX_MAC_FILTER_NUM; i++) + hisi_femac_enable_hw_addr_filter(priv, i, false); + + netdev_for_each_mc_addr(ha, dev) { + hisi_femac_set_hw_addr_filter(priv, ha->addr, reg); + reg++; + } + val &= ~MACTCTRL_MULTI2CPU; + } + writel(val, priv->glb_base + GLB_MACTCTRL); +} + +/* Handle multiple unicast addresses (perfect filtering)*/ +static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv) +{ + struct net_device *dev = priv->ndev; + u32 val; + + val = readl(priv->glb_base + GLB_MACTCTRL); + if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) { + val |= MACTCTRL_UNI2CPU; + } else { + int reg = 0; + int i; + struct netdev_hw_addr *ha; + + for (i = reg; i < MAX_UNICAST_ADDRESSES; i++) + hisi_femac_enable_hw_addr_filter(priv, i, false); + + netdev_for_each_uc_addr(ha, dev) { + hisi_femac_set_hw_addr_filter(priv, ha->addr, reg); + reg++; + } + val &= ~MACTCTRL_UNI2CPU; + } + writel(val, priv->glb_base + GLB_MACTCTRL); +} + +static void hisi_femac_net_set_rx_mode(struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + + if (dev->flags & IFF_PROMISC) { + hisi_femac_set_promisc_mode(priv, true); + } else { + hisi_femac_set_promisc_mode(priv, false); + hisi_femac_set_mc_addr_filter(priv); + hisi_femac_set_uc_addr_filter(priv); + } +} + +static int hisi_femac_net_ioctl(struct net_device *dev, + struct ifreq *ifreq, int cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -EINVAL; + + return phy_mii_ioctl(dev->phydev, ifreq, cmd); +} + +static struct ethtool_ops hisi_femac_ethtools_ops = { + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static const struct net_device_ops hisi_femac_netdev_ops = { + .ndo_open = hisi_femac_net_open, + .ndo_stop = hisi_femac_net_close, + .ndo_start_xmit = hisi_femac_net_xmit, + .ndo_do_ioctl = hisi_femac_net_ioctl, + .ndo_set_mac_address = hisi_femac_set_mac_address, + .ndo_set_rx_mode = hisi_femac_net_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, +}; + 
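The xmit and reclaim paths above pair a software ring (producer head, consumer tail) with the hardware FIFO count read back from ADDRQ_STAT, and size the free space with CIRC_SPACE() from <linux/circ_buf.h>. That macro only works when the ring size is a power of two, which is why TXQ_NUM and RXQ_NUM are defined as 64 and 128. A minimal sketch of that accounting, using hypothetical demo_* names rather than the driver's own structures:

#include <linux/circ_buf.h>
#include <linux/errno.h>

#define DEMO_QNUM 64	/* must stay a power of two, like TXQ_NUM */

struct demo_ring {
	unsigned int head;	/* next slot the producer fills */
	unsigned int tail;	/* next slot the consumer drains */
};

/* Illustrative only: reserve one slot, as hisi_femac_net_xmit() does. */
static int demo_ring_reserve(struct demo_ring *r)
{
	if (!CIRC_SPACE(r->head, r->tail, DEMO_QNUM))
		return -EBUSY;	/* ring full: the driver stops the netif queue */
	r->head = (r->head + 1) % DEMO_QNUM;
	return 0;
}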
+static void hisi_femac_core_reset(struct hisi_femac_priv *priv) +{ + reset_control_assert(priv->mac_rst); + reset_control_deassert(priv->mac_rst); +} + +static void hisi_femac_sleep_us(u32 time_us) +{ + u32 time_ms; + + if (!time_us) + return; + + time_ms = DIV_ROUND_UP(time_us, 1000); + if (time_ms < 20) + usleep_range(time_us, time_us + 500); + else + msleep(time_ms); +} + +static void hisi_femac_phy_reset(struct hisi_femac_priv *priv) +{ + /* To make sure PHY hardware reset success, + * we must keep PHY in deassert state first and + * then complete the hardware reset operation + */ + reset_control_deassert(priv->phy_rst); + hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]); + + reset_control_assert(priv->phy_rst); + /* delay some time to ensure reset ok, + * this depends on PHY hardware feature + */ + hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]); + reset_control_deassert(priv->phy_rst); + /* delay some time to ensure later MDIO access */ + hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]); +} + +static void hisi_femac_port_init(struct hisi_femac_priv *priv) +{ + u32 val; + + /* MAC gets link status info and phy mode by software config */ + val = MAC_PORTSEL_STAT_CPU; + if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII) + val |= MAC_PORTSEL_RMII; + writel(val, priv->port_base + MAC_PORTSEL); + + /*clear all interrupt status */ + writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW); + hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0); + + val = readl(priv->glb_base + GLB_FWCTRL); + val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU); + val |= FWCTRL_FW2CPU_ENA; + writel(val, priv->glb_base + GLB_FWCTRL); + + val = readl(priv->glb_base + GLB_MACTCTRL); + val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA); + writel(val, priv->glb_base + GLB_MACTCTRL); + + val = readl(priv->port_base + MAC_SET); + val &= ~MAX_FRAME_SIZE_MASK; + val |= MAX_FRAME_SIZE; + writel(val, priv->port_base + MAC_SET); + + val = RX_COALESCED_TIMER | + (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET); + writel(val, priv->port_base + RX_COALESCE_SET); + + val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH; + writel(val, priv->port_base + QLEN_SET); +} + +static int hisi_femac_drv_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + struct net_device *ndev; + struct hisi_femac_priv *priv; + struct phy_device *phy; + const char *mac_addr; + int ret; + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + + platform_set_drvdata(pdev, ndev); + + priv = netdev_priv(ndev); + priv->dev = dev; + priv->ndev = ndev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->port_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->port_base)) { + ret = PTR_ERR(priv->port_base); + goto out_free_netdev; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->glb_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->glb_base)) { + ret = PTR_ERR(priv->glb_base); + goto out_free_netdev; + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get clk\n"); + ret = -ENODEV; + goto out_free_netdev; + } + + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(dev, "failed to enable clk %d\n", ret); + goto out_free_netdev; + } + + priv->mac_rst = devm_reset_control_get(dev, "mac"); + if (IS_ERR(priv->mac_rst)) { + ret = PTR_ERR(priv->mac_rst); + goto out_disable_clk; + } + 
hisi_femac_core_reset(priv); + + priv->phy_rst = devm_reset_control_get(dev, "phy"); + if (IS_ERR(priv->phy_rst)) { + priv->phy_rst = NULL; + } else { + ret = of_property_read_u32_array(node, + PHY_RESET_DELAYS_PROPERTY, + priv->phy_reset_delays, + DELAYS_NUM); + if (ret) + goto out_disable_clk; + hisi_femac_phy_reset(priv); + } + + phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link); + if (!phy) { + dev_err(dev, "connect to PHY failed!\n"); + ret = -ENODEV; + goto out_disable_clk; + } + + phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n", + (unsigned long)phy->phy_id, + phy_modes(phy->interface)); + + mac_addr = of_get_mac_address(node); + if (mac_addr) + ether_addr_copy(ndev->dev_addr, mac_addr); + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(dev, "using random MAC address %pM\n", + ndev->dev_addr); + } + + ndev->watchdog_timeo = 6 * HZ; + ndev->priv_flags |= IFF_UNICAST_FLT; + ndev->netdev_ops = &hisi_femac_netdev_ops; + ndev->ethtool_ops = &hisi_femac_ethtools_ops; + netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT); + SET_NETDEV_DEV(ndev, &pdev->dev); + + hisi_femac_port_init(priv); + + ret = hisi_femac_init_tx_and_rx_queues(priv); + if (ret) + goto out_disconnect_phy; + + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq <= 0) { + dev_err(dev, "No irq resource\n"); + ret = -ENODEV; + goto out_disconnect_phy; + } + + ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt, + IRQF_SHARED, pdev->name, ndev); + if (ret) { + dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq); + goto out_disconnect_phy; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(dev, "register_netdev failed!\n"); + goto out_disconnect_phy; + } + + return ret; + +out_disconnect_phy: + netif_napi_del(&priv->napi); + phy_disconnect(phy); +out_disable_clk: + clk_disable_unprepare(priv->clk); +out_free_netdev: + free_netdev(ndev); + + return ret; +} + +static int hisi_femac_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct hisi_femac_priv *priv = netdev_priv(ndev); + + netif_napi_del(&priv->napi); + unregister_netdev(ndev); + + phy_disconnect(ndev->phydev); + clk_disable_unprepare(priv->clk); + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +int hisi_femac_drv_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct hisi_femac_priv *priv = netdev_priv(ndev); + + disable_irq(ndev->irq); + if (netif_running(ndev)) { + hisi_femac_net_close(ndev); + netif_device_detach(ndev); + } + + clk_disable_unprepare(priv->clk); + + return 0; +} + +int hisi_femac_drv_resume(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct hisi_femac_priv *priv = netdev_priv(ndev); + + clk_prepare_enable(priv->clk); + if (priv->phy_rst) + hisi_femac_phy_reset(priv); + + if (netif_running(ndev)) { + hisi_femac_port_init(priv); + hisi_femac_net_open(ndev); + netif_device_attach(ndev); + } + enable_irq(ndev->irq); + + return 0; +} +#endif + +static const struct of_device_id hisi_femac_match[] = { + {.compatible = "hisilicon,hisi-femac-v1",}, + {.compatible = "hisilicon,hisi-femac-v2",}, + {.compatible = "hisilicon,hi3516cv300-femac",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, hisi_femac_match); + +static struct platform_driver hisi_femac_driver = { + .driver = { + .name = "hisi-femac", + .of_match_table = hisi_femac_match, + }, + .probe = hisi_femac_drv_probe, + .remove = 
hisi_femac_drv_remove, +#ifdef CONFIG_PM + .suspend = hisi_femac_drv_suspend, + .resume = hisi_femac_drv_resume, +#endif +}; + +module_platform_driver(hisi_femac_driver); + +MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver"); +MODULE_AUTHOR("Dongpo Li "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:hisi-femac"); diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index b9f2ea593..275618bb4 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -218,7 +218,6 @@ struct hix5hd2_priv { struct device *dev; struct net_device *netdev; - struct phy_device *phy; struct device_node *phy_node; phy_interface_t phy_mode; @@ -402,7 +401,7 @@ static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p) static void hix5hd2_adjust_link(struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); - struct phy_device *phy = priv->phy; + struct phy_device *phy = dev->phydev; if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) { hix5hd2_config_port(dev, phy->speed, phy->duplex); @@ -679,6 +678,7 @@ static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv) static int hix5hd2_net_open(struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); + struct phy_device *phy; int ret; ret = clk_prepare_enable(priv->clk); @@ -687,12 +687,12 @@ static int hix5hd2_net_open(struct net_device *dev) return ret; } - priv->phy = of_phy_connect(dev, priv->phy_node, - &hix5hd2_adjust_link, 0, priv->phy_mode); - if (!priv->phy) + phy = of_phy_connect(dev, priv->phy_node, + &hix5hd2_adjust_link, 0, priv->phy_mode); + if (!phy) return -ENODEV; - phy_start(priv->phy); + phy_start(phy); hix5hd2_hw_init(priv); hix5hd2_rx_refill(priv); @@ -716,9 +716,9 @@ static int hix5hd2_net_close(struct net_device *dev) netif_stop_queue(dev); hix5hd2_free_dma_desc_rings(priv); - if (priv->phy) { - phy_stop(priv->phy); - phy_disconnect(priv->phy); + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); } clk_disable_unprepare(priv->clk); @@ -750,32 +750,10 @@ static const struct net_device_ops hix5hd2_netdev_ops = { .ndo_set_mac_address = hix5hd2_net_set_mac_address, }; -static int hix5hd2_get_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) -{ - struct hix5hd2_priv *priv = netdev_priv(net_dev); - - if (!priv->phy) - return -ENODEV; - - return phy_ethtool_gset(priv->phy, cmd); -} - -static int hix5hd2_set_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) -{ - struct hix5hd2_priv *priv = netdev_priv(net_dev); - - if (!priv->phy) - return -ENODEV; - - return phy_ethtool_sset(priv->phy, cmd); -} - static struct ethtool_ops hix5hd2_ethtools_ops = { .get_link = ethtool_op_get_link, - .get_settings = hix5hd2_get_settings, - .set_settings = hix5hd2_set_settings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int hix5hd2_mdio_wait_ready(struct mii_bus *bus) diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 3bfe36f94..c54c6fac0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -96,16 +96,22 @@ static int __ae_match(struct device *dev, const void *data) { struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); - return hdev->dev->of_node == data; + if (dev_of_node(hdev->dev)) + return (data == &hdev->dev->of_node->fwnode); + else if 
(is_acpi_node(hdev->dev->fwnode)) + return (data == hdev->dev->fwnode); + + dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n"); + return 0; } -static struct hnae_ae_dev *find_ae(const struct device_node *ae_node) +static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode) { struct device *dev; - WARN_ON(!ae_node); + WARN_ON(!fwnode); - dev = class_find_device(hnae_class, NULL, ae_node, __ae_match); + dev = class_find_device(hnae_class, NULL, fwnode, __ae_match); return dev ? cls_to_ae_dev(dev) : NULL; } @@ -312,7 +318,7 @@ EXPORT_SYMBOL(hnae_reinit_handle); * return handle ptr or ERR_PTR */ struct hnae_handle *hnae_get_handle(struct device *owner_dev, - const struct device_node *ae_node, + const struct fwnode_handle *fwnode, u32 port_id, struct hnae_buf_ops *bops) { @@ -321,7 +327,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, int i, j; int ret; - dev = find_ae(ae_node); + dev = find_ae(fwnode); if (!dev) return ERR_PTR(-ENODEV); @@ -394,7 +400,6 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner) if (!hdev->ops || !hdev->ops->get_handle || !hdev->ops->toggle_ring_irq || - !hdev->ops->toggle_queue_status || !hdev->ops->get_status || !hdev->ops->adjust_link) return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index e8d36aaea..e093cbf26 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -27,6 +27,7 @@ * "cb" means control block */ +#include <linux/acpi.h> #include #include #include @@ -362,6 +363,14 @@ enum hnae_port_type { HNAE_PORT_DEBUG }; +/* mac media type */ +enum hnae_media_type { + HNAE_MEDIA_TYPE_UNKNOWN = 0, + HNAE_MEDIA_TYPE_FIBER, + HNAE_MEDIA_TYPE_COPPER, + HNAE_MEDIA_TYPE_BACKPLANE, +}; + /* This struct defines the operation on the handle.
* * get_handle(): (mandatory) @@ -453,7 +462,6 @@ struct hnae_ae_ops { int (*get_info)(struct hnae_handle *handle, u8 *auto_neg, u16 *speed, u8 *duplex); void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); - void (*toggle_queue_status)(struct hnae_queue *queue, u32 val); void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); int (*set_loopback)(struct hnae_handle *handle, enum hnae_loop loop_mode, int en); @@ -472,6 +480,11 @@ struct hnae_ae_ops { int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); int (*set_coalesce_frames)(struct hnae_handle *handle, u32 coalesce_frames); + void (*get_coalesce_range)(struct hnae_handle *handle, + u32 *tx_frames_low, u32 *rx_frames_low, + u32 *tx_frames_high, u32 *rx_frames_high, + u32 *tx_usecs_low, u32 *rx_usecs_low, + u32 *tx_usecs_high, u32 *rx_usecs_high); void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); int (*get_mac_addr)(struct hnae_handle *handle, void **p); int (*set_mac_addr)(struct hnae_handle *handle, void *p); @@ -512,7 +525,7 @@ struct hnae_ae_dev { struct hnae_handle { struct device *owner_dev; /* the device which make use of this handle */ struct hnae_ae_dev *dev; /* the device who provides this handle */ - struct device_node *phy_node; + struct phy_device *phy_dev; phy_interface_t phy_if; u32 if_support; int q_num; @@ -520,6 +533,7 @@ struct hnae_handle { u32 eport_id; u32 dport_id; /* v2 tx bd should fill the dport_id */ enum hnae_port_type port_type; + enum hnae_media_type media_type; struct list_head node; /* list to hnae_ae_dev->handle_list */ struct hnae_buf_ops *bops; /* operation for the buffer */ struct hnae_queue **qs; /* array base of all queues */ @@ -528,7 +542,7 @@ struct hnae_handle { #define ring_to_dev(ring) ((ring)->q->dev->dev) struct hnae_handle *hnae_get_handle(struct device *owner_dev, - const struct device_node *ae_node, + const struct fwnode_handle *fwnode, u32 port_id, struct hnae_buf_ops *bops); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 7a757e88c..e28d96099 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -131,9 +131,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, vf_cb->mac_cb = dsaf_dev->mac_cb[port_id]; ae_handle->phy_if = vf_cb->mac_cb->phy_if; - ae_handle->phy_node = vf_cb->mac_cb->phy_node; + ae_handle->phy_dev = vf_cb->mac_cb->phy_dev; ae_handle->if_support = vf_cb->mac_cb->if_support; ae_handle->port_type = vf_cb->mac_cb->mac_type; + ae_handle->media_type = vf_cb->mac_cb->media_type; ae_handle->dport_id = port_id; return ae_handle; @@ -247,12 +248,21 @@ static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable) static int hns_ae_start(struct hnae_handle *handle) { int ret; + int k; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); ret = hns_mac_vm_config_bc_en(mac_cb, 0, true); if (ret) return ret; + for (k = 0; k < handle->q_num; k++) { + if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) + hns_rcb_int_clr_hw(handle->qs[k], + RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + else + hns_rcbv2_int_clr_hw(handle->qs[k], + RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + } hns_ae_ring_enable_all(handle, 1); msleep(100); @@ -313,18 +323,6 @@ static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask) hns_rcbv2_int_ctrl_hw(ring->q, flag, mask); } -static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val) -{ - struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev); - - 
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) - hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); - else - hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); - - hns_rcb_start(queue, val); -} - static int hns_ae_get_link_status(struct hnae_handle *handle) { u32 link_status; @@ -465,6 +463,30 @@ static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, ring_pair->port_id_in_comm, coalesce_frames); } +static void hns_ae_get_coalesce_range(struct hnae_handle *handle, + u32 *tx_frames_low, u32 *rx_frames_low, + u32 *tx_frames_high, u32 *rx_frames_high, + u32 *tx_usecs_low, u32 *rx_usecs_low, + u32 *tx_usecs_high, u32 *rx_usecs_high) +{ + struct dsaf_device *dsaf_dev; + + dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); + + *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES; + *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES; + *tx_frames_high = + (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ? + HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1; + *rx_frames_high = + (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ? + HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1; + *tx_usecs_low = 0; + *rx_usecs_low = 0; + *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS; + *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS; +} + void hns_ae_update_stats(struct hnae_handle *handle, struct net_device_stats *net_stats) { @@ -587,6 +609,7 @@ void hns_ae_get_strings(struct hnae_handle *handle, int idx; struct hns_mac_cb *mac_cb; struct hns_ppe_cb *ppe_cb; + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); u8 *p = data; struct hnae_vf_cb *vf_cb; @@ -609,13 +632,14 @@ void hns_ae_get_strings(struct hnae_handle *handle, p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset); if (mac_cb->mac_type == HNAE_PORT_SERVICE) - hns_dsaf_get_strings(stringset, p, port); + hns_dsaf_get_strings(stringset, p, port, dsaf_dev); } int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) { u32 sset_count = 0; struct hns_mac_cb *mac_cb; + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); assert(handle); @@ -626,7 +650,7 @@ int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) sset_count += hns_mac_get_sset_count(mac_cb, stringset); if (mac_cb->mac_type == HNAE_PORT_SERVICE) - sset_count += hns_dsaf_get_sset_count(stringset); + sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset); return sset_count; } @@ -637,13 +661,15 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, int ret; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; switch (loop) { case MAC_INTERNALLOOP_PHY: ret = 0; break; case MAC_INTERNALLOOP_SERDES: - ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); + ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb, + !!en); break; case MAC_INTERNALLOOP_MAC: ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en); @@ -780,7 +806,6 @@ static struct hnae_ae_ops hns_dsaf_ops = { .stop = hns_ae_stop, .reset = hns_ae_reset, .toggle_ring_irq = hns_ae_toggle_ring_irq, - .toggle_queue_status = hns_ae_toggle_queue_status, .get_status = hns_ae_get_link_status, .get_info = hns_ae_get_mac_info, .adjust_link = hns_ae_adjust_link, @@ -794,6 +819,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames, .set_coalesce_usecs = hns_ae_set_coalesce_usecs, .set_coalesce_frames = hns_ae_set_coalesce_frames, + .get_coalesce_range = hns_ae_get_coalesce_range, 
.set_promisc_mode = hns_ae_set_promisc_mode, .set_mac_addr = hns_ae_set_mac_address, .set_mc_addr = hns_ae_set_multicast_one, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 44abb08de..1e1eb9299 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = { {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, - {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, + {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, @@ -110,7 +110,7 @@ static void hns_gmac_free(void *mac_drv) u32 mac_id = drv->mac_id; - hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0); + dsaf_dev->misc_op->ge_srst(dsaf_dev, mac_id, 0); } static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval) @@ -317,9 +317,9 @@ static void hns_gmac_init(void *mac_drv) port = drv->mac_id; - hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0); + dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0); mdelay(10); - hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1); + dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1); mdelay(10); hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX); hns_gmac_tx_loop_pkt_dis(mac_drv); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 611581fcc..5c8afe1a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -7,6 +7,7 @@ * (at your option) any later version. 
*/ +#include <linux/acpi.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> @@ -15,7 +16,8 @@ #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> #include <linux/platform_device.h> #include "hns_dsaf_main.h" @@ -54,20 +56,6 @@ static const enum mac_mode g_mac_mode_1000[] = { [PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000 }; -static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb) -{ - switch (mac_cb->max_speed) { - case MAC_SPEED_100: - return g_mac_mode_100[mac_cb->phy_if]; - case MAC_SPEED_1000: - return g_mac_mode_1000[mac_cb->phy_if]; - case MAC_SPEED_10000: - return MAC_MODE_XGMII_10000; - default: - return MAC_MODE_MII_100; - } -} - static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) { switch (mac_cb->max_speed) { @@ -94,7 +82,7 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) else *link_status = 0; - ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt); + ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt); if (!ret) *link_status = *link_status && sfp_prsnt; @@ -132,7 +120,6 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) mac_cb->speed = speed; mac_cb->half_duplex = !duplex; - mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb); if (mac_ctrl_drv->adjust_link) { ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv, @@ -511,7 +498,7 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb) mac_ctrl_drv->mac_en_flg = 0; mac_cb->link = 0; - cpld_led_reset(mac_cb); + mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb); } /** @@ -637,6 +624,127 @@ free_mac_drv: return ret; } +static int +hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode) +{ + u32 addr; + int ret; + + ret = fwnode_property_read_u32(fwnode, "phy-addr", &addr); + if (ret) { + dev_err(dev, "has invalid PHY address ret:%d\n", ret); + return ret; + } + + if (addr >= PHY_MAX_ADDR) { + dev_err(dev, "PHY address %i is too large\n", addr); + return -EINVAL; + } + + return addr; +} + +static int hns_mac_phydev_match(struct device *dev, void *fwnode) +{ + return dev->fwnode == fwnode; +} + +static struct +platform_device *hns_mac_find_platform_device(struct fwnode_handle *fwnode) +{ + struct device *dev; + + dev = bus_find_device(&platform_bus_type, NULL, + fwnode, hns_mac_phydev_match); + return dev ?
to_platform_device(dev) : NULL; +} + +static int +hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb, + u32 addr) +{ + struct phy_device *phy; + const char *phy_type; + bool is_c45; + int rc; + + rc = fwnode_property_read_string(mac_cb->fw_port, + "phy-mode", &phy_type); + if (rc < 0) + return rc; + + if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII))) + is_c45 = 1; + else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII))) + is_c45 = 0; + else + return -ENODATA; + + phy = get_phy_device(mdio, addr, is_c45); + if (!phy || IS_ERR(phy)) + return -EIO; + + if (mdio->irq) + phy->irq = mdio->irq[addr]; + + /* All data is now stored in the phy struct; + * register it + */ + rc = phy_device_register(phy); + if (rc) { + phy_device_free(phy); + return -ENODEV; + } + + mac_cb->phy_dev = phy; + + dev_dbg(&mdio->dev, "registered phy at address %i\n", addr); + + return 0; +} + +static void hns_mac_register_phy(struct hns_mac_cb *mac_cb) +{ + struct acpi_reference_args args; + struct platform_device *pdev; + struct mii_bus *mii_bus; + int rc; + int addr; + + /* Loop over the child nodes and register a phy_device for each one */ + if (!to_acpi_device_node(mac_cb->fw_port)) + return; + + rc = acpi_node_get_property_reference( + mac_cb->fw_port, "mdio-node", 0, &args); + if (rc) + return; + + addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port); + if (addr < 0) + return; + + /* dev address in adev */ + pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev)); + mii_bus = platform_get_drvdata(pdev); + rc = hns_mac_register_phydev(mii_bus, mac_cb, addr); + if (!rc) + dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n", + mac_cb->mac_id, addr); +} + +#define MAC_MEDIA_TYPE_MAX_LEN 16 + +static const struct { + enum hnae_media_type value; + const char *name; +} media_type_defs[] = { + {HNAE_MEDIA_TYPE_UNKNOWN, "unknown" }, + {HNAE_MEDIA_TYPE_FIBER, "fiber" }, + {HNAE_MEDIA_TYPE_COPPER, "copper" }, + {HNAE_MEDIA_TYPE_BACKPLANE, "backplane" }, +}; + /** *hns_mac_get_info - get mac information from device node *@mac_cb: mac device @@ -645,13 +753,16 @@ free_mac_drv: */ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) { - struct device_node *np = mac_cb->dev->of_node; + struct device_node *np; struct regmap *syscon; struct of_phandle_args cpld_args; + const char *media_type; + u32 i; u32 ret; mac_cb->link = false; mac_cb->half_duplex = false; + mac_cb->media_type = HNAE_MEDIA_TYPE_UNKNOWN; mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if]; mac_cb->max_speed = mac_cb->speed; @@ -672,62 +783,98 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) * from dsaf node */ if (!mac_cb->fw_port) { - mac_cb->phy_node = of_parse_phandle(np, "phy-handle", - mac_cb->mac_id); - if (mac_cb->phy_node) + np = of_parse_phandle(mac_cb->dev->of_node, "phy-handle", + mac_cb->mac_id); + mac_cb->phy_dev = of_phy_find_device(np); + if (mac_cb->phy_dev) { + /* refcount is held by of_phy_find_device() + * if the phy_dev is found + */ + put_device(&mac_cb->phy_dev->mdio.dev); + dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, mac_cb->phy_node->name); - return 0; - } - if (!is_of_node(mac_cb->fw_port)) - return -EINVAL; - /* parse property from port subnode in dsaf */ - mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port), - "phy-handle", 0); - if (mac_cb->phy_node) - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, mac_cb->phy_node->name); - syscon = syscon_node_to_regmap( - of_parse_phandle(to_of_node(mac_cb->fw_port), - "serdes-syscon", 
0)); - if (IS_ERR_OR_NULL(syscon)) { - dev_err(mac_cb->dev, "serdes-syscon is needed!\n"); - return -EINVAL; - } - mac_cb->serdes_ctrl = syscon; + mac_cb->mac_id, np->name); + } + of_node_put(np); - ret = fwnode_property_read_u32(mac_cb->fw_port, - "port-rst-offset", - &mac_cb->port_rst_off); - if (ret) { - dev_dbg(mac_cb->dev, - "mac%d port-rst-offset not found, use default value.\n", - mac_cb->mac_id); + return 0; } - ret = fwnode_property_read_u32(mac_cb->fw_port, - "port-mode-offset", - &mac_cb->port_mode_off); - if (ret) { - dev_dbg(mac_cb->dev, - "mac%d port-mode-offset not found, use default value.\n", - mac_cb->mac_id); - } + if (is_of_node(mac_cb->fw_port)) { + /* parse property from port subnode in dsaf */ + np = of_parse_phandle(to_of_node(mac_cb->fw_port), + "phy-handle", 0); + mac_cb->phy_dev = of_phy_find_device(np); + if (mac_cb->phy_dev) { + /* refcount is held by of_phy_find_device() + * if the phy_dev is found + */ + put_device(&mac_cb->phy_dev->mdio.dev); + dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", + mac_cb->mac_id, np->name); + } + of_node_put(np); - ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port), - "cpld-syscon", 1, 0, &cpld_args); - if (ret) { - dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n", - mac_cb->mac_id); - mac_cb->cpld_ctrl = NULL; - } else { - syscon = syscon_node_to_regmap(cpld_args.np); + np = of_parse_phandle(to_of_node(mac_cb->fw_port), + "serdes-syscon", 0); + syscon = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR_OR_NULL(syscon)) { - dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); + dev_err(mac_cb->dev, "serdes-syscon is needed!\n"); + return -EINVAL; + } + mac_cb->serdes_ctrl = syscon; + + ret = fwnode_property_read_u32(mac_cb->fw_port, + "port-rst-offset", + &mac_cb->port_rst_off); + if (ret) { + dev_dbg(mac_cb->dev, + "mac%d port-rst-offset not found, use default value.\n", + mac_cb->mac_id); + } + + ret = fwnode_property_read_u32(mac_cb->fw_port, + "port-mode-offset", + &mac_cb->port_mode_off); + if (ret) { + dev_dbg(mac_cb->dev, + "mac%d port-mode-offset not found, use default value.\n", + mac_cb->mac_id); + } + + ret = of_parse_phandle_with_fixed_args( + to_of_node(mac_cb->fw_port), "cpld-syscon", 1, 0, + &cpld_args); + if (ret) { + dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n", + mac_cb->mac_id); mac_cb->cpld_ctrl = NULL; } else { - mac_cb->cpld_ctrl = syscon; - mac_cb->cpld_ctrl_reg = cpld_args.args[0]; + syscon = syscon_node_to_regmap(cpld_args.np); + if (IS_ERR_OR_NULL(syscon)) { + dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); + mac_cb->cpld_ctrl = NULL; + } else { + mac_cb->cpld_ctrl = syscon; + mac_cb->cpld_ctrl_reg = cpld_args.args[0]; + } + } + } else if (is_acpi_node(mac_cb->fw_port)) { + hns_mac_register_phy(mac_cb); + } else { + dev_err(mac_cb->dev, "mac%d cannot find phy node\n", + mac_cb->mac_id); + } + + if (!fwnode_property_read_string(mac_cb->fw_port, "media-type", + &media_type)) { + for (i = 0; i < ARRAY_SIZE(media_type_defs); i++) { + if (!strncmp(media_type_defs[i].name, media_type, + MAC_MEDIA_TYPE_MAX_LEN)) { + mac_cb->media_type = media_type_defs[i].value; + break; + } } } @@ -790,7 +937,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) else mac_cb->mac_type = HNAE_PORT_DEBUG; - mac_cb->phy_if = hns_mac_get_phy_if(mac_cb); + mac_cb->phy_if = dsaf_dev->misc_op->get_phy_if(mac_cb); ret = hns_mac_get_mode(mac_cb->phy_if); if (ret < 0) { @@ -805,7 +952,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) if (ret) return 
ret; - cpld_led_reset(mac_cb); + mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb); mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx); return 0; @@ -892,7 +1039,7 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev) int max_port_num = hns_mac_get_max_port_num(dsaf_dev); for (i = 0; i < max_port_num; i++) { - cpld_led_reset(dsaf_dev->mac_cb[i]); + dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]); dsaf_dev->mac_cb[i] = NULL; } } @@ -975,7 +1122,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb) nic_data = 0; mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts; mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts; - hns_cpld_set_led(mac_cb, (int)mac_cb->link, + mac_cb->dsaf_dev->misc_op->cpld_set_led(mac_cb, (int)mac_cb->link, mac_cb->speed, nic_data); } @@ -985,5 +1132,5 @@ int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, if (!mac_cb || !mac_cb->cpld_ctrl) return 0; - return cpld_set_led_id(mac_cb, status); + return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 97ce9a750..4cbdf14f5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -335,10 +335,11 @@ struct hns_mac_cb { u64 txpkt_for_led; u64 rxpkt_for_led; enum hnae_port_type mac_type; + enum hnae_media_type media_type; phy_interface_t phy_if; enum hnae_loop loop_mode; - struct device_node *phy_node; + struct phy_device *phy_dev; struct mac_hw_stats hw_stats; }; @@ -448,8 +449,6 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en); int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu); int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, u8 *auto_neg, u16 *speed, u8 *duplex); -phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb); -int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en); int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb, enum hnae_loop loop, int en); void hns_mac_update_stats(struct hns_mac_cb *mac_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 1c2ddb25e..afb5daa37 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -7,6 +7,7 @@ * (at your option) any later version. 
*/ +#include <linux/acpi.h> #include <linux/device.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -24,6 +25,7 @@ #include "hns_dsaf_main.h" #include "hns_dsaf_ppe.h" #include "hns_dsaf_rcb.h" +#include "hns_dsaf_misc.h" const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", @@ -32,6 +34,13 @@ const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_SP] = "single-port", }; +static const struct acpi_device_id hns_dsaf_acpi_match[] = { + { "HISI00B1", 0 }, + { "HISI00B2", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match); + int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) { int ret, i; @@ -42,15 +51,27 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) const char *mode_str; struct regmap *syscon; struct resource *res; - struct device_node *np = dsaf_dev->dev->of_node; + struct device_node *np = dsaf_dev->dev->of_node, *np_temp; struct platform_device *pdev = to_platform_device(dsaf_dev->dev); - if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) - dsaf_dev->dsaf_ver = AE_VERSION_1; - else - dsaf_dev->dsaf_ver = AE_VERSION_2; + if (dev_of_node(dsaf_dev->dev)) { + if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) + dsaf_dev->dsaf_ver = AE_VERSION_1; + else + dsaf_dev->dsaf_ver = AE_VERSION_2; + } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { + if (acpi_dev_found(hns_dsaf_acpi_match[0].id)) + dsaf_dev->dsaf_ver = AE_VERSION_1; + else if (acpi_dev_found(hns_dsaf_acpi_match[1].id)) + dsaf_dev->dsaf_ver = AE_VERSION_2; + else + return -ENXIO; + } else { + dev_err(dsaf_dev->dev, "cannot get cfg data from of or acpi\n"); + return -ENXIO; + } - ret = of_property_read_string(np, "mode", &mode_str); + ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str); if (ret) { dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); return ret; @@ -80,32 +101,41 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) else dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE; - syscon = syscon_node_to_regmap( - of_parse_phandle(np, "subctrl-syscon", 0)); - if (IS_ERR_OR_NULL(syscon)) { - res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); - if (!res) { - dev_err(dsaf_dev->dev, "subctrl info is needed!\n"); - return -ENOMEM; - } - dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsaf_dev->sc_base) { - dev_err(dsaf_dev->dev, "subctrl can not map!\n"); - return -ENOMEM; - } + if (dev_of_node(dsaf_dev->dev)) { + np_temp = of_parse_phandle(np, "subctrl-syscon", 0); + syscon = syscon_node_to_regmap(np_temp); + of_node_put(np_temp); + if (IS_ERR_OR_NULL(syscon)) { + res = platform_get_resource(pdev, IORESOURCE_MEM, + res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "subctrl info is needed!\n"); + return -ENOMEM; + } - res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); - if (!res) { - dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n"); - return -ENOMEM; - } - dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsaf_dev->sds_base) { - dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); - return -ENOMEM; + dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, + res); + if (IS_ERR(dsaf_dev->sc_base)) { + dev_err(dsaf_dev->dev, "subctrl can not map!\n"); + return PTR_ERR(dsaf_dev->sc_base); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, + res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n"); + return -ENOMEM; + } + + dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, + res); + if (IS_ERR(dsaf_dev->sds_base)) { + dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); + return PTR_ERR(dsaf_dev->sds_base); + } + } else { + dsaf_dev->sub_ctrl = syscon; } - } else { - dsaf_dev->sub_ctrl = syscon; }
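For context on the error-handling change in this and the following hunks: devm_ioremap_resource() never returns NULL on failure, it returns an ERR_PTR() value, so the old "if (!base)" tests were dead code; the patch switches them to IS_ERR()/PTR_ERR(). A minimal sketch of the corrected pattern, with illustrative names that are not taken from this patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Map one MEM resource; devm_ioremap_resource() reports failure
 * through ERR_PTR(), never through NULL (it also copes with res == NULL).
 */
static void __iomem *my_map_reg(struct platform_device *pdev, int idx)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

	if (IS_ERR(base))
		dev_err(&pdev->dev, "mapping resource %d failed: %ld\n",
			idx, PTR_ERR(base));
	return base;	/* callers must check with IS_ERR(), not !base */
}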
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base"); @@ -117,9 +147,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } } dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsaf_dev->ppe_base) { + if (IS_ERR(dsaf_dev->ppe_base)) { dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n"); - return -ENOMEM; + return PTR_ERR(dsaf_dev->ppe_base); } dsaf_dev->ppe_paddr = res->start; @@ -136,33 +166,34 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } } dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsaf_dev->io_base) { + if (IS_ERR(dsaf_dev->io_base)) { dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n"); - return -ENOMEM; + return PTR_ERR(dsaf_dev->io_base); } } - ret = of_property_read_u32(np, "desc-num", &desc_num); + ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num); if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT || desc_num > HNS_DSAF_MAX_DESC_CNT) { dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n", desc_num, ret); - goto unmap_base_addr; + return -EINVAL; } dsaf_dev->desc_num = desc_num; - ret = of_property_read_u32(np, "reset-field-offset", &reset_offset); + ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset", + &reset_offset); if (ret < 0) { dev_dbg(dsaf_dev->dev, "get reset-field-offset fail, ret=%d!\r\n", ret); } dsaf_dev->reset_offset = reset_offset; - ret = of_property_read_u32(np, "buf-size", &buf_size); + ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size); if (ret < 0) { dev_err(dsaf_dev->dev, "get buf-size fail, ret=%d!\r\n", ret); - goto unmap_base_addr; + return ret; } dsaf_dev->buf_size = buf_size; @@ -170,41 +201,19 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) if (dsaf_dev->buf_size_type < 0) { dev_err(dsaf_dev->dev, "buf_size(%d) is wrong!\n", buf_size); - goto unmap_base_addr; + return -EINVAL; } + dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev); + if (!dsaf_dev->misc_op) + return -ENOMEM; + if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL))) dev_dbg(dsaf_dev->dev, "set mask to 64bit\n"); else dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n"); return 0; - -unmap_base_addr: - if (dsaf_dev->io_base) - iounmap(dsaf_dev->io_base); - if (dsaf_dev->ppe_base) - iounmap(dsaf_dev->ppe_base); - if (dsaf_dev->sds_base) - iounmap(dsaf_dev->sds_base); - if (dsaf_dev->sc_base) - iounmap(dsaf_dev->sc_base); - return ret; -} - -static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev) -{ - if (dsaf_dev->io_base) - iounmap(dsaf_dev->io_base); - - if (dsaf_dev->ppe_base) - iounmap(dsaf_dev->ppe_base); - - if (dsaf_dev->sds_base) - iounmap(dsaf_dev->sds_base); - - if (dsaf_dev->sc_base) - iounmap(dsaf_dev->sc_base); } /** @@ -508,10 +517,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); /* for no enable pfc mode */ @@ -519,29 +528,39 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128); + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192); + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* PPE */ - reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, - DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10); - dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, - DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) { + reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M, + DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + } + /* RoCEE */ for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) { reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, - DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2); - dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, - DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } } @@ -852,6 +871,8 @@ static void hns_dsaf_single_line_tbl_cfg( struct dsaf_device *dsaf_dev, u32 address, struct dsaf_tbl_line_cfg *ptbl_line) { + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address); @@ -860,6 +881,8 @@ static void hns_dsaf_single_line_tbl_cfg( /*Write Plus*/ hns_dsaf_tbl_line_pul(dsaf_dev); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -873,6 +896,8 @@ static void hns_dsaf_tcam_uc_cfg( struct dsaf_tbl_tcam_data *ptbl_tcam_data, struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast) { + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); /*Write Tcam Data*/ @@ -881,6 +906,8 @@ static void hns_dsaf_tcam_uc_cfg( hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast); /*Write Plus*/ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -895,6 +922,8 @@ static void hns_dsaf_tcam_mc_cfg( struct dsaf_tbl_tcam_data *ptbl_tcam_data, struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast) { + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); /*Write Tcam Data*/ @@ -903,6 +932,8 @@ static void hns_dsaf_tcam_mc_cfg( hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast); /*Write Plus*/ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -912,6 +943,8 @@ static void hns_dsaf_tcam_mc_cfg( */ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address) { + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); @@ -924,6 +957,8 @@ static void hns_dsaf_tcam_mc_invld(struct 
dsaf_device *dsaf_dev, u32 address) /*Write Plus*/ hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -941,6 +976,8 @@ static void hns_dsaf_tcam_uc_get( u32 tcam_read_data0; u32 tcam_read_data4; + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); @@ -949,9 +986,9 @@ static void hns_dsaf_tcam_uc_get( /*read tcam data*/ ptbl_tcam_data->tbl_tcam_data_high - = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); - ptbl_tcam_data->tbl_tcam_data_low = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); + ptbl_tcam_data->tbl_tcam_data_low + = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); /*read tcam mcast*/ tcam_read_data0 = dsaf_read_dev(dsaf_dev, @@ -973,6 +1010,8 @@ static void hns_dsaf_tcam_uc_get( DSAF_TBL_UCAST_CFG1_OUT_PORT_S); ptbl_tcam_ucast->tbl_ucast_dvc = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -989,6 +1028,8 @@ static void hns_dsaf_tcam_mc_get( { u32 data_tmp; + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); @@ -997,9 +1038,9 @@ static void hns_dsaf_tcam_mc_get( /*read tcam data*/ ptbl_tcam_data->tbl_tcam_data_high = - dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); - ptbl_tcam_data->tbl_tcam_data_low = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); + ptbl_tcam_data->tbl_tcam_data_low = + dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); /*read tcam mcast*/ ptbl_tcam_mcast->tbl_mcast_port_msk[0] = @@ -1019,6 +1060,8 @@ static void hns_dsaf_tcam_mc_get( ptbl_tcam_mcast->tbl_mcast_port_msk[4] = dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M, DSAF_TBL_MCAST_CFG4_VM128_112_S); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -1080,10 +1123,10 @@ int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, u32 en) { if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { - if (!en) + if (!en) { dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n"); - - return -EINVAL; + return -EINVAL; + } } dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4, @@ -1295,9 +1338,9 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev) dev_dbg(dsaf_dev->dev, "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name); - hns_dsaf_rst(dsaf_dev, 0); + dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0); mdelay(10); - hns_dsaf_rst(dsaf_dev, 1); + dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1); hns_dsaf_comm_init(dsaf_dev); @@ -1325,7 +1368,7 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev) static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev) { /*reset*/ - hns_dsaf_rst(dsaf_dev, 0); + dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0); } /** @@ -1343,6 +1386,7 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev) if (HNS_DSAF_IS_DEBUG(dsaf_dev)) return 0; + spin_lock_init(&dsaf_dev->tcam_lock); ret = hns_dsaf_init_hw(dsaf_dev); if (ret) return ret; @@ -2088,11 +2132,24 @@ void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb) hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode); } +static u32 hns_dsaf_get_inode_prio_reg(int index) +{ + int base_index, offset; + u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG; + + base_index = (index + 1) / DSAF_REG_PER_ZONE; + offset = (index + 1) % DSAF_REG_PER_ZONE; + + return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index + + DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset; +} + void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) { struct dsaf_hw_stats *hw_stats = 
&dsaf_dev->hw_stats[node_num]; bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); + int i; u32 reg_tmp; hw_stats->pad_drop += dsaf_read_dev(dsaf_dev, @@ -2127,6 +2184,18 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) hw_stats->stp_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num); + /* pfc pause frame statistics stored in dsaf inode*/ + if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) { + for (i = 0; i < DSAF_PRIO_NR; i++) { + reg_tmp = hns_dsaf_get_inode_prio_reg(i); + hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev, + reg_tmp + 0x4 * (u64)node_num); + hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev, + DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG + + DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i + + 0xF0 * (u64)node_num); + } + } hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev, DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num); } @@ -2464,38 +2533,53 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) p[i] = 0xdddddddd; } -static char *hns_dsaf_get_node_stats_strings(char *data, int node) +static char *hns_dsaf_get_node_stats_strings(char *data, int node, + struct dsaf_device *dsaf_dev) { char *buff = data; + int i; + bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; + if (node < DSAF_SERVICE_NW_NUM && !is_ver1) { + for (i = 0; i < DSAF_PRIO_NR; i++) { + snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR, + ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts", + node, i); + snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR, + ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts", + node, i); + buff += ETH_GSTRING_LEN; + } + buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN; + } snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; return buff; } @@ -2504,7 +2588,9 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data, int node_num) { u64 *p = data; + int i; struct dsaf_hw_stats *hw_stats = 
&ddev->hw_stats[node_num]; + bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver); p[0] = hw_stats->pad_drop; p[1] = hw_stats->man_pkts; @@ -2519,8 +2605,16 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data, p[10] = hw_stats->local_addr_false; p[11] = hw_stats->vlan_drop; p[12] = hw_stats->stp_drop; - p[13] = hw_stats->tx_pkts; + if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) { + for (i = 0; i < DSAF_PRIO_NR; i++) { + p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i]; + p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i]; + } + p[29] = hw_stats->tx_pkts; + return &p[30]; + } + p[13] = hw_stats->tx_pkts; return &p[14]; } @@ -2548,11 +2642,16 @@ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port) *@stringset: type of values in data *return dsaf string name count */ -int hns_dsaf_get_sset_count(int stringset) +int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset) { - if (stringset == ETH_SS_STATS) - return DSAF_STATIC_NUM; + bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); + if (stringset == ETH_SS_STATS) { + if (is_ver1) + return DSAF_STATIC_NUM; + else + return DSAF_V2_STATIC_NUM; + } return 0; } @@ -2562,7 +2661,8 @@ int hns_dsaf_get_sset_count(int stringset) *@data:strings name value *@port:port index */ -void hns_dsaf_get_strings(int stringset, u8 *data, int port) +void hns_dsaf_get_strings(int stringset, u8 *data, int port, + struct dsaf_device *dsaf_dev) { char *buff = (char *)data; int node = port; @@ -2571,11 +2671,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port) return; /* for ge/xge node info */ - buff = hns_dsaf_get_node_stats_strings(buff, node); + buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); /* for ppe node info */ node = port + DSAF_PPE_INODE_BASE; - (void)hns_dsaf_get_node_stats_strings(buff, node); + (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); } /** @@ -2611,7 +2711,7 @@ static int hns_dsaf_probe(struct platform_device *pdev) ret = hns_dsaf_init(dsaf_dev); if (ret) - goto free_cfg; + goto free_dev; ret = hns_mac_init(dsaf_dev); if (ret) @@ -2636,9 +2736,6 @@ uninit_mac: uninit_dsaf: hns_dsaf_free(dsaf_dev); -free_cfg: - hns_dsaf_free_cfg(dsaf_dev); - free_dev: hns_dsaf_free_dev(dsaf_dev); @@ -2661,8 +2758,6 @@ static int hns_dsaf_remove(struct platform_device *pdev) hns_dsaf_free(dsaf_dev); - hns_dsaf_free_cfg(dsaf_dev); - hns_dsaf_free_dev(dsaf_dev); return 0; @@ -2680,6 +2775,7 @@ static struct platform_driver g_dsaf_driver = { .driver = { .name = DSAF_DRV_NAME, .of_match_table = g_dsaf_match, + .acpi_match_table = hns_dsaf_acpi_match, }, }; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index f0502ba0a..1daf018d9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -39,6 +39,9 @@ struct hns_mac_cb; #define DSAF_DUMP_REGS_NUM 504 #define DSAF_STATIC_NUM 28 +#define DSAF_V2_STATIC_NUM 44 +#define DSAF_PRIO_NR 8 +#define DSAF_REG_PER_ZONE 3 #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) @@ -176,6 +179,8 @@ struct dsaf_hw_stats { u64 local_addr_false; u64 vlan_drop; u64 stp_drop; + u64 rx_pfc[DSAF_PRIO_NR]; + u64 tx_pfc[DSAF_PRIO_NR]; u64 tx_pkts; }; @@ -268,6 +273,27 @@ struct dsaf_int_stat { }; +struct dsaf_misc_op { + void (*cpld_set_led)(struct hns_mac_cb *mac_cb, int link_status, + u16 speed, int data); + void (*cpld_reset_led)(struct 
hns_mac_cb *mac_cb); + int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb, + enum hnae_led_state status); + /* reset series function, it will be reset if the dereset is 0 */ + void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset); + void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset); + void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port, + bool dereset); + void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset); + void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset); + void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset); + + phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb); + int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt); + + int (*cfg_serdes_loopback)(struct hns_mac_cb *mac_cb, bool en); +}; + /* Dsaf device struct define, and mac -> dsaf */ struct dsaf_device { struct device *dev; @@ -292,9 +318,12 @@ struct dsaf_device { struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM]; struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM]; struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM]; + struct dsaf_misc_op *misc_op; struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM]; struct dsaf_int_stat int_stat; + /* spinlock to protect tcam table config */ + spinlock_t tcam_lock; }; static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev) @@ -388,27 +417,17 @@ int hns_dsaf_get_mac_entry_by_index( u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry); -void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val); - -void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val); - -void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val); - void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb); int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev); void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev); -void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val); -void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val); -void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, - u32 port, u32 val); - void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num); -int hns_dsaf_get_sset_count(int stringset); +int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset); void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port); -void hns_dsaf_get_strings(int stringset, u8 *data, int port); +void hns_dsaf_get_strings(int stringset, u8 *data, int port, + struct dsaf_device *dsaf_dev); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index a837bb9e3..611b67b6f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -12,6 +12,27 @@ #include "hns_dsaf_ppe.h" #include "hns_dsaf_reg.h" +enum _dsm_op_index { + HNS_OP_RESET_FUNC = 0x1, + HNS_OP_SERDES_LP_FUNC = 0x2, + HNS_OP_LED_SET_FUNC = 0x3, + HNS_OP_GET_PORT_TYPE_FUNC = 0x4, + HNS_OP_GET_SFP_STAT_FUNC = 0x5, +}; + +enum _dsm_rst_type { + HNS_DSAF_RESET_FUNC = 0x1, + HNS_PPE_RESET_FUNC = 0x2, + HNS_XGE_CORE_RESET_FUNC = 0x3, + HNS_XGE_RESET_FUNC = 0x4, + HNS_GE_RESET_FUNC = 0x5, +}; + +const u8 hns_dsaf_acpi_dsm_uuid[] = { + 0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41, + 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A +}; +
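The enums and UUID above define the driver's interface to a firmware _DSM method: one function index per operation class, with a sub-code selecting which block to reset. Common code reaches all of this only through the dsaf_misc_op table declared earlier, so callers stay identical on DT and ACPI systems. A short usage sketch with a hypothetical caller (not part of the patch), assuming the driver's own header for the types:

#include <linux/delay.h>
#include "hns_dsaf_main.h"	/* struct dsaf_device and struct dsaf_misc_op */

/* Assert and then de-assert reset on one GE port through the
 * indirection table; the same code runs on both firmware back ends.
 */
static void example_ge_reset(struct dsaf_device *dsaf_dev, u32 port)
{
	dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0);	/* request reset */
	mdelay(10);					/* settle delay, as used elsewhere in the driver */
	dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1);	/* de-reset */
}

This mirrors how hns_gmac_init() drives ge_srst earlier in this patch.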
static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val) { if (dsaf_dev->sub_ctrl) @@ -32,8 +53,8 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg) return ret; } -void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, - u16 speed, int data) +static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, + u16 speed, int data) { int speed_reg = 0; u8 value; @@ -65,13 +86,14 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, mac_cb->cpld_led_value = value; } } else { - dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, - CPLD_LED_DEFAULT_VALUE); - mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; + value = (mac_cb->cpld_led_value) & (0x1 << DSAF_LED_ANCHOR_B); + dsaf_write_syscon(mac_cb->cpld_ctrl, + mac_cb->cpld_ctrl_reg, value); + mac_cb->cpld_led_value = value; } } -void cpld_led_reset(struct hns_mac_cb *mac_cb) +static void cpld_led_reset(struct hns_mac_cb *mac_cb) { if (!mac_cb || !mac_cb->cpld_ctrl) return; @@ -81,8 +103,8 @@ void cpld_led_reset(struct hns_mac_cb *mac_cb) mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; } -int cpld_set_led_id(struct hns_mac_cb *mac_cb, - enum hnae_led_state status) +static int cpld_set_led_id(struct hns_mac_cb *mac_cb, + enum hnae_led_state status) { switch (status) { case HNAE_LED_ACTIVE: @@ -93,7 +115,7 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, CPLD_LED_ON_VALUE); dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, mac_cb->cpld_led_value); - return 2; + break; case HNAE_LED_INACTIVE: dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, CPLD_LED_DEFAULT_VALUE); @@ -101,7 +123,8 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, mac_cb->cpld_led_value); break; default: - break; + dev_err(mac_cb->dev, "invalid led state: %d!", status); + return -EINVAL; } return 0; @@ -109,12 +132,40 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, #define RESET_REQ_OR_DREQ 1 -void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val) +static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type, + u32 port_type, u32 port, u32 val) +{ + union acpi_object *obj; + union acpi_object obj_args[3], argv4; + + obj_args[0].integer.type = ACPI_TYPE_INTEGER; + obj_args[0].integer.value = port_type; + obj_args[1].integer.type = ACPI_TYPE_INTEGER; + obj_args[1].integer.value = port; + obj_args[2].integer.type = ACPI_TYPE_INTEGER; + obj_args[2].integer.value = val; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 3; + argv4.package.elements = obj_args; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev), + hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4); + if (!obj) { + dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!", + port_type, port); + return; + } + + ACPI_FREE(obj); +} + +static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset) { u32 xbar_reg_addr; u32 nt_reg_addr; - if (!val) { + if (!dereset) { xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG; nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG; } else { @@ -126,7 +177,15 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val) dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ); } -void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) +static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_DSAF_RESET_FUNC, + 0, dereset); +} + +static void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, + bool dereset) { u32 reg_val = 0; u32 reg_addr; @@ -137,7 +196,7 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
reg_val |= RESET_REQ_OR_DREQ; reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; @@ -145,8 +204,15 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } -void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, - u32 port, u32 val) +static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, + u32 port, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_XGE_RESET_FUNC, port, dereset); +} + +static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, + u32 port, bool dereset) { u32 reg_val = 0; u32 reg_addr; @@ -157,7 +223,7 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, reg_val |= XGMAC_TRX_CORE_SRST_M << dsaf_dev->mac_cb[port]->port_rst_off; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; @@ -165,7 +231,16 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } -void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) +static void +hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev, + u32 port, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_XGE_CORE_RESET_FUNC, port, dereset); +} + +static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, + bool dereset) { u32 reg_val_1; u32 reg_val_2; @@ -178,12 +253,11 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) reg_val_1 = 0x1 << port; port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; /* there is difference between V1 and V2 in register.*/ - if (AE_IS_VER1(dsaf_dev->dsaf_ver)) - reg_val_2 = 0x1041041 << port_rst_off; - else - reg_val_2 = 0x2082082 << port_rst_off; + reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? + 0x1041041 : 0x2082082; + reg_val_2 <<= port_rst_off; - if (val == 0) { + if (!dereset) { dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, reg_val_1); @@ -197,10 +271,13 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) reg_val_1); } } else { - reg_val_1 = 0x15540 << dsaf_dev->reset_offset; - reg_val_2 = 0x100 << dsaf_dev->reset_offset; + reg_val_1 = 0x15540; + reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? 
0x100 : 0x40; + + reg_val_1 <<= dsaf_dev->reset_offset; + reg_val_2 <<= dsaf_dev->reset_offset; - if (val == 0) { + if (!dereset) { dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, reg_val_1); @@ -216,14 +293,22 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) } } -void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) +static void hns_dsaf_ge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, + u32 port, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_GE_RESET_FUNC, port, dereset); +} + +static void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, + bool dereset) { u32 reg_val = 0; u32 reg_addr; reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; @@ -231,15 +316,24 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } -void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) +static void +hns_ppe_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_PPE_RESET_FUNC, port, dereset); +} + +static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset) { - struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev; u32 reg_val; u32 reg_addr; + if (!(dev_of_node(dsaf_dev->dev))) + return; + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { reg_val = RESET_REQ_OR_DREQ; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG; @@ -247,7 +341,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) } else { reg_val = 0x100 << dsaf_dev->reset_offset; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; @@ -261,7 +355,7 @@ * @mac_cb: mac control block * return phy interface */ -phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) +static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) { u32 mode; u32 reg; @@ -293,6 +387,36 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) return phy_if; } +static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) +{ + phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; + union acpi_object *obj; + union acpi_object obj_args, argv4; + + obj_args.integer.type = ACPI_TYPE_INTEGER; + obj_args.integer.value = mac_cb->mac_id; + + argv4.type = ACPI_TYPE_PACKAGE, + argv4.package.count = 1, + argv4.package.elements = &obj_args, + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), + hns_dsaf_acpi_dsm_uuid, 0, + HNS_OP_GET_PORT_TYPE_FUNC, &argv4); + + if (!obj || obj->type != ACPI_TYPE_INTEGER) + return phy_if; + + phy_if = obj->integer.value ? + PHY_INTERFACE_MODE_XGMII : PHY_INTERFACE_MODE_SGMII; + + dev_dbg(mac_cb->dev, "mac_id=%d, phy_if=%d\n", mac_cb->mac_id, phy_if); + + ACPI_FREE(obj); + + return phy_if; +} +
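hns_mac_get_phy_if_acpi() above is the template for every ACPI call in this file: wrap the arguments in an ACPI package, evaluate the _DSM, then unpack and free the returned object. A stripped-down sketch of that pattern follows; the helper name and function index are placeholders, and the acpi_evaluate_dsm() signature shown is the raw-UUID-bytes form this kernel generation uses:

#include <linux/acpi.h>

/* Evaluate one integer-in/integer-out _DSM function; returns the
 * value read back, or -1 when the call fails or returns a non-integer.
 */
static s64 my_dsm_get_int(struct device *dev, const u8 *uuid,
			  u64 func_idx, u64 arg)
{
	union acpi_object in, argv4, *out;
	s64 val = -1;

	in.integer.type = ACPI_TYPE_INTEGER;
	in.integer.value = arg;

	argv4.type = ACPI_TYPE_PACKAGE;
	argv4.package.count = 1;
	argv4.package.elements = &in;

	out = acpi_evaluate_dsm(ACPI_HANDLE(dev), uuid, 0, func_idx, &argv4);
	if (out && out->type == ACPI_TYPE_INTEGER)
		val = out->integer.value;
	if (out)
		ACPI_FREE(out);	/* _DSM results must always be freed */
	return val;
}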
int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) { if (!mac_cb->cpld_ctrl) @@ -309,13 +433,8 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) * @mac_cb: mac control block * return 0 == success */ -int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en) +static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) { - /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000 * port 4-7 hilink3 base is serdes_vaddr + 0x00200000 */ - u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + - (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); const u8 lane_id[] = { 0, /* mac 0 -> lane 0 */ 1, /* mac 1 -> lane 1 */ @@ -332,7 +451,7 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en) int sfp_prsnt; int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt); - if (!mac_cb->phy_node) { + if (!mac_cb->phy_dev) { if (ret) pr_info("please confirm sfp is present or not\n"); else @@ -341,13 +460,110 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en) } if (mac_cb->serdes_ctrl) { - u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset); + u32 origin; + + if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) { +#define HILINK_ACCESS_SEL_CFG 0x40008 + /* hilink4 & hilink3 use the same xge training and + * xge u adaptor. There is a hilink access sel cfg + * register to select which one to be configured + */ + if ((!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) && + (mac_cb->mac_id <= 3)) + dsaf_write_syscon(mac_cb->serdes_ctrl, + HILINK_ACCESS_SEL_CFG, 0); + else + dsaf_write_syscon(mac_cb->serdes_ctrl, + HILINK_ACCESS_SEL_CFG, 3); + } - dsaf_set_field(origin, 1ull << 10, 10, !!en); + origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset); + + dsaf_set_field(origin, 1ull << 10, 10, en); dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); } else { - dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en); + u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + + (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); + dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); + } + + return 0; +}
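Both branches of hns_mac_config_sds_loopback() above flip a single bit (bit 10) of a serdes control word with a read-modify-write cycle, via a syscon regmap when one exists, otherwise via raw MMIO. Over a regmap the same idea collapses to one call; the register offset and bit name below are hypothetical, not taken from this patch:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define MY_SERDES_CTRL_REG	0x204		/* illustrative offset */
#define MY_LOOPBACK_BIT		BIT(10)

/* Enable or disable serdes loopback as an atomic read-modify-write. */
static int my_set_serdes_loopback(struct regmap *syscon, bool en)
{
	return regmap_update_bits(syscon, MY_SERDES_CTRL_REG,
				  MY_LOOPBACK_BIT, en ? MY_LOOPBACK_BIT : 0);
}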
+ +static int +hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en) +{ + union acpi_object *obj; + union acpi_object obj_args[3], argv4; + + obj_args[0].integer.type = ACPI_TYPE_INTEGER; + obj_args[0].integer.value = mac_cb->mac_id; + obj_args[1].integer.type = ACPI_TYPE_INTEGER; + obj_args[1].integer.value = !!en; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 2; + argv4.package.elements = obj_args; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev), + hns_dsaf_acpi_dsm_uuid, 0, + HNS_OP_SERDES_LP_FUNC, &argv4); + if (!obj) { + dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!", + mac_cb->mac_id); + + return -ENOTSUPP; } + ACPI_FREE(obj); + return 0; } + +struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) +{ + struct dsaf_misc_op *misc_op; + + misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL); + if (!misc_op) + return NULL; + + if (dev_of_node(dsaf_dev->dev)) { + misc_op->cpld_set_led = hns_cpld_set_led; + misc_op->cpld_reset_led = cpld_led_reset; + misc_op->cpld_set_led_id = cpld_set_led_id; + + misc_op->dsaf_reset = hns_dsaf_rst; + misc_op->xge_srst = hns_dsaf_xge_srst_by_port; + misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port; + misc_op->ge_srst = hns_dsaf_ge_srst_by_port; + misc_op->ppe_srst = hns_ppe_srst_by_port; + misc_op->ppe_comm_srst = hns_ppe_com_srst; + + misc_op->get_phy_if = hns_mac_get_phy_if; + misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; + + misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback; + } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { + misc_op->cpld_set_led = hns_cpld_set_led; + misc_op->cpld_reset_led = cpld_led_reset; + misc_op->cpld_set_led_id = cpld_set_led_id; + + misc_op->dsaf_reset = hns_dsaf_rst_acpi; + misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi; + misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi; + misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi; + misc_op->ppe_srst = hns_ppe_srst_by_port_acpi; + misc_op->ppe_comm_srst = hns_ppe_com_srst; + + misc_op->get_phy_if = hns_mac_get_phy_if_acpi; + misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; + + misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi; + } else { + devm_kfree(dsaf_dev->dev, (void *)misc_op); + misc_op = NULL; + } + + return (void *)misc_op; +} diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h index 419f07aa9..f06bb03d4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h @@ -33,11 +33,6 @@ #define DSAF_LED_DATA_B 4 #define DSAF_LED_ANCHOR_B 5 -void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, - u16 speed, int data); -void cpld_led_reset(struct hns_mac_cb *mac_cb); -int cpld_set_led_id(struct hns_mac_cb *mac_cb, - enum hnae_led_state status); -int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt); +struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 8cd151a52..6ea872287 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -112,7 +112,6 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb
*ppe_common, int ppe_idx) { - return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; } @@ -200,11 +199,12 @@ static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb, static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common) { enum ppe_qid_mode qid_mode; - enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode; + struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev; + enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; - hns_ppe_com_srst(ppe_common, 0); + dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0); mdelay(100); - hns_ppe_com_srst(ppe_common, 1); + dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1); mdelay(100); if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) { @@ -288,9 +288,9 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) /* get default RSS key */ netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); - hns_ppe_srst_by_port(dsaf_dev, port, 0); + dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0); mdelay(10); - hns_ppe_srst_by_port(dsaf_dev, port, 1); + dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 1); /* clr and msk except irq*/ hns_ppe_exc_irq_en(ppe_cb, 0); @@ -330,8 +330,10 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) u32 port; if (ppe_cb->ppe_common_cb) { + struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev; + port = ppe_cb->index; - hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0); + dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0); } } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 4ef6d23d9..ef1107777 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -458,7 +458,6 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) u32 i; u32 ring_num = rcb_common->ring_num; int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common); - struct device_node *np = rcb_common->dsaf_dev->dev->of_node; struct platform_device *pdev = to_platform_device(rcb_common->dsaf_dev->dev); bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); @@ -473,10 +472,10 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) ring_pair_cb->port_id_in_comm = hns_rcb_get_port_in_comm(rcb_common, i); ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = - is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : + is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) : platform_get_irq(pdev, base_irq_idx + i * 3 + 1); ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] = - is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) : + is_ver1 ? 
platform_get_irq(pdev, base_irq_idx + i * 2 + 1) : platform_get_irq(pdev, base_irq_idx + i * 3); ring_pair_cb->q.phy_base = RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i); @@ -541,7 +540,7 @@ int hns_rcb_set_coalesce_usecs( } if (timeout > HNS_RCB_MAX_COALESCED_USECS) { dev_err(rcb_common->dsaf_dev->dev, - "error: not support coalesce %dus!\n", timeout); + "error: coalesce_usecs setting supports 0~1023us\n"); return -EINVAL; } hns_rcb_set_port_timeout(rcb_common, port_idx, timeout); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index bd54dac82..99b4e1ba0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -40,7 +40,7 @@ struct rcb_common_cb; #define HNS_RCB_DEF_COALESCED_FRAMES 50 #define HNS_RCB_CLK_FREQ_MHZ 350 #define HNS_RCB_MAX_COALESCED_USECS 0x3ff -#define HNS_RCB_DEF_COALESCED_USECS 3 +#define HNS_RCB_DEF_COALESCED_USECS 50 #define HNS_RCB_COMMON_ENDIAN 1 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 7c3b5103d..235f74444 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -32,7 +32,7 @@ #define DSAFV2_SBM_NUM 8 #define DSAFV2_SBM_XGE_CHN 6 #define DSAFV2_SBM_PPE_CHN 1 -#define DASFV2_ROCEE_CRD_NUM 8 +#define DASFV2_ROCEE_CRD_NUM 1 #define DSAF_VOQ_NUM DSAF_NODE_NUM #define DSAF_INODE_NUM DSAF_NODE_NUM @@ -166,6 +166,9 @@ #define DSAF_INODE_GE_FC_EN_0_REG 0x1B00 #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50 #define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00 +#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00 +#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100 +#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50 #define DSAF_SBM_CFG_REG_0_REG 0x2000 #define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004 @@ -175,7 +178,7 @@ #define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C #define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C #define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C -#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C +#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C #define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 #define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 #define DSAF_SBM_BP_CNT_0_0_REG 0x2018 @@ -232,6 +235,8 @@ #define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074 #define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078 #define DSAF_XOD_FIFO_STATUS_0_REG 0x307C +#define DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG 0x3A00 +#define DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET 0x4 #define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004 #define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008 @@ -791,6 +796,18 @@ #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) +#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0) +#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8 +#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M (((1ULL << 8) - 1) << 8) + +#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S (0) +#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M (((1ULL << 6) - 1) << 0) +#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S (6) +#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M (((1ULL << 6) - 1) << 6) +#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S (12) +#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12) + #define DSAF_TBL_TCAM_ADDR_S 0 #define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index fd90f3737..8f4f0e8da 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -119,7 +119,7 @@ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode) = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 port = drv->mac_id; - hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1); + dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 1); mdelay(10); /*enable XGE rX/tX */ @@ -157,7 +157,7 @@ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode) } mdelay(10); - hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0); + dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 0); } /** @@ -198,9 +198,9 @@ static void hns_xgmac_init(void *mac_drv) = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 port = drv->mac_id; - hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0); + dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0); mdelay(100); - hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1); + dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1); mdelay(100); hns_xgmac_exc_irq_en(drv, 0); @@ -425,7 +425,7 @@ static void hns_xgmac_free(void *mac_drv) u32 mac_id = drv->mac_id; - hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0); + dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0); } /** diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index e621636e6..d7e1f8c7a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -132,6 +132,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv, ring_ptr_move_fw(ring, next_to_use); } +static const struct acpi_device_id hns_enet_acpi_match[] = { + { "HISI00C1", 0 }, + { "HISI00C2", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match); + static void fill_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, int buf_num, enum hns_desc_type type, int mtu) @@ -593,6 +600,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, ring->stats.sw_err_cnt++; return -ENOMEM; } + skb_reset_mac_header(skb); prefetchw(skb->data); length = le16_to_cpu(desc->rx.pkt_len); @@ -754,16 +762,16 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, recv_pkts = 0, recv_bds = 0, clean_count = 0; recv: while (recv_pkts < budget && recv_bds < num) { - /* reuse or realloc buffers*/ + /* reuse or realloc buffers */ if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { hns_nic_alloc_rx_buffers(ring_data, clean_count); clean_count = 0; } - /* poll one pkg*/ + /* poll one pkt */ err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum); if (unlikely(!skb)) /* this fault cannot be repaired */ - break; + goto out; recv_bds += bnum; clean_count += bnum; @@ -789,6 +797,7 @@ recv: } } +out: /* make all data has been write before submit */ if (clean_count > 0) hns_nic_alloc_rx_buffers(ring_data, clean_count); @@ -983,8 +992,26 @@ static void hns_nic_adjust_link(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; + int state = 1; + + if (priv->phy) { + h->dev->ops->adjust_link(h, ndev->phydev->speed, + ndev->phydev->duplex); + state = priv->phy->link; + } + state = state && h->dev->ops->get_status(h); - h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex); + if (state != priv->link) { + if (state) { + netif_carrier_on(ndev); + netif_tx_wake_all_queues(ndev); + netdev_info(ndev, "link up\n"); + } else { + 
netif_carrier_off(ndev); + netdev_info(ndev, "link down\n"); + } + priv->link = state; + } } /** @@ -996,19 +1023,22 @@ static void hns_nic_adjust_link(struct net_device *ndev) int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) { struct hns_nic_priv *priv = netdev_priv(ndev); - struct phy_device *phy_dev = NULL; + struct phy_device *phy_dev = h->phy_dev; + int ret; - if (!h->phy_node) + if (!h->phy_dev) return 0; - if (h->phy_if != PHY_INTERFACE_MODE_XGMII) - phy_dev = of_phy_connect(ndev, h->phy_node, - hns_nic_adjust_link, 0, h->phy_if); - else - phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if); + if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { + phy_dev->dev_flags = 0; - if (unlikely(!phy_dev) || IS_ERR(phy_dev)) - return !phy_dev ? -ENODEV : PTR_ERR(phy_dev); + ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link, + h->phy_if); + } else { + ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if); + } + if (unlikely(ret)) + return -ENODEV; phy_dev->supported &= h->if_support; phy_dev->advertising = phy_dev->supported; @@ -1067,13 +1097,8 @@ void hns_nic_update_stats(struct net_device *netdev) static void hns_init_mac_addr(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); - struct device_node *node = priv->dev->of_node; - const void *mac_addr_temp; - mac_addr_temp = of_get_mac_address(node); - if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) { - memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len); - } else { + if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) { eth_hw_addr_random(ndev); dev_warn(priv->dev, "No valid mac, use random mac %pM", ndev->dev_addr); @@ -1176,7 +1201,7 @@ static int hns_nic_net_up(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; - int i, j, k; + int i, j; int ret; ret = hns_nic_init_irq(priv); @@ -1191,9 +1216,6 @@ static int hns_nic_net_up(struct net_device *ndev) goto out_has_some_queues; } - for (k = 0; k < h->q_num; k++) - h->dev->ops->toggle_queue_status(h->qs[k], 1); - ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr); if (ret) goto out_set_mac_addr_err; @@ -1213,8 +1235,6 @@ static int hns_nic_net_up(struct net_device *ndev) out_start_err: netif_stop_queue(ndev); out_set_mac_addr_err: - for (k = 0; k < h->q_num; k++) - h->dev->ops->toggle_queue_status(h->qs[k], 0); out_has_some_queues: for (j = i - 1; j >= 0; j--) hns_nic_ring_close(ndev, j); @@ -1421,7 +1441,6 @@ static int hns_nic_set_features(struct net_device *netdev, netdev_features_t features) { struct hns_nic_priv *priv = netdev_priv(netdev); - struct hnae_handle *h = priv->ae_handle; switch (priv->enet_ver) { case AE_VERSION_1: @@ -1434,11 +1453,9 @@ static int hns_nic_set_features(struct net_device *netdev, priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; /* The chip only support 7*4096 */ netif_set_gso_max_size(netdev, 7 * 4096); - h->dev->ops->set_tso_stats(h, 1); } else { priv->ops.fill_desc = fill_v2_desc; priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; - h->dev->ops->set_tso_stats(h, 0); } break; } @@ -1571,27 +1588,14 @@ static void hns_nic_update_link_status(struct net_device *netdev) struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; - int state = 1; - if (priv->phy) { - if (!genphy_update_link(priv->phy)) - state = priv->phy->link; - else - state = 0; - } - state = state && h->dev->ops->get_status(h); + if (h->phy_dev) { + if (h->phy_if != PHY_INTERFACE_MODE_XGMII) + return; - if (state != priv->link) { - 
if (state) { - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - netdev_info(netdev, "link up\n"); - } else { - netif_carrier_off(netdev); - netdev_info(netdev, "link down\n"); - } - priv->link = state; + (void)genphy_read_status(h->phy_dev); } + hns_nic_adjust_link(netdev); } /* for dumping key regs*/ @@ -1627,7 +1631,7 @@ static void hns_nic_dump(struct hns_nic_priv *priv) } } -/* for resetting suntask*/ +/* for resetting subtask */ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) { enum hnae_port_type type = priv->ae_handle->port_type; @@ -1797,11 +1801,14 @@ static void hns_nic_set_priv_ops(struct net_device *netdev) priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; /* This chip only support 7*4096 */ netif_set_gso_max_size(netdev, 7 * 4096); - h->dev->ops->set_tso_stats(h, 1); } else { priv->ops.fill_desc = fill_v2_desc; priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; } + /* enable tso when init + * control tso on/off through TSE bit in bd + */ + h->dev->ops->set_tso_stats(h, 1); } } @@ -1812,7 +1819,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev) int ret; h = hnae_get_handle(&priv->netdev->dev, - priv->ae_node, priv->port_id, NULL); + priv->fwnode, priv->port_id, NULL); if (IS_ERR_OR_NULL(h)) { ret = -ENODEV; dev_dbg(priv->dev, "has not handle, register notifier!\n"); @@ -1872,7 +1879,6 @@ static int hns_nic_dev_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct net_device *ndev; struct hns_nic_priv *priv; - struct device_node *node = dev->of_node; u32 port_id; int ret; @@ -1886,22 +1892,49 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->dev = dev; priv->netdev = ndev; - if (of_device_is_compatible(node, "hisilicon,hns-nic-v1")) - priv->enet_ver = AE_VERSION_1; - else - priv->enet_ver = AE_VERSION_2; + if (dev_of_node(dev)) { + struct device_node *ae_node; - priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0); - if (IS_ERR_OR_NULL(priv->ae_node)) { - ret = PTR_ERR(priv->ae_node); - dev_err(dev, "not find ae-handle\n"); - goto out_read_prop_fail; + if (of_device_is_compatible(dev->of_node, + "hisilicon,hns-nic-v1")) + priv->enet_ver = AE_VERSION_1; + else + priv->enet_ver = AE_VERSION_2; + + ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0); + if (IS_ERR_OR_NULL(ae_node)) { + ret = PTR_ERR(ae_node); + dev_err(dev, "not find ae-handle\n"); + goto out_read_prop_fail; + } + priv->fwnode = &ae_node->fwnode; + } else if (is_acpi_node(dev->fwnode)) { + struct acpi_reference_args args; + + if (acpi_dev_found(hns_enet_acpi_match[0].id)) + priv->enet_ver = AE_VERSION_1; + else if (acpi_dev_found(hns_enet_acpi_match[1].id)) + priv->enet_ver = AE_VERSION_2; + else + return -ENXIO; + + /* try to find port-idx-in-ae first */ + ret = acpi_node_get_property_reference(dev->fwnode, + "ae-handle", 0, &args); + if (ret) { + dev_err(dev, "not find ae-handle\n"); + goto out_read_prop_fail; + } + priv->fwnode = acpi_fwnode_handle(args.adev); + } else { + dev_err(dev, "cannot read cfg data from OF or acpi\n"); + return -ENXIO; } - /* try to find port-idx-in-ae first */ - ret = of_property_read_u32(node, "port-idx-in-ae", &port_id); + + ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); if (ret) { /* only for old code compatible */ - ret = of_property_read_u32(node, "port-id", &port_id); + ret = device_property_read_u32(dev, "port-id", &port_id); if (ret) goto out_read_prop_fail; /* for old dts, we need to caculate the port offset */ @@ -1940,7 +1973,7 @@ static int hns_nic_dev_probe(struct 
platform_device *pdev) if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) dev_dbg(dev, "set mask to 64bit\n"); else - dev_err(dev, "set mask to 32bit fail!\n"); + dev_err(dev, "set mask to 64bit fail!\n"); /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(ndev); @@ -2014,6 +2047,7 @@ static struct platform_driver hns_nic_dev_driver = { .driver = { .name = "hns-nic", .of_match_table = hns_enet_of_match, + .acpi_match_table = ACPI_PTR(hns_enet_acpi_match), }, .probe = hns_nic_dev_probe, .remove = hns_nic_dev_remove, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 337efa582..44bb3015e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -54,7 +54,7 @@ struct hns_nic_ops { }; struct hns_nic_priv { - const struct device_node *ae_node; + const struct fwnode_handle *fwnode; u32 enet_ver; u32 port_id; int phy_mode; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 67a648c7d..ab33487a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -49,7 +49,7 @@ static u32 hns_nic_get_link(struct net_device *net_dev) h = priv->ae_handle; if (priv->phy) { - if (!genphy_update_link(priv->phy)) + if (!genphy_read_status(priv->phy)) link_stat = priv->phy->link; else link_stat = 0; @@ -165,13 +165,21 @@ static int hns_nic_get_settings(struct net_device *net_dev, cmd->advertising |= ADVERTISED_10000baseKR_Full; } - if (h->port_type == HNAE_PORT_SERVICE) { + switch (h->media_type) { + case HNAE_MEDIA_TYPE_FIBER: cmd->port = PORT_FIBRE; - cmd->supported |= SUPPORTED_Pause; - } else { + break; + case HNAE_MEDIA_TYPE_COPPER: cmd->port = PORT_TP; + break; + case HNAE_MEDIA_TYPE_UNKNOWN: + default: + break; } + if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG)) + cmd->supported |= SUPPORTED_Pause; + cmd->transceiver = XCVR_EXTERNAL; cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22); hns_get_mdix_mode(net_dev, cmd); @@ -242,6 +250,7 @@ static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = { static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) { #define COPPER_CONTROL_REG 0 +#define PHY_POWER_DOWN BIT(11) #define PHY_LOOP_BACK BIT(14) u16 val = 0; @@ -252,33 +261,40 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) /* speed : 1000M */ phy_write(phy_dev, HNS_PHY_PAGE_REG, 2); phy_write(phy_dev, 21, 0x1046); + + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); /* Force Master */ phy_write(phy_dev, 9, 0x1F00); + /* Soft-reset */ phy_write(phy_dev, 0, 0x9140); /* If autoneg disabled,two soft-reset operations */ phy_write(phy_dev, 0, 0x9140); - phy_write(phy_dev, 22, 0xFA); + + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); /* Default is 0x0400 */ phy_write(phy_dev, 1, 0x418); /* Force 1000M Link, Default is 0x0200 */ phy_write(phy_dev, 7, 0x20C); - phy_write(phy_dev, 22, 0); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); - /* Enable MAC loop-back */ + /* Enable PHY loop-back */ val = phy_read(phy_dev, COPPER_CONTROL_REG); val |= PHY_LOOP_BACK; + val &= ~PHY_POWER_DOWN; phy_write(phy_dev, COPPER_CONTROL_REG, val); } else { - phy_write(phy_dev, 22, 0xFA); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); phy_write(phy_dev, 1, 0x400); phy_write(phy_dev, 7, 0x200); - phy_write(phy_dev, 22, 0); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); + phy_write(phy_dev, 9, 
0xF00); val = phy_read(phy_dev, COPPER_CONTROL_REG); val &= ~PHY_LOOP_BACK; + val |= PHY_POWER_DOWN; phy_write(phy_dev, COPPER_CONTROL_REG, val); } return 0; @@ -339,28 +355,16 @@ static int __lb_up(struct net_device *ndev, hns_nic_net_reset(ndev); - if (priv->phy) { - phy_disconnect(priv->phy); - msleep(100); - - ret = hns_nic_init_phy(ndev, h); - if (ret) - return ret; - } - ret = __lb_setup(ndev, loop_mode); if (ret) return ret; - msleep(100); + msleep(200); ret = h->dev->ops->start ? h->dev->ops->start(h) : 0; if (ret) return ret; - if (priv->phy) - phy_start(priv->phy); - /* link adjust duplex*/ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) speed = 1000; @@ -561,9 +565,6 @@ static int __lb_down(struct net_device *ndev) __func__, ret); - if (priv->phy) - phy_stop(priv->phy); - if (h->dev->ops->stop) h->dev->ops->stop(h); @@ -596,7 +597,7 @@ static void hns_nic_self_test(struct net_device *ndev, st_param[1][0] = MAC_INTERNALLOOP_SERDES; st_param[1][1] = 1; /*serdes must exist*/ st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supporte phy node*/ - st_param[2][1] = ((!!(priv->ae_handle->phy_node)) && + st_param[2][1] = ((!!(priv->ae_handle->phy_dev)) && (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { @@ -758,6 +759,16 @@ static int hns_get_coalesce(struct net_device *net_dev, &ec->tx_max_coalesced_frames, &ec->rx_max_coalesced_frames); + ops->get_coalesce_range(priv->ae_handle, + &ec->tx_max_coalesced_frames_low, + &ec->rx_max_coalesced_frames_low, + &ec->tx_max_coalesced_frames_high, + &ec->rx_max_coalesced_frames_high, + &ec->tx_coalesce_usecs_low, + &ec->rx_coalesce_usecs_low, + &ec->tx_coalesce_usecs_high, + &ec->rx_coalesce_usecs_high); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 765ddb3dc..33f4c483a 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -7,6 +7,7 @@ * (at your option) any later version. */ +#include #include #include #include @@ -36,9 +37,19 @@ #define MDIO_TIMEOUT 1000000 +struct hns_mdio_sc_reg { + u16 mdio_clk_en; + u16 mdio_clk_dis; + u16 mdio_reset_req; + u16 mdio_reset_dreq; + u16 mdio_clk_st; + u16 mdio_reset_st; +}; + struct hns_mdio_device { void *vbase; /* mdio reg base address */ struct regmap *subctrl_vbase; + struct hns_mdio_sc_reg sc_reg; }; /* mdio reg */ @@ -92,7 +103,6 @@ enum mdio_c45_op_seq { #define MDIO_SC_CLK_DIS 0x33C #define MDIO_SC_RESET_REQ 0xA38 #define MDIO_SC_RESET_DREQ 0xA3C -#define MDIO_SC_CTRL 0x2010 #define MDIO_SC_CLK_ST 0x531C #define MDIO_SC_RESET_ST 0x5A1C @@ -352,68 +362,67 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) static int hns_mdio_reset(struct mii_bus *bus) { struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + const struct hns_mdio_sc_reg *sc_reg; int ret; - if (!mdio_dev->subctrl_vbase) { - dev_err(&bus->dev, "mdio sys ctl reg has not maped\n"); - return -ENODEV; - } - - /*1. reset req, and read reset st check*/ - ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1, - MDIO_SC_RESET_ST, 0x1, - MDIO_CHECK_SET_ST); - if (ret) { - dev_err(&bus->dev, "MDIO reset fail\n"); - return ret; - } + if (dev_of_node(bus->parent)) { + if (!mdio_dev->subctrl_vbase) { + dev_err(&bus->dev, "mdio sys ctl reg has not maped\n"); + return -ENODEV; + } - /*2. 
dis clk, and read clk st check*/ - ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS, - 0x1, MDIO_SC_CLK_ST, 0x1, - MDIO_CHECK_CLR_ST); - if (ret) { - dev_err(&bus->dev, "MDIO dis clk fail\n"); - return ret; - } + sc_reg = &mdio_dev->sc_reg; + /* 1. reset req, and read reset st check */ + ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_req, + 0x1, sc_reg->mdio_reset_st, 0x1, + MDIO_CHECK_SET_ST); + if (ret) { + dev_err(&bus->dev, "MDIO reset fail\n"); + return ret; + } - /*3. reset dreq, and read reset st check*/ - ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1, - MDIO_SC_RESET_ST, 0x1, - MDIO_CHECK_CLR_ST); - if (ret) { - dev_err(&bus->dev, "MDIO dis clk fail\n"); - return ret; - } + /* 2. dis clk, and read clk st check */ + ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_dis, + 0x1, sc_reg->mdio_clk_st, 0x1, + MDIO_CHECK_CLR_ST); + if (ret) { + dev_err(&bus->dev, "MDIO dis clk fail\n"); + return ret; + } - /*4. en clk, and read clk st check*/ - ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN, - 0x1, MDIO_SC_CLK_ST, 0x1, - MDIO_CHECK_SET_ST); - if (ret) - dev_err(&bus->dev, "MDIO en clk fail\n"); + /* 3. reset dreq, and read reset st check */ + ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_dreq, + 0x1, sc_reg->mdio_reset_st, 0x1, + MDIO_CHECK_CLR_ST); + if (ret) { + dev_err(&bus->dev, "MDIO dis clk fail\n"); + return ret; + } + /* 4. en clk, and read clk st check */ + ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_en, + 0x1, sc_reg->mdio_clk_st, 0x1, + MDIO_CHECK_SET_ST); + if (ret) + dev_err(&bus->dev, "MDIO en clk fail\n"); + } else if (is_acpi_node(bus->parent->fwnode)) { + acpi_status s; + + s = acpi_evaluate_object(ACPI_HANDLE(bus->parent), + "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(&bus->dev, "Reset failed, return:%#x\n", s); + ret = -EBUSY; + } else { + ret = 0; + } + } else { + dev_err(&bus->dev, "Can not get cfg data from DT or ACPI\n"); + ret = -ENXIO; + } return ret; } -/** - * hns_mdio_bus_name - get mdio bus name - * @name: mdio bus name - * @np: mdio device node pointer - */ -static void hns_mdio_bus_name(char *name, struct device_node *np) -{ - const u32 *addr; - u64 taddr = OF_BAD_ADDR; - - addr = of_get_address(np, 0, NULL, NULL); - if (addr) - taddr = of_translate_address(np, addr); - - snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name, - (unsigned long long)taddr); -} - /** * hns_mdio_probe - probe mdio device * @pdev: mdio platform device @@ -422,17 +431,16 @@ static void hns_mdio_bus_name(char *name, struct device_node *np) */ static int hns_mdio_probe(struct platform_device *pdev) { - struct device_node *np; struct hns_mdio_device *mdio_dev; struct mii_bus *new_bus; struct resource *res; - int ret; + int ret = -ENODEV; if (!pdev) { dev_err(NULL, "pdev is NULL!\r\n"); return -ENODEV; } - np = pdev->dev.of_node; + mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL); if (!mdio_dev) return -ENOMEM; @@ -448,7 +456,7 @@ static int hns_mdio_probe(struct platform_device *pdev) new_bus->write = hns_mdio_write; new_bus->reset = hns_mdio_reset; new_bus->priv = mdio_dev; - hns_mdio_bus_name(new_bus->id, np); + new_bus->parent = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res); @@ -457,16 +465,73 @@ static int hns_mdio_probe(struct platform_device *pdev) return ret; } - mdio_dev->subctrl_vbase = - syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0)); - if (IS_ERR(mdio_dev->subctrl_vbase)) { - 
dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); - mdio_dev->subctrl_vbase = NULL; - } - new_bus->parent = &pdev->dev; platform_set_drvdata(pdev, new_bus); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii", + dev_name(&pdev->dev)); + if (dev_of_node(&pdev->dev)) { + struct of_phandle_args reg_args; + + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "subctrl-vbase", + 4, + 0, + ®_args); + if (!ret) { + mdio_dev->subctrl_vbase = + syscon_node_to_regmap(reg_args.np); + if (IS_ERR(mdio_dev->subctrl_vbase)) { + dev_warn(&pdev->dev, "syscon_node_to_regmap error\n"); + mdio_dev->subctrl_vbase = NULL; + } else { + if (reg_args.args_count == 4) { + mdio_dev->sc_reg.mdio_clk_en = + (u16)reg_args.args[0]; + mdio_dev->sc_reg.mdio_clk_dis = + (u16)reg_args.args[0] + 4; + mdio_dev->sc_reg.mdio_reset_req = + (u16)reg_args.args[1]; + mdio_dev->sc_reg.mdio_reset_dreq = + (u16)reg_args.args[1] + 4; + mdio_dev->sc_reg.mdio_clk_st = + (u16)reg_args.args[2]; + mdio_dev->sc_reg.mdio_reset_st = + (u16)reg_args.args[3]; + } else { + /* for compatible */ + mdio_dev->sc_reg.mdio_clk_en = + MDIO_SC_CLK_EN; + mdio_dev->sc_reg.mdio_clk_dis = + MDIO_SC_CLK_DIS; + mdio_dev->sc_reg.mdio_reset_req = + MDIO_SC_RESET_REQ; + mdio_dev->sc_reg.mdio_reset_dreq = + MDIO_SC_RESET_DREQ; + mdio_dev->sc_reg.mdio_clk_st = + MDIO_SC_CLK_ST; + mdio_dev->sc_reg.mdio_reset_st = + MDIO_SC_RESET_ST; + } + } + } else { + dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret); + mdio_dev->subctrl_vbase = NULL; + } + + ret = of_mdiobus_register(new_bus, pdev->dev.of_node); + } else if (is_acpi_node(pdev->dev.fwnode)) { + /* Clear all the IRQ properties */ + memset(new_bus->irq, PHY_POLL, 4 * PHY_MAX_ADDR); + + /* Mask out all PHYs from auto probing. */ + new_bus->phy_mask = ~0; + + /* Register the MDIO bus */ + ret = mdiobus_register(new_bus); + } else { + dev_err(&pdev->dev, "Can not get cfg data from DT or ACPI\n"); + ret = -ENXIO; + } - ret = of_mdiobus_register(new_bus, np); if (ret) { dev_err(&pdev->dev, "Cannot register as MDIO bus!\n"); platform_set_drvdata(pdev, NULL); @@ -499,12 +564,19 @@ static const struct of_device_id hns_mdio_match[] = { {} }; +static const struct acpi_device_id hns_mdio_acpi_match[] = { + { "HISI0141", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match); + static struct platform_driver hns_mdio_driver = { .probe = hns_mdio_probe, .remove = hns_mdio_remove, .driver = { .name = MDIO_DRV_NAME, .of_match_table = hns_mdio_match, + .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match), }, }; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 4c9771d57..7af09cbc5 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev) dev->mcast_pending = 1; return; } + + mutex_lock(&dev->link_lock); __emac_set_multicast_list(dev); + mutex_unlock(&dev->link_lock); +} + +static int emac_set_mac_address(struct net_device *ndev, void *sa) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct sockaddr *addr = sa; + struct emac_regs __iomem *p = dev->emacp; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + mutex_lock(&dev->link_lock); + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + emac_rx_disable(dev); + emac_tx_disable(dev); + out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]); + out_be32(&p->ialr, (ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | 
		 ndev->dev_addr[5]);
+	emac_tx_enable(dev);
+	emac_rx_enable(dev);
+
+	mutex_unlock(&dev->link_lock);
+
+	return 0;
 }
 
 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
 	.ndo_do_ioctl = emac_ioctl,
 	.ndo_tx_timeout = emac_tx_timeout,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_set_mac_address = emac_set_mac_address,
 	.ndo_start_xmit = emac_start_xmit,
 	.ndo_change_mtu = eth_change_mtu,
 };
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
 	.ndo_do_ioctl = emac_ioctl,
 	.ndo_tx_timeout = emac_tx_timeout,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_set_mac_address = emac_set_mac_address,
 	.ndo_start_xmit = emac_start_xmit_sg,
 	.ndo_change_mtu = emac_change_mtu,
 };
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 714bd1014..c0e17433f 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -167,17 +167,6 @@ config IXGBE
	  To compile this driver as a module, choose M here. The module
	  will be called ixgbe.
 
-config IXGBE_VXLAN
-	bool "Virtual eXtensible Local Area Network Support"
-	default n
-	depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
-	---help---
-	  This allows one to create VXLAN virtual interfaces that provide
-	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used
-	  to tunnel virtual network infrastructure in virtualized environments.
-	  Say Y here if you want to use Virtual eXtensible Local Area Network
-	  (VXLAN) in the driver.
-
 config IXGBE_HWMON
	bool "Intel(R) 10GbE PCI Express adapters HWMON support"
	default y
@@ -236,27 +225,6 @@ config I40E
	  To compile this driver as a module, choose M here. The module
	  will be called i40e.
 
-config I40E_VXLAN
-	bool "Virtual eXtensible Local Area Network Support"
-	default n
-	depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
-	---help---
-	  This allows one to create VXLAN virtual interfaces that provide
-	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used
-	  to tunnel virtual network infrastructure in virtualized environments.
-	  Say Y here if you want to use Virtual eXtensible Local Area Network
-	  (VXLAN) in the driver.
-
-config I40E_GENEVE
-	bool "Generic Network Virtualization Encapsulation (GENEVE) Support"
-	depends on I40E && GENEVE && !(I40E=y && GENEVE=m)
-	default n
-	---help---
-	  This allows one to create GENEVE virtual interfaces that provide
-	  Layer 2 Networks over Layer 3 Networks. GENEVE is often used
-	  to tunnel virtual network infrastructure in virtualized environments.
-	  Say Y here if you want to use GENEVE in the driver.
-
 config I40E_DCB
	bool "Data Center Bridging (DCB) Support"
	default n
@@ -307,15 +275,4 @@ config FM10K
	  To compile this driver as a module, choose M here. The module
	  will be called fm10k.  MSI-X interrupt support is required
 
-config FM10K_VXLAN
-	bool "Virtual eXtensible Local Area Network Support"
-	default n
-	depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m)
-	---help---
-	  This allows one to create VXLAN virtual interfaces that provide
-	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used
-	  to tunnel virtual network infrastructure in virtualized environments.
-	  Say Y here if you want to use Virtual eXtensible Local Area Network
-	  (VXLAN) in the driver.
-
 endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7fd4d5459..6b03c8553 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
				  | FLAG2_DISABLE_ASPM_L0S
				  | FLAG2_DISABLE_ASPM_L1
				  | FLAG2_NO_DISABLE_RX
-				  | FLAG2_DMA_BURST,
+				  | FLAG2_DMA_BURST
+				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba = 32,
 	.max_hw_frame_size = DEFAULT_JUMBO,
 	.get_variants = e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
				  | FLAG_HAS_CTRLEXT_ON_LOAD,
 	.flags2 = FLAG2_DISABLE_ASPM_L0S
				  | FLAG2_DISABLE_ASPM_L1
-				  | FLAG2_NO_DISABLE_RX,
+				  | FLAG2_NO_DISABLE_RX
+				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba = 32,
 	.max_hw_frame_size = DEFAULT_JUMBO,
 	.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ef96cd11d..879cca47b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
 #define FLAG2_DFLT_CRC_STRIPPING BIT(12)
 #define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
 
 #define E1000_RX_DESC_PS(R, i) \
	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 3e11322d8..f3aaca743 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
 	.flags2 = FLAG2_HAS_PHY_STATS
-		  | FLAG2_HAS_EEE,
+		  | FLAG2_HAS_EEE
+		  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba = 26,
 	.max_hw_frame_size = 9022,
 	.get_variants = e1000_get_variants_ich8lan,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2b2e2f8c6..7017281ba 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4302,6 +4302,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
 	clear_bit(__E1000_RESETTING, &adapter->state);
 }
 
+/**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+	u64 time_delta, rem, temp;
+	cycle_t systim_next;
+	u32 incvalue;
+	int i;
+
+	incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+	for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+		/* latch SYSTIMH on read of SYSTIML */
+		systim_next = (cycle_t)er32(SYSTIML);
+		systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+		time_delta = systim_next - systim;
+		temp = time_delta;
+		/* VMWare users have seen incvalue of zero, don't div / 0 */
+		rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+		systim = systim_next;
+
+		if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+			break;
+	}
+
+	return systim;
+}
+
 /**
  * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
  * @cc: cyclecounter structure
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
						     cc);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 systimel, systimeh;
-	cycle_t systim, systim_next;
+	cycle_t systim;
 
 	/* SYSTIMH latching upon SYSTIML read does not work well.
	 * This means that if SYSTIML overflows after we read it but before
	 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4335,32 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
 	systim = (cycle_t)systimel;
 	systim |= (cycle_t)systimeh << 32;
 
-	if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
-		u64 time_delta, rem, temp;
-		u32 incvalue;
-		int i;
-
-		/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
-		 * check to see that the time is incrementing at a reasonable
-		 * rate and is a multiple of incvalue
-		 */
-		incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
-		for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
-			/* latch SYSTIMH on read of SYSTIML */
-			systim_next = (cycle_t)er32(SYSTIML);
-			systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
-			time_delta = systim_next - systim;
-			temp = time_delta;
-			rem = do_div(temp, incvalue);
-
-			systim = systim_next;
+	if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+		systim = e1000e_sanitize_systim(hw, systim);
 
-			if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
-			    (rem == 0))
-				break;
-		}
-	}
 	return systim;
 }
 
@@ -7329,8 +7342,7 @@ err_flashmap:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_release_selected_regions(pdev,
-				     pci_select_bars(pdev, IORESOURCE_MEM));
+	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
 	pci_disable_device(pdev);
@@ -7397,8 +7409,7 @@ static void e1000_remove(struct pci_dev *pdev)
 	if ((adapter->hw.flash_address) &&
	    (adapter->hw.mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
-	pci_release_selected_regions(pdev,
-				     pci_select_bars(pdev, IORESOURCE_MEM));
+	pci_release_mem_regions(pdev);
 
 	free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index fcf106e54..c4cf08dcf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -362,6 +362,7 @@ enum fm10k_state_t {
 	__FM10K_SERVICE_DISABLE,
 	__FM10K_MBX_LOCK,
 	__FM10K_LINK_DOWN,
+	__FM10K_UPDATING_STATS,
 };
 
 static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
@@ -406,7 +407,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
	(&(((union fm10k_rx_desc *)((R)->desc))[i]))
 
 #define FM10K_MAX_TXD_PWR 14
-#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR)
+#define FM10K_MAX_DATA_PER_TXD (1u << FM10K_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
@@ -457,6 +458,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
 netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring);
 void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring);
 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
 void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 5bbf19cfe..d6baaea8b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -519,8 +519,12 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
 		goto out;
 
 	/* interface cannot receive traffic without logical ports */
-	if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
+	if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
+		if (hw->mac.ops.request_lport_map)
+			ret_val = hw->mac.ops.request_lport_map(hw);
+
 		goto out;
+	}
 
 	/* if we passed all the tests above then the switch is ready and we no
	 * longer need to check for link
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 9c0d87503..c04cbe9c9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -76,6 +76,8 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
 	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
 	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
 
+	FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),
+
 	FM10K_STAT("tx_hang_count", tx_timeout_count),
 };
 
@@ -983,9 +985,10 @@ void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
 		/* generate a new table if we weren't given one */
		for (j = 0; j < 4; j++) {
			if (indir)
-				n = indir[i + j];
+				n = indir[4 * i + j];
			else
-				n = ethtool_rxfh_indir_default(i + j, rss_i);
+				n = ethtool_rxfh_indir_default(4 * i + j,
							       rss_i);
 
			table[j] = n;
		}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 0e166e9c9..e9767b636 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -28,7 +28,7 @@
 
 #include "fm10k.h"
 
-#define DRV_VERSION "0.19.3-k"
+#define DRV_VERSION "0.21.2-k"
 #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
 const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
@@ -56,7 +56,7 @@ static int __init fm10k_init_module(void)
 	pr_info("%s\n", fm10k_copyright);
 
 	/* create driver workqueue */
-	fm10k_workqueue = create_workqueue("fm10k");
+	fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);
 
 	fm10k_dbg_init();
 
@@ -77,7 +77,6 @@ static void __exit fm10k_exit_module(void)
 	fm10k_dbg_exit();
 
 	/* destroy driver workqueue */
-	flush_workqueue(fm10k_workqueue);
 	destroy_workqueue(fm10k_workqueue);
 }
 module_exit(fm10k_exit_module);
@@ -272,7 +271,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = FM10K_RX_BUFSZ;
 #else
-	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int truesize = ALIGN(size, 512);
 #endif
 	unsigned int pull_len;
@@ -1129,11 +1128,13 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
 	return ring->stats.packets;
 }
 
-static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
 {
-	/* use SW head and tail until we have real hardware */
-	u32 head = ring->next_to_clean;
-	u32 tail = ring->next_to_use;
+	struct fm10k_intfc *interface = ring->q_vector->interface;
+	struct fm10k_hw *hw = &interface->hw;
+
+	u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
+	u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
 
 	return ((head <= tail) ? tail : tail + ring->count) - head;
 }
@@ -1857,7 +1858,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
 	if (v_budget < 0) {
		kfree(interface->msix_entries);
		interface->msix_entries = NULL;
-		return -ENOMEM;
+		return v_budget;
 	}
 
 	/* record the number of queues available for q_vectors */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index b7dbc8a84..35c1dbad1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -41,6 +41,8 @@ struct fm10k_mbx_info;
 #define FM10K_MBX_ACK_INTERRUPT 0x00000010
 #define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
 #define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
+#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200
+#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400
 #define FM10K_MBICR(_n) ((_n) + 0x18840)
 #define FM10K_GMBX 0x18842
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2a08d3f5b..20a5bbe3f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -20,9 +20,7 @@
 
 #include "fm10k.h"
 #include <linux/vmalloc.h>
-#ifdef CONFIG_FM10K_VXLAN
-#include <net/vxlan.h>
-#endif /* CONFIG_FM10K_VXLAN */
+#include <net/udp_tunnel.h>
 
 /**
  * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -434,8 +432,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
 /**
  * fm10k_add_vxlan_port
  * @netdev: network interface device structure
- * @sa_family: Address family of new port
- * @port: port number used for VXLAN
+ * @ti: Tunnel endpoint information
  *
  * This function is called when a new VXLAN interface has added a new port
  * number to the range that is currently in use for VXLAN. The new port
@@ -444,18 +441,21 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
  * is always used as the VXLAN port number for offloads.
 **/
 static void fm10k_add_vxlan_port(struct net_device *dev,
-				 sa_family_t sa_family, __be16 port) {
+				 struct udp_tunnel_info *ti)
+{
 	struct fm10k_intfc *interface = netdev_priv(dev);
 	struct fm10k_vxlan_port *vxlan_port;
 
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
 	/* only the PF supports configuring tunnels */
 	if (interface->hw.mac.type != fm10k_mac_pf)
 		return;
 
 	/* existing ports are pulled out so our new entry is always last */
 	fm10k_vxlan_port_for_each(vxlan_port, interface) {
-		if ((vxlan_port->port == port) &&
-		    (vxlan_port->sa_family == sa_family)) {
+		if ((vxlan_port->port == ti->port) &&
+		    (vxlan_port->sa_family == ti->sa_family)) {
			list_del(&vxlan_port->list);
			goto insert_tail;
		}
@@ -465,8 +465,8 @@ static void fm10k_add_vxlan_port(struct net_device *dev,
 	vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
 	if (!vxlan_port)
 		return;
-	vxlan_port->port = port;
-	vxlan_port->sa_family = sa_family;
+	vxlan_port->port = ti->port;
+	vxlan_port->sa_family = ti->sa_family;
 
 insert_tail:
 	/* add new port value to list */
@@ -478,8 +478,7 @@ insert_tail:
 /**
  * fm10k_del_vxlan_port
  * @netdev: network interface device structure
- * @sa_family: Address family of freed port
- * @port: port number used for VXLAN
+ * @ti: Tunnel endpoint information
  *
  * This function is called when a new VXLAN interface has freed a port
  * number from the range that is currently in use for VXLAN. The freed
@@ -487,17 +486,20 @@ insert_tail:
  * the port number for offloads.
 **/
 static void fm10k_del_vxlan_port(struct net_device *dev,
-				 sa_family_t sa_family, __be16 port) {
+				 struct udp_tunnel_info *ti)
+{
 	struct fm10k_intfc *interface = netdev_priv(dev);
 	struct fm10k_vxlan_port *vxlan_port;
 
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
 	if (interface->hw.mac.type != fm10k_mac_pf)
 		return;
 
 	/* find the port in the list and free it */
 	fm10k_vxlan_port_for_each(vxlan_port, interface) {
-		if ((vxlan_port->port == port) &&
-		    (vxlan_port->sa_family == sa_family)) {
+		if ((vxlan_port->port == ti->port) &&
+		    (vxlan_port->sa_family == ti->sa_family)) {
			list_del(&vxlan_port->list);
			kfree(vxlan_port);
			break;
@@ -553,10 +555,8 @@ int fm10k_open(struct net_device *netdev)
 	if (err)
 		goto err_set_queues;
 
-#ifdef CONFIG_FM10K_VXLAN
 	/* update VXLAN port configuration */
-	vxlan_get_rx_port(netdev);
-#endif
+	udp_tunnel_get_rx_info(netdev);
 
 	fm10k_up(interface);
@@ -1375,8 +1375,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
 	.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
 	.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
 	.ndo_get_vf_config = fm10k_ndo_get_vf_config,
-	.ndo_add_vxlan_port = fm10k_add_vxlan_port,
-	.ndo_del_vxlan_port = fm10k_del_vxlan_port,
+	.ndo_udp_tunnel_add = fm10k_add_vxlan_port,
+	.ndo_udp_tunnel_del = fm10k_del_vxlan_port,
 	.ndo_dfwd_add_station = fm10k_dfwd_add_station,
 	.ndo_dfwd_del_station = fm10k_dfwd_del_station,
 #ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e05aca9be..774a5654b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -123,11 +123,24 @@ static void fm10k_service_timer(unsigned long data)
 static void fm10k_detach_subtask(struct fm10k_intfc *interface)
 {
 	struct net_device *netdev = interface->netdev;
+	u32 __iomem *hw_addr;
+	u32 value;
 
 	/* do nothing if device is still present or hw_addr is set */
 	if (netif_device_present(netdev) || interface->hw.hw_addr)
 		return;
 
+	/* check the real address space to see if we've recovered */
+	hw_addr = READ_ONCE(interface->uc_addr);
+	value = readl(hw_addr);
+	if ((~value)) {
+		interface->hw.hw_addr = interface->uc_addr;
+		netif_device_attach(netdev);
+		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+		netdev_warn(netdev, "PCIe link restored, device now attached\n");
+		return;
+	}
+
 	rtnl_lock();
 
 	if (netif_running(netdev))
@@ -136,11 +149,9 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
 	rtnl_unlock();
 }
 
-static void fm10k_reinit(struct fm10k_intfc *interface)
+static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
 {
 	struct net_device *netdev = interface->netdev;
-	struct fm10k_hw *hw = &interface->hw;
-	int err;
 
 	WARN_ON(in_interrupt());
@@ -165,6 +176,19 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
 	/* delay any future reset requests */
 	interface->last_reset = jiffies + (10 * HZ);
 
+	rtnl_unlock();
+}
+
+static int fm10k_handle_reset(struct fm10k_intfc *interface)
+{
+	struct net_device *netdev = interface->netdev;
+	struct fm10k_hw *hw = &interface->hw;
+	int err;
+
+	rtnl_lock();
+
+	pci_set_master(interface->pdev);
+
 	/* reset and initialize the hardware so it is in a known state */
 	err = hw->mac.ops.reset_hw(hw);
 	if (err) {
@@ -185,7 +209,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
 		goto reinit_err;
 	}
 
-	/* reassociate interrupts */
+	/* re-associate interrupts */
 	err = fm10k_mbx_request_irq(interface);
 	if (err)
 		goto err_mbx_irq;
@@ -219,7 +243,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
 	clear_bit(__FM10K_RESETTING, &interface->state);
 
-	return;
+	return err;
 err_open:
 	fm10k_mbx_free_irq(interface);
 err_mbx_irq:
@@ -230,6 +254,20 @@ reinit_err:
 	rtnl_unlock();
 
 	clear_bit(__FM10K_RESETTING, &interface->state);
+
+	return err;
+}
+
+static void fm10k_reinit(struct fm10k_intfc *interface)
+{
+	int err;
+
+	fm10k_prepare_for_reset(interface);
+
+	err = fm10k_handle_reset(interface);
+	if (err)
+		dev_err(&interface->pdev->dev,
+			"fm10k_handle_reset failed: %d\n", err);
 }
 
 static void fm10k_reset_subtask(struct fm10k_intfc *interface)
@@ -372,12 +410,19 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
 	u64 bytes, pkts;
 	int i;
 
+	/* ensure only one thread updates stats at a time */
+	if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+		return;
+
 	/* do not allow stats update via service task for next second */
 	interface->next_stats_update = jiffies + HZ;
 
 	/* gather some stats to the interface struct that are per queue */
 	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
-		struct fm10k_ring *tx_ring = interface->tx_ring[i];
+		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);
+
+		if (!tx_ring)
+			continue;
 
		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
@@ -396,7 +441,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
 	/* gather some stats to the interface struct that are per queue */
 	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
-		struct fm10k_ring *rx_ring = interface->rx_ring[i];
+		struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);
+
+		if (!rx_ring)
+			continue;
 
		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
@@ -443,6 +491,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
 	/* Fill out the OS statistics structure */
 	net_stats->rx_errors = rx_errors;
 	net_stats->rx_dropped = interface->stats.nodesc_drop.count;
+
+	clear_bit(__FM10K_UPDATING_STATS, &interface->state);
 }
 
/**
@@ -1566,6 +1616,9 @@ void fm10k_up(struct fm10k_intfc *interface)
 	/* configure interrupts */
 	hw->mac.ops.update_int_moderator(hw);
 
+	/* enable statistics capture again */
+	clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+
 	/* clear down bit to indicate we are ready to go */
 	clear_bit(__FM10K_DOWN, &interface->state);
 
@@ -1598,10 +1651,11 @@ void fm10k_down(struct fm10k_intfc *interface)
 {
 	struct net_device *netdev = interface->netdev;
 	struct fm10k_hw *hw = &interface->hw;
-	int err;
+	int err, i = 0, count = 0;
 
 	/* signal that we are down to the interrupt handler and service task */
-	set_bit(__FM10K_DOWN, &interface->state);
+	if (test_and_set_bit(__FM10K_DOWN, &interface->state))
+		return;
 
 	/* call carrier off first to avoid false dev_watchdog timeouts */
 	netif_carrier_off(netdev);
@@ -1613,18 +1667,57 @@ void fm10k_down(struct fm10k_intfc *interface)
 	/* reset Rx filters */
 	fm10k_reset_rx_state(interface);
 
-	/* allow 10ms for device to quiesce */
-	usleep_range(10000, 20000);
-
 	/* disable polling routines */
 	fm10k_napi_disable_all(interface);
 
 	/* capture stats one last time before stopping interface */
 	fm10k_update_stats(interface);
 
+	/* prevent updating statistics while we're down */
+	while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+		usleep_range(1000, 2000);
+
+	/* skip waiting for TX DMA if we lost PCIe link */
+	if (FM10K_REMOVED(hw->hw_addr))
+		goto skip_tx_dma_drain;
+
+	/* In some rare circumstances it can take a while for Tx queues to
+	 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
+	 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
+	 * until the Tx queues have emptied, or until a number of retries. If
+	 * we fail to clear within the retry loop, we will issue a warning
+	 * indicating that Tx DMA is probably hung. Note this means we call
+	 * .stop_hw() twice but this shouldn't cause any problems.
+	 */
+	err = hw->mac.ops.stop_hw(hw);
+	if (err != FM10K_ERR_REQUESTS_PENDING)
+		goto skip_tx_dma_drain;
+
+#define TX_DMA_DRAIN_RETRIES 25
+	for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
+		usleep_range(10000, 20000);
+
+		/* start checking at the last ring to have pending Tx */
+		for (; i < interface->num_tx_queues; i++)
+			if (fm10k_get_tx_pending(interface->tx_ring[i]))
+				break;
+
+		/* if all the queues are drained, we can break now */
+		if (i == interface->num_tx_queues)
+			break;
+	}
+
+	if (count >= TX_DMA_DRAIN_RETRIES)
+		dev_err(&interface->pdev->dev,
+			"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
+			count);
+skip_tx_dma_drain:
 	/* Disable DMA engine for Tx/Rx */
 	err = hw->mac.ops.stop_hw(hw);
-	if (err)
+	if (err == FM10K_ERR_REQUESTS_PENDING)
+		dev_err(&interface->pdev->dev,
+			"due to pending requests hw was not shut down gracefully\n");
+	else if (err)
		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
 
 	/* free any buffers still on the rings */
@@ -1750,6 +1843,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
 
 	/* Start off interface as being down */
 	set_bit(__FM10K_DOWN, &interface->state);
+	set_bit(__FM10K_UPDATING_STATS, &interface->state);
 
 	return 0;
 }
@@ -1869,10 +1963,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_dma;
 	}
 
-	err = pci_request_selected_regions(pdev,
-					   pci_select_bars(pdev,
-							   IORESOURCE_MEM),
-					   fm10k_driver_name);
+	err = pci_request_mem_regions(pdev, fm10k_driver_name);
 	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed: %d\n", err);
@@ -1976,8 +2067,7 @@ err_sw_init:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_netdev:
-	pci_release_selected_regions(pdev,
-				     pci_select_bars(pdev, IORESOURCE_MEM));
+	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
 	pci_disable_device(pdev);
@@ -2025,14 +2115,55 @@ static void fm10k_remove(struct pci_dev *pdev)
 
 	free_netdev(netdev);
 
-	pci_release_selected_regions(pdev,
-				     pci_select_bars(pdev, IORESOURCE_MEM));
+	pci_release_mem_regions(pdev);
 
 	pci_disable_pcie_error_reporting(pdev);
 
 	pci_disable_device(pdev);
 }
 
+static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
+{
+	/* the watchdog task reads from registers, which might appear like
+	 * a surprise remove if the PCIe device is disabled while we're
+	 * stopped. We stop the watchdog task until after we resume software
+	 * activity.
+	 */
+	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+	cancel_work_sync(&interface->service_task);
+
+	fm10k_prepare_for_reset(interface);
+}
+
+static int fm10k_handle_resume(struct fm10k_intfc *interface)
+{
+	struct fm10k_hw *hw = &interface->hw;
+	int err;
+
+	/* reset statistics starting values */
+	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+
+	err = fm10k_handle_reset(interface);
+	if (err)
+		return err;
+
+	/* assume host is not ready, to prevent race with watchdog in case we
+	 * actually don't have connection to the switch
+	 */
+	interface->host_ready = false;
+	fm10k_watchdog_host_not_ready(interface);
+
+	/* force link to stay down for a second to prevent link flutter */
+	interface->link_down_event = jiffies + (HZ);
+	set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+	/* clear the service task disable bit to allow service task to start */
+	clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+	fm10k_service_event_schedule(interface);
+
+	return err;
+}
+
 #ifdef CONFIG_PM
 /**
  * fm10k_resume - Restore device to pre-sleep state
@@ -2069,60 +2200,13 @@ static int fm10k_resume(struct pci_dev *pdev)
 	/* refresh hw_addr in case it was dropped */
 	hw->hw_addr = interface->uc_addr;
 
-	/* reset hardware to known state */
-	err = hw->mac.ops.init_hw(&interface->hw);
-	if (err) {
-		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
-		return err;
-	}
-
-	/* reset statistics starting values */
-	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
-
-	rtnl_lock();
-
-	err = fm10k_init_queueing_scheme(interface);
-	if (err)
-		goto err_queueing_scheme;
-
-	err = fm10k_mbx_request_irq(interface);
-	if (err)
-		goto err_mbx_irq;
-
-	err = fm10k_hw_ready(interface);
+	err = fm10k_handle_resume(interface);
 	if (err)
-		goto err_open;
-
-	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
-	if (err)
-		goto err_open;
-
-	rtnl_unlock();
-
-	/* assume host is not ready, to prevent race with watchdog in case we
-	 * actually don't have connection to the switch
-	 */
-	interface->host_ready = false;
-	fm10k_watchdog_host_not_ready(interface);
-
-	/* clear the service task disable bit to allow service task to start */
-	clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
-	fm10k_service_event_schedule(interface);
-
-	/* restore SR-IOV interface */
-	fm10k_iov_resume(pdev);
+		return err;
 
 	netif_device_attach(netdev);
 
 	return 0;
-
-err_open:
-	fm10k_mbx_free_irq(interface);
-err_mbx_irq:
-	fm10k_clear_queueing_scheme(interface);
-err_queueing_scheme:
-	rtnl_unlock();
-
-	return err;
 }
 
 /**
@@ -2142,27 +2226,7 @@ static int fm10k_suspend(struct pci_dev *pdev,
 
 	netif_device_detach(netdev);
 
-	fm10k_iov_suspend(pdev);
-
-	/* the watchdog tasks may read registers, which will appear like a
-	 * surprise-remove event once the PCI device is disabled. This will
-	 * cause us to close the netdevice, so we don't retain the open/closed
-	 * state post-resume. Prevent this by disabling the service task while
-	 * suspended, until we actually resume.
- */ - set_bit(__FM10K_SERVICE_DISABLE, &interface->state); - cancel_work_sync(&interface->service_task); - - rtnl_lock(); - - if (netif_running(netdev)) - fm10k_close(netdev); - - fm10k_mbx_free_irq(interface); - - fm10k_clear_queueing_scheme(interface); - - rtnl_unlock(); + fm10k_prepare_suspend(interface); err = pci_save_state(pdev); if (err) @@ -2195,17 +2259,7 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; - rtnl_lock(); - - if (netif_running(netdev)) - fm10k_close(netdev); - - fm10k_mbx_free_irq(interface); - - /* free interrupts */ - fm10k_clear_queueing_scheme(interface); - - rtnl_unlock(); + fm10k_prepare_suspend(interface); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -2219,7 +2273,6 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, */ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) { - struct fm10k_intfc *interface = pci_get_drvdata(pdev); pci_ers_result_t result; if (pci_enable_device_mem(pdev)) { @@ -2237,12 +2290,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) pci_wake_from_d3(pdev, false); - /* refresh hw_addr in case it was dropped */ - interface->hw.hw_addr = interface->uc_addr; - - interface->flags |= FM10K_FLAG_RESET_REQUESTED; - fm10k_service_event_schedule(interface); - result = PCI_ERS_RESULT_RECOVERED; } @@ -2262,50 +2309,54 @@ static void fm10k_io_resume(struct pci_dev *pdev) { struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev; - struct fm10k_hw *hw = &interface->hw; - int err = 0; - - /* reset hardware to known state */ - err = hw->mac.ops.init_hw(&interface->hw); - if (err) { - dev_err(&pdev->dev, "init_hw failed: %d\n", err); - return; - } - - /* reset statistics starting values */ - hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - - rtnl_lock(); + int err; - err = fm10k_init_queueing_scheme(interface); - if (err) { - dev_err(&interface->pdev->dev, - "init_queueing_scheme failed: %d\n", err); - goto unlock; - } + err = fm10k_handle_resume(interface); - /* reassociate interrupts */ - fm10k_mbx_request_irq(interface); + if (err) + dev_warn(&pdev->dev, + "fm10k_io_resume failed: %d\n", err); + else + netif_device_attach(netdev); +} - rtnl_lock(); - if (netif_running(netdev)) - err = fm10k_open(netdev); - rtnl_unlock(); +/** + * fm10k_io_reset_notify - called when PCI function is reset + * @pdev: Pointer to PCI device + * + * This callback is called when the PCI function is reset such as from + * /sys/class/net//device/reset or similar. When prepare is true, it + * means we should prepare for a function reset. If prepare is false, it means + * the function reset just occurred. + */ +static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + int err = 0; - /* final check of hardware state before registering the interface */ - err = err ? 
: fm10k_hw_ready(interface); + if (prepare) { + /* warn in case we have any active VF devices */ + if (pci_num_vf(pdev)) + dev_warn(&pdev->dev, + "PCIe FLR may cause issues for any active VF devices\n"); - if (!err) - netif_device_attach(netdev); + fm10k_prepare_suspend(interface); + } else { + err = fm10k_handle_resume(interface); + } -unlock: - rtnl_unlock(); + if (err) { + dev_warn(&pdev->dev, + "fm10k_io_reset_notify failed: %d\n", err); + netif_device_detach(interface->netdev); + } } static const struct pci_error_handlers fm10k_err_handler = { .error_detected = fm10k_io_error_detected, .slot_reset = fm10k_io_slot_reset, .resume = fm10k_io_resume, + .reset_notify = fm10k_io_reset_notify, }; static struct pci_driver fm10k_driver = { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index dc75507c9..682299dd0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -51,34 +51,37 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) /* shut down all rings */ err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES); - if (err) + if (err == FM10K_ERR_REQUESTS_PENDING) { + hw->mac.reset_while_pending++; + goto force_reset; + } else if (err) { return err; + } /* Verify that DMA is no longer active */ reg = fm10k_read_reg(hw, FM10K_DMA_CTRL); if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) return FM10K_ERR_DMA_PENDING; - /* verify the switch is ready for reset */ - reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2); - if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY)) - goto out; - +force_reset: /* Initiate data path reset */ - reg |= FM10K_DMA_CTRL_DATAPATH_RESET; + reg = FM10K_DMA_CTRL_DATAPATH_RESET; fm10k_write_reg(hw, FM10K_DMA_CTRL, reg); /* Flush write and allow 100us for reset to complete */ fm10k_write_flush(hw); udelay(FM10K_RESET_TIMEOUT); + /* Reset mailbox global interrupts */ + reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT; + fm10k_write_reg(hw, FM10K_GMBX, reg); + /* Verify we made it out of reset */ reg = fm10k_read_reg(hw, FM10K_IP); if (!(reg & FM10K_IP_NOTINRESET)) - err = FM10K_ERR_RESET_FAILED; + return FM10K_ERR_RESET_FAILED; -out: - return err; + return 0; } /** @@ -1619,25 +1622,15 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw) **/ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) { - s32 ret_val = 0; u32 dma_ctrl2; /* verify the switch is ready for interaction */ dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) - goto out; + return 0; /* retrieve generic host state info */ - ret_val = fm10k_get_host_state_generic(hw, switch_ready); - if (ret_val) - goto out; - - /* interface cannot receive traffic without logical ports */ - if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) - ret_val = fm10k_request_lport_map_pf(hw); - -out: - return ret_val; + return fm10k_get_host_state_generic(hw, switch_ready); } /* This structure defines the attributes to be parsed below */ @@ -1813,6 +1806,7 @@ static const struct fm10k_mac_ops mac_ops_pf = { .set_dma_mask = fm10k_set_dma_mask_pf, .get_fault = fm10k_get_fault_pf, .get_host_state = fm10k_get_host_state_pf, + .request_lport_map = fm10k_request_lport_map_pf, }; static const struct fm10k_iov_ops iov_ops_pf = { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index b8bc06183..f4e75c498 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++
b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -526,6 +526,7 @@ struct fm10k_mac_ops { s32 (*stop_hw)(struct fm10k_hw *); s32 (*get_bus_info)(struct fm10k_hw *); s32 (*get_host_state)(struct fm10k_hw *, bool *); + s32 (*request_lport_map)(struct fm10k_hw *); s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); s32 (*read_mac_addr)(struct fm10k_hw *); s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, @@ -562,6 +563,7 @@ struct fm10k_mac_info { bool tx_ready; u32 dglort_map; u8 itr_scale; + u64 reset_while_pending; }; struct fm10k_swapi_table_info { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 3b06685ea..337ba65a9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -34,7 +34,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) /* we need to disable the queues before taking further steps */ err = fm10k_stop_hw_generic(hw); - if (err) + if (err && err != FM10K_ERR_REQUESTS_PENDING) return err; /* If permanent address is set then we need to restore it */ @@ -67,7 +67,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen); } - return 0; + return err; } /** @@ -83,7 +83,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) /* shut down queues we own and reset DMA configuration */ err = fm10k_stop_hw_vf(hw); - if (err) + if (err == FM10K_ERR_REQUESTS_PENDING) + hw->mac.reset_while_pending++; + else if (err) return err; /* Initiate VF reset */ @@ -96,9 +98,9 @@ /* Clear reset bit and verify it was cleared */ fm10k_write_reg(hw, FM10K_VFCTRL, 0); if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST) - err = FM10K_ERR_RESET_FAILED; + return FM10K_ERR_RESET_FAILED; - return err; + return 0; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 9c44739da..2a882916b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -283,6 +283,7 @@ struct i40e_pf { #endif /* I40E_FCOE */ u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ + u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */ u16 num_iwarp_msix; /* num of iwarp vectors for this PF */ int iwarp_base_vector; int queues_left; /* queues left unclaimed */ @@ -447,6 +448,14 @@ struct i40e_pf { u16 phy_led_val; }; +enum i40e_filter_state { + I40E_FILTER_INVALID = 0, /* Invalid state */ + I40E_FILTER_NEW, /* New, not sent to FW yet */ + I40E_FILTER_ACTIVE, /* Added to switch by FW */ + I40E_FILTER_FAILED, /* Rejected by FW */ + I40E_FILTER_REMOVE, /* To be removed */ +/* There is no 'removed' state; the filter struct is freed */ +}; struct i40e_mac_filter { struct list_head list; u8 macaddr[ETH_ALEN]; @@ -455,8 +464,7 @@ struct i40e_mac_filter { u8 counter; /* number of instances of this filter */ bool is_vf; /* filter belongs to a VF */ bool is_netdev; /* filter belongs to a netdev */ - bool changed; /* filter needs to be sync'd to the HW */ - bool is_laa; /* filter is a Locally Administered Address */ + enum i40e_filter_state state; }; struct i40e_veb { @@ -522,6 +530,9 @@ struct i40e_vsi { struct i40e_ring **rx_rings; struct i40e_ring **tx_rings; + u32 active_filters; + u32 promisc_threshold; + u16 work_limit; u16 int_rate_limit; /* value in usecs */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 0e6ac8413..618f18436 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) { struct i40e_client_instance *cdev; + int ret = 0; if (!vsi) return; @@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) "Cannot locate client instance open routine\n"); continue; } - cdev->client->ops->open(&cdev->lan_info, cdev->client); + if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state))) { + ret = cdev->client->ops->open(&cdev->lan_info, + cdev->client); + if (!ret) + set_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state); + } } } mutex_unlock(&i40e_client_instance_mutex); @@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, * i40e_client_add_instance - add a client instance struct to the instance list * @pf: pointer to the board struct * @client: pointer to a client struct in the client list. + * @existing: if there was already an existing instance * - * Returns cdev ptr on success, NULL on failure + * Returns cdev ptr on success or if already exists, NULL on failure **/ static struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, - struct i40e_client *client) + struct i40e_client *client, + bool *existing) { struct i40e_client_instance *cdev; struct netdev_hw_addr *mac = NULL; @@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, mutex_lock(&i40e_client_instance_mutex); list_for_each_entry(cdev, &i40e_client_instances, list) { if ((cdev->lan_info.pf == pf) && (cdev->client == client)) { - cdev = NULL; + *existing = true; goto out; } } @@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf) { struct i40e_client_instance *cdev; struct i40e_client *client; + bool existing = false; int ret = 0; if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) @@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf) /* check if L2 VSI is up, if not we are not ready */ if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) continue; + } else { + dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n", + client->name); } /* Add the client instance to the instance list */ - cdev = i40e_client_add_instance(pf, client); + cdev = i40e_client_add_instance(pf, client, &existing); if (!cdev) continue; - /* Also up the ref_cnt of no. of instances of this client */ - atomic_inc(&client->ref_cnt); - dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", - client->name, pf->hw.pf_id, - pf->hw.bus.device, pf->hw.bus.func); + if (!existing) { + /* Also up the ref_cnt for no. of instances of this + * client. + */ + atomic_inc(&client->ref_cnt); + dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", + client->name, pf->hw.pf_id, + pf->hw.bus.device, pf->hw.bus.func); + } /* Send an Open request to the client */ atomic_inc(&cdev->ref_cnt); @@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf) pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func); /* Since in some cases registration may have happened before a device gets - * added, we can schedule a subtask to go initiate the clients. + * added, we can schedule a subtask to go initiate the clients if + * they can be launched at probe time.
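The open path above only invokes the client's open routine while the __I40E_CLIENT_INSTANCE_OPENED bit is still clear, and sets the bit only on success, so repeated service-task runs cannot double-open an instance. A hedged standalone sketch of that open-once guard (the names are invented; the driver itself serializes callers with a mutex, so this sketch assumes a single caller):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct client_instance {
	atomic_bool opened;	/* plays the role of the OPENED state bit */
};

/* Stand-in for cdev->client->ops->open(); returns 0 on success. */
static int client_open(struct client_instance *ci)
{
	(void)ci;
	printf("open called\n");
	return 0;
}

/* Open at most once: the flag is only set after a successful open,
 * so a failed open will be retried on the next pass. */
static void maybe_open(struct client_instance *ci)
{
	if (atomic_load(&ci->opened))
		return;
	if (client_open(ci) == 0)
		atomic_store(&ci->opened, true);
}

int main(void)
{
	struct client_instance ci = { .opened = false };

	maybe_open(&ci);	/* opens the instance */
	maybe_open(&ci);	/* no-op: already opened */
	return 0;
}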
*/ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; i40e_service_event_schedule(pf); @@ -980,13 +999,13 @@ int i40e_unregister_client(struct i40e_client *client) * a close for each of the client instances that were opened. * client_release function is called to handle this. */ + mutex_lock(&i40e_client_mutex); if (!client || i40e_client_release(client)) { ret = -EIO; goto out; } /* TODO: check if device is in reset, or if that matters? */ - mutex_lock(&i40e_client_mutex); if (!i40e_client_is_registered(client)) { pr_info("i40e: Client %s has not been registered\n", client->name); @@ -1005,8 +1024,8 @@ int i40e_unregister_client(struct i40e_client *client) client->name); } - mutex_unlock(&i40e_client_mutex); out: + mutex_unlock(&i40e_client_mutex); return ret; } EXPORT_SYMBOL(i40e_unregister_client); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 422b41d61..2154a34c1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -61,7 +61,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: - case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; default: @@ -297,13 +296,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; - u16 len = le16_to_cpu(aq_desc->datalen); + u16 len; u8 *buf = (u8 *)buffer; u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; + len = le16_to_cpu(aq_desc->datalen); + i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(aq_desc->opcode), @@ -1966,6 +1967,62 @@ aq_add_vsi_exit: return status; } +/** + * i40e_aq_set_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_clear_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + cmd->promiscuous_flags = cpu_to_le16(0); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + /** * i40e_aq_set_vsi_unicast_promiscuous * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c 
b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index e6af8c8d7..05cf9a719 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -116,6 +116,14 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, return len; } +static char *i40e_filter_state_string[] = { + "INVALID", + "NEW", + "ACTIVE", + "FAILED", + "REMOVE", +}; + /** * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum * @pf: the i40e_pf created in command write @@ -160,10 +168,14 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) pf->hw.mac.port_addr); list_for_each_entry(f, &vsi->mac_filter_list, list) { dev_info(&pf->pdev->dev, - " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", + " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n", f->macaddr, f->vlan, f->is_netdev, f->is_vf, - f->counter); + f->counter, i40e_filter_state_string[f->state]); } + dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n", + vsi->active_filters, vsi->promisc_threshold, + (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ? + "ON" : "OFF")); nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index d701861c6..dd4457d29 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -45,7 +45,6 @@ #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5e8d84ff7..c912e041d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -272,15 +272,16 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, u32 *advertising) { enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types; - + struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info; *supported = 0x0; *advertising = 0x0; if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { *supported |= SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full; - *advertising |= ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + *advertising |= ADVERTISED_1000baseT_Full; if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { *supported |= SUPPORTED_100baseT_Full; *advertising |= ADVERTISED_100baseT_Full; @@ -299,8 +300,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { *supported |= SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full; - *advertising |= ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + *advertising |= ADVERTISED_10000baseT_Full; } if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || phy_types & I40E_CAP_PHY_TYPE_XLPPI || @@ -310,15 +312,16 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { *supported |= SUPPORTED_Autoneg | SUPPORTED_40000baseCR4_Full; - 
*advertising |= ADVERTISED_Autoneg | - ADVERTISED_40000baseCR4_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB) + *advertising |= ADVERTISED_40000baseCR4_Full; } - if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && - !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { + if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) { *supported |= SUPPORTED_Autoneg | SUPPORTED_100baseT_Full; - *advertising |= ADVERTISED_Autoneg | - ADVERTISED_100baseT_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + *advertising |= ADVERTISED_100baseT_Full; } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || @@ -326,8 +329,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { *supported |= SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full; - *advertising |= ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + *advertising |= ADVERTISED_1000baseT_Full; } if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) *supported |= SUPPORTED_40000baseSR4_Full; @@ -342,26 +346,30 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { *supported |= SUPPORTED_20000baseKR2_Full | SUPPORTED_Autoneg; - *advertising |= ADVERTISED_20000baseKR2_Full | - ADVERTISED_Autoneg; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB) + *advertising |= ADVERTISED_20000baseKR2_Full; } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { *supported |= SUPPORTED_10000baseKR_Full | SUPPORTED_Autoneg; - *advertising |= ADVERTISED_10000baseKR_Full | - ADVERTISED_Autoneg; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + *advertising |= ADVERTISED_10000baseKR_Full; } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { *supported |= SUPPORTED_10000baseKX4_Full | SUPPORTED_Autoneg; - *advertising |= ADVERTISED_10000baseKX4_Full | - ADVERTISED_Autoneg; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + *advertising |= ADVERTISED_10000baseKX4_Full; } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { *supported |= SUPPORTED_1000baseKX_Full | SUPPORTED_Autoneg; - *advertising |= ADVERTISED_1000baseKX_Full | - ADVERTISED_Autoneg; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + *advertising |= ADVERTISED_1000baseKX_Full; } } @@ -453,6 +461,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_10GBASE_AOC: ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = SUPPORTED_10000baseT_Full; break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | @@ -663,6 +672,7 @@ static int i40e_set_settings(struct net_device *netdev, if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && + hw->phy.media_type != I40E_MEDIA_TYPE_DA && hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 501f15d9f..d0b3a1bb8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -31,12 
+31,7 @@ /* Local includes */ #include "i40e.h" #include "i40e_diag.h" -#if IS_ENABLED(CONFIG_VXLAN) -#include <net/vxlan.h> -#endif -#if IS_ENABLED(CONFIG_GENEVE) -#include <net/geneve.h> -#endif +#include <net/udp_tunnel.h> const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@ -45,8 +40,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 16 +#define DRV_VERSION_MINOR 6 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -91,7 +86,6 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, /* required last entry */ @@ -1280,8 +1274,9 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, (is_vf == f->is_vf) && (is_netdev == f->is_netdev)) { f->counter--; - f->changed = true; changed = 1; + if (f->counter == 0) + f->state = I40E_FILTER_REMOVE; } } if (changed) { @@ -1297,29 +1292,32 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address * - * Some older firmware configurations set up a default promiscuous VLAN - * filter that needs to be removed. + * Remove whatever filter the firmware set up so the driver can manage + * its own filtering intelligently. **/ -static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) +static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; - i40e_status ret; /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) - return -EINVAL; + return; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - if (ret) - return -ENOENT; + /* Ignore error returns, some firmware does it this way... */ + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - return 0; + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.vlan_tag = 0; + /* ...and some firmware does it this way. */ + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } /** @@ -1340,6 +1338,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, bool is_vf, bool is_netdev) { struct i40e_mac_filter *f; + int changed = false; if (!vsi || !macaddr) return NULL; @@ -1359,8 +1358,15 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ether_addr_copy(f->macaddr, macaddr); f->vlan = vlan; - f->changed = true; - + /* If we're in overflow promisc mode, set the state directly + * to failed, so we don't bother to try sending the filter + * to the hardware.
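With the new enum i40e_filter_state, a filter enters the list as I40E_FILTER_NEW, or directly as I40E_FILTER_FAILED when the VSI is already in overflow promiscuous mode as the comment above describes, and it only becomes ACTIVE once firmware accepts it. A compact model of that initial state choice, with simplified names:

#include <stdbool.h>
#include <stdio.h>

enum filter_state {	/* mirrors enum i40e_filter_state */
	FILTER_INVALID = 0,
	FILTER_NEW,	/* not sent to FW yet */
	FILTER_ACTIVE,	/* accepted by FW */
	FILTER_FAILED,	/* rejected by FW */
	FILTER_REMOVE,	/* queued for deletion */
};

struct mac_filter {
	enum filter_state state;
};

/* New filters skip the hardware entirely while in overflow promisc:
 * the device is catching all traffic anyway. */
static void init_filter(struct mac_filter *f, bool overflow_promisc)
{
	f->state = overflow_promisc ? FILTER_FAILED : FILTER_NEW;
}

int main(void)
{
	struct mac_filter f;

	init_filter(&f, false);
	printf("state=%d (expect NEW=%d)\n", f.state, FILTER_NEW);
	return 0;
}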
+ */ + if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) + f->state = I40E_FILTER_FAILED; + else + f->state = I40E_FILTER_NEW; + changed = true; INIT_LIST_HEAD(&f->list); list_add_tail(&f->list, &vsi->mac_filter_list); } @@ -1380,10 +1386,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, f->counter++; } - /* changed tells sync_filters_subtask to - * push the filter down to the firmware - */ - if (f->changed) { + if (changed) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } @@ -1402,6 +1405,9 @@ add_filter_out: * * NOTE: This function is expected to be called with mac_filter_list_lock * being held. + * ANOTHER NOTE: This function MUST be called from within the context of + * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() + * instead of list_for_each_entry(). **/ void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1441,9 +1447,18 @@ void i40e_del_filter(struct i40e_vsi *vsi, * remove the filter from the firmware's list */ if (f->counter == 0) { - f->changed = true; - vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + if ((f->state == I40E_FILTER_FAILED) || + (f->state == I40E_FILTER_NEW)) { + /* this one never got added by the FW. Just remove it, + * no need to sync anything. + */ + list_del(&f->list); + kfree(f); + } else { + f->state = I40E_FILTER_REMOVE; + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + } } } @@ -1465,7 +1480,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p) struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct sockaddr *addr = p; - struct i40e_mac_filter *f; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -1486,52 +1500,23 @@ static int i40e_set_mac(struct net_device *netdev, void *p) else netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true); + i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true); + spin_unlock_bh(&vsi->mac_filter_list_lock); + ether_addr_copy(netdev->dev_addr, addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; ret = i40e_aq_mac_address_write(&vsi->back->hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); - if (ret) { - netdev_info(netdev, - "Addr change for Main VSI failed: %d\n", - ret); - return -EADDRNOTAVAIL; - } - } - - if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) { - struct i40e_aqc_remove_macvlan_element_data element; - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, netdev->dev_addr); - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - } else { - spin_lock_bh(&vsi->mac_filter_list_lock); - i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, - false, false); - spin_unlock_bh(&vsi->mac_filter_list_lock); - } - - if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { - struct i40e_aqc_add_macvlan_element_data element; - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, hw->mac.addr); - element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); - i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - } else { - spin_lock_bh(&vsi->mac_filter_list_lock); - f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, - false, false); - if (f) - f->is_laa = true; - spin_unlock_bh(&vsi->mac_filter_list_lock); + if (ret) + netdev_info(netdev, 
"Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); } - ether_addr_copy(netdev->dev_addr, addr->sa_data); - /* schedule our worker thread which will take care of * applying the new filter changes */ @@ -1591,14 +1576,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ - /* In MFP case we can have a much lower count of MSIx - * vectors available and so we need to lower the used - * q count. - */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) - qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); - else - qcount = vsi->alloc_queue_pairs; + qcount = vsi->alloc_queue_pairs; + num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); @@ -1767,28 +1746,6 @@ bottom_of_search_loop: i40e_service_event_schedule(vsi->back); } -/** - * i40e_mac_filter_entry_clone - Clones a MAC filter entry - * @src: source MAC filter entry to be clones - * - * Returns the pointer to newly cloned MAC filter entry or NULL - * in case of error - **/ -static struct i40e_mac_filter *i40e_mac_filter_entry_clone( - struct i40e_mac_filter *src) -{ - struct i40e_mac_filter *f; - - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return NULL; - *f = *src; - - INIT_LIST_HEAD(&f->list); - - return f; -} - /** * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries * @vsi: pointer to vsi struct @@ -1803,41 +1760,61 @@ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, struct i40e_mac_filter *f, *ftmp; list_for_each_entry_safe(f, ftmp, from, list) { - f->changed = true; /* Move the element back into MAC filter list*/ list_move_tail(&f->list, &vsi->mac_filter_list); } } /** - * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries - * @vsi: pointer to vsi struct + * i40e_update_filter_state - Update filter state based on return data + * from firmware + * @count: Number of filters added + * @add_list: return data from fw + * @head: pointer to first filter in current batch + * @aq_err: status from fw * - * MAC filter entries from list were slated to be added from device. + * MAC filter entries from list were slated to be added to device. Returns + * number of successful filters. Note that 0 does NOT mean success! **/ -static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi) +static int +i40e_update_filter_state(int count, + struct i40e_aqc_add_macvlan_element_data *add_list, + struct i40e_mac_filter *add_head, int aq_err) { - struct i40e_mac_filter *f, *ftmp; - - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed && f->counter) - f->changed = true; - } -} + int retval = 0; + int i; -/** - * i40e_cleanup_add_list - Deletes the element from add list and release - * memory - * @add_list: Pointer to list which contains MAC filter entries - **/ -static void i40e_cleanup_add_list(struct list_head *add_list) -{ - struct i40e_mac_filter *f, *ftmp; - list_for_each_entry_safe(f, ftmp, add_list, list) { - list_del(&f->list); - kfree(f); + if (!aq_err) { + retval = count; + /* Everything's good, mark all filters active. */ + for (i = 0; i < count ; i++) { + add_head->state = I40E_FILTER_ACTIVE; + add_head = list_next_entry(add_head, list); + } + } else if (aq_err == I40E_AQ_RC_ENOSPC) { + /* Device ran out of filter space. 
Check the return value + * for each filter to see which ones are active. + */ + for (i = 0; i < count ; i++) { + if (add_list[i].match_method == + I40E_AQC_MM_ERR_NO_RES) { + add_head->state = I40E_FILTER_FAILED; + } else { + add_head->state = I40E_FILTER_ACTIVE; + retval++; + } + add_head = list_next_entry(add_head, list); + } + } else { + /* Some other horrible thing happened, fail all filters */ + retval = 0; + for (i = 0; i < count ; i++) { + add_head->state = I40E_FILTER_FAILED; + add_head = list_next_entry(add_head, list); + } } + return retval; } /** @@ -1850,20 +1827,22 @@ static void i40e_cleanup_add_list(struct list_head *add_list) **/ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { - struct list_head tmp_del_list, tmp_add_list; - struct i40e_mac_filter *f, *ftmp, *fclone; - bool promisc_forced_on = false; - bool add_happened = false; + struct i40e_mac_filter *f, *ftmp, *add_head = NULL; + struct list_head tmp_add_list, tmp_del_list; + struct i40e_hw *hw = &vsi->back->hw; + bool promisc_changed = false; + char vsi_name[16] = "PF"; int filter_list_len = 0; u32 changed_flags = 0; i40e_status aq_ret = 0; - bool err_cond = false; int retval = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; int aq_err = 0; u16 cmd_flags; + int list_size; + int fcnt; /* empty array typed pointers, kcalloc later */ struct i40e_aqc_add_macvlan_element_data *add_list; @@ -1878,72 +1857,46 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) vsi->current_netdev_flags = vsi->netdev->flags; } - INIT_LIST_HEAD(&tmp_del_list); INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); + + if (vsi->type == I40E_VSI_SRIOV) + snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); + else if (vsi->type != I40E_VSI_MAIN) + snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; spin_lock_bh(&vsi->mac_filter_list_lock); + /* Create a list of filters to delete. 
 */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed) - continue; - - if (f->counter != 0) - continue; - f->changed = false; - - /* Move the element into temporary del_list */ - list_move_tail(&f->list, &tmp_del_list); - } - - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed) - continue; - - if (f->counter == 0) - continue; - f->changed = false; - - /* Clone MAC filter entry and add into temporary list */ - fclone = i40e_mac_filter_entry_clone(f); - if (!fclone) { - err_cond = true; - break; + if (f->state == I40E_FILTER_REMOVE) { + WARN_ON(f->counter != 0); + /* Move the element into temporary del_list */ + list_move_tail(&f->list, &tmp_del_list); + vsi->active_filters--; + } + if (f->state == I40E_FILTER_NEW) { + WARN_ON(f->counter == 0); + /* Move the element into temporary add_list */ + list_move_tail(&f->list, &tmp_add_list); } - list_add_tail(&fclone->list, &tmp_add_list); - } - - /* if failed to clone MAC filter entry - undo */ - if (err_cond) { - i40e_undo_del_filter_entries(vsi, &tmp_del_list); - i40e_undo_add_filter_entries(vsi); } spin_unlock_bh(&vsi->mac_filter_list_lock); - - if (err_cond) { - i40e_cleanup_add_list(&tmp_add_list); - retval = -ENOMEM; - goto out; - } } /* Now process 'del_list' outside the lock */ if (!list_empty(&tmp_del_list)) { - int del_list_size; - - filter_list_len = pf->hw.aq.asq_buf_size / + filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list_size = filter_list_len * + list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kzalloc(del_list_size, GFP_ATOMIC); + del_list = kzalloc(list_size, GFP_ATOMIC); if (!del_list) { - i40e_cleanup_add_list(&tmp_add_list); - /* Undo VSI's MAC filter entry element updates */ spin_lock_bh(&vsi->mac_filter_list_lock); i40e_undo_del_filter_entries(vsi, &tmp_del_list); - i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); retval = -ENOMEM; goto out; @@ -1954,9 +1907,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* add to delete list */ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); - del_list[num_del].vlan_tag = - cpu_to_le16((u16)(f->vlan == - I40E_VLAN_ANY ? 0 : f->vlan)); + if (f->vlan == I40E_VLAN_ANY) { + del_list[num_del].vlan_tag = 0; + cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + } else { + del_list[num_del].vlan_tag = + cpu_to_le16((u16)(f->vlan)); + } cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; del_list[num_del].flags = cmd_flags; num_del++; /* flush a full buffer */ if (num_del == filter_list_len) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, + aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, - num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0; - memset(del_list, 0, del_list_size); + memset(del_list, 0, list_size); - if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { + /* Explicitly ignore and do not report when + * firmware returns ENOENT.
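The delete path above accumulates entries into a firmware-sized array and flushes whenever num_del reaches filter_list_len, with one more flush after the loop for any remainder. The same batch-and-flush skeleton in standalone C, where flush() merely prints in place of the real i40e_aq_remove_macvlan() call:

#include <stdio.h>

#define BATCH_LEN 4	/* stand-in for the AQ buffer capacity */

static void flush(const int *batch, int n)
{
	(void)batch;
	printf("flushing %d entries\n", n);	/* firmware call goes here */
}

static void submit_all(const int *items, int total)
{
	int batch[BATCH_LEN];
	int n = 0;

	for (int i = 0; i < total; i++) {
		batch[n++] = items[i];
		if (n == BATCH_LEN) {	/* flush a full buffer */
			flush(batch, n);
			n = 0;
		}
	}
	if (n)	/* flush the partial tail */
		flush(batch, n);
}

int main(void)
{
	int items[10] = { 0 };

	submit_all(items, 10);	/* prints 4, 4, then 2 */
	return 0;
}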
+ */ + if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) { retval = -EIO; - dev_err(&pf->pdev->dev, - "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + dev_info(&pf->pdev->dev, + "ignoring delete macvlan error on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); } } /* Release memory for MAC filter entries which were @@ -1989,17 +1948,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } if (num_del) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, - del_list, num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0; - if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) + /* Explicitly ignore and do not report when firmware + * returns ENOENT. + */ + if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) { + retval = -EIO; dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + "ignoring delete macvlan error on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); + } } kfree(del_list); @@ -2007,84 +1971,117 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } if (!list_empty(&tmp_add_list)) { - int add_list_size; - - /* do all the adds now */ - filter_list_len = pf->hw.aq.asq_buf_size / - sizeof(struct i40e_aqc_add_macvlan_element_data), - add_list_size = filter_list_len * + /* Do all the adds now. */ + filter_list_len = hw->aq.asq_buf_size / + sizeof(struct i40e_aqc_add_macvlan_element_data); + list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); - add_list = kzalloc(add_list_size, GFP_ATOMIC); + add_list = kzalloc(list_size, GFP_ATOMIC); if (!add_list) { - /* Purge element from temporary lists */ - i40e_cleanup_add_list(&tmp_add_list); - - /* Undo add filter entries from VSI MAC filter list */ - spin_lock_bh(&vsi->mac_filter_list_lock); - i40e_undo_add_filter_entries(vsi); - spin_unlock_bh(&vsi->mac_filter_list_lock); retval = -ENOMEM; goto out; } - - list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { - - add_happened = true; - cmd_flags = 0; - + num_add = 0; + list_for_each_entry(f, &tmp_add_list, list) { + if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state)) { + f->state = I40E_FILTER_FAILED; + continue; + } /* add to add array */ + if (num_add == 0) + add_head = f; + cmd_flags = 0; ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); - add_list[num_add].vlan_tag = - cpu_to_le16( - (u16)(f->vlan == I40E_VLAN_ANY ? 
0 : f->vlan)); + if (f->vlan == I40E_VLAN_ANY) { + add_list[num_add].vlan_tag = 0; + cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + } else { + add_list[num_add].vlan_tag = + cpu_to_le16((u16)(f->vlan)); + } add_list[num_add].queue_number = 0; - cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; add_list[num_add].flags = cpu_to_le16(cmd_flags); num_add++; /* flush a full buffer */ if (num_add == filter_list_len) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_err = hw->aq.asq_last_status; + fcnt = i40e_update_filter_state(num_add, + add_list, + add_head, + aq_ret); + vsi->active_filters += fcnt; + + if (fcnt != num_add) { + promisc_changed = true; + set_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state); + vsi->promisc_threshold = + (vsi->active_filters * 3) / 4; + dev_warn(&pf->pdev->dev, + "Error %s adding RX filters on %s, promiscuous mode forced on\n", + i40e_aq_str(hw, aq_err), + vsi_name); + } + memset(add_list, 0, list_size); num_add = 0; - - if (aq_ret) - break; - memset(add_list, 0, add_list_size); } - /* Entries from tmp_add_list were cloned from MAC - * filter list, hence clean those cloned entries - */ - list_del(&f->list); - kfree(f); } - if (num_add) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; - num_add = 0; + aq_err = hw->aq.asq_last_status; + fcnt = i40e_update_filter_state(num_add, add_list, + add_head, aq_ret); + vsi->active_filters += fcnt; + if (fcnt != num_add) { + promisc_changed = true; + set_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state); + vsi->promisc_threshold = + (vsi->active_filters * 3) / 4; + dev_warn(&pf->pdev->dev, + "Error %s adding RX filters on %s, promiscuous mode forced on\n", + i40e_aq_str(hw, aq_err), vsi_name); + } } + /* Now move all of the filters from the temp add list back to + * the VSI's list. + */ + spin_lock_bh(&vsi->mac_filter_list_lock); + list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { + list_move_tail(&f->list, &vsi->mac_filter_list); + } + spin_unlock_bh(&vsi->mac_filter_list_lock); kfree(add_list); add_list = NULL; + } - if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { - retval = i40e_aq_rc_to_posix(aq_ret, aq_err); + /* Check to see if we can drop out of overflow promiscuous mode. */ + if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) && + (vsi->active_filters < vsi->promisc_threshold)) { + int failed_count = 0; + /* See if we have any failed filters. We can't drop out of + * promiscuous until these have all been deleted. 
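Leaving overflow promiscuous mode is deliberately sticky: promisc_threshold was pinned at 3/4 of the active-filter count when the overflow occurred, and the check continuing below drops the mode only once the active count falls under that threshold and every FAILED filter is gone. A sketch of the exit test with simplified fields:

#include <stdbool.h>
#include <stdio.h>

struct vsi {
	bool overflow_promisc;
	unsigned int active_filters;
	unsigned int promisc_threshold;	/* (active * 3) / 4 at overflow time */
	unsigned int failed_filters;
};

/* True only when it is safe to drop out of overflow promiscuous:
 * well below the level that overflowed, and nothing left to retry. */
static bool can_leave_overflow(const struct vsi *v)
{
	return v->overflow_promisc &&
	       v->active_filters < v->promisc_threshold &&
	       v->failed_filters == 0;
}

int main(void)
{
	struct vsi v = {
		.overflow_promisc = true,
		.active_filters = 10,
		.promisc_threshold = 12,
		.failed_filters = 0,
	};

	printf("leave=%d (expect 1)\n", can_leave_overflow(&v));
	return 0;
}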
+ */ + spin_lock_bh(&vsi->mac_filter_list_lock); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (f->state == I40E_FILTER_FAILED) + failed_count++; + } + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (!failed_count) { dev_info(&pf->pdev->dev, - "add filter failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && - !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, - &vsi->state)) { - promisc_forced_on = true; - set_bit(__I40E_FILTER_OVERFLOW_PROMISC, - &vsi->state); - dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); - } + "filter logjam cleared on %s, leaving overflow promiscuous mode\n", + vsi_name); + clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + promisc_changed = true; + vsi->promisc_threshold = 0; } } @@ -2105,15 +2102,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multi promisc failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "set multi promisc failed on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); } } - if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { + if ((changed_flags & IFF_PROMISC) || + (promisc_changed && + test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || @@ -2129,35 +2128,72 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + if (cur_promisc) + aq_ret = + i40e_aq_set_default_vsi(hw, + vsi->seid, + NULL); + else + aq_ret = + i40e_aq_clear_default_vsi(hw, + vsi->seid, + NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + hw->aq.asq_last_status); + dev_info(&pf->pdev->dev, + "Set default VSI failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + } } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL, true); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set unicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set unicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set multicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } } + aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "set brdcast promisc failed, err %s, aq_err %s\n", + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + } } out: /* if something went wrong then set the 
changed flag so we try again */ @@ -2325,7 +2361,7 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) **/ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) { - struct i40e_mac_filter *f, *add_f; + struct i40e_mac_filter *f, *ftmp, *add_f; bool is_netdev, is_vf; is_vf = (vsi->type == I40E_VSI_SRIOV); @@ -2346,7 +2382,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) } } - list_for_each_entry(f, &vsi->mac_filter_list, list) { + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, @@ -2360,7 +2396,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) /* Now if we add a vlan tag, make sure to check if it is the first * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag" * with 0, so we now accept untagged and specified tagged traffic - * (and not any taged and untagged) + * (and not all tags along with untagged) */ if (vid > 0) { if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, @@ -2382,7 +2418,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ if (vid > 0 && !vsi->info.pvid) { - list_for_each_entry(f, &vsi->mac_filter_list, list) { + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev)) continue; @@ -2419,7 +2455,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) { struct net_device *netdev = vsi->netdev; - struct i40e_mac_filter *f, *add_f; + struct i40e_mac_filter *f, *ftmp, *add_f; bool is_vf, is_netdev; int filter_count = 0; @@ -2432,7 +2468,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) if (is_netdev) i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); - list_for_each_entry(f, &vsi->mac_filter_list, list) + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); /* go through all the filters for this VSI and if there is only @@ -2465,7 +2501,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) } if (!filter_count) { - list_for_each_entry(f, &vsi->mac_filter_list, list) { + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev); @@ -2510,8 +2546,6 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, if (vid > 4095) return -EINVAL; - netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); - /* If the network stack called us with vid = 0 then * it is asking to receive priority tagged packets with * vlan id 0. 
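The i40e_vsi_add_vlan() and i40e_vsi_kill_vlan() hunks above switch to list_for_each_entry_safe() because i40e_del_filter() may now free the entry the iterator is standing on. The same hazard, and the save-the-successor-before-freeing fix, shown with a plain C linked list:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int vlan;
	struct node *next;
};

/* Delete every node matching 'vlan'. The successor is read into
 * 'next' before free(), which is exactly what the _safe iterator's
 * extra cursor provides. */
static void del_vlan(struct node **head, int vlan)
{
	struct node *n = *head, *next;
	struct node **link = head;

	for (; n; n = next) {
		next = n->next;	/* save the successor first */
		if (n->vlan == vlan) {
			*link = next;	/* unlink... */
			free(n);	/* ...then freeing is safe */
		} else {
			link = &n->next;
		}
	}
}

int main(void)
{
	struct node *c = malloc(sizeof(*c));
	struct node *b = malloc(sizeof(*b));
	struct node *a = malloc(sizeof(*a));
	struct node *head;

	*c = (struct node){ .vlan = 7, .next = NULL };
	*b = (struct node){ .vlan = 3, .next = c };
	*a = (struct node){ .vlan = 7, .next = b };
	head = a;

	del_vlan(&head, 7);
	printf("remaining head vlan=%d (expect 3)\n", head->vlan);
	free(head);
	return 0;
}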
Our HW receives them by default when configured @@ -2545,8 +2579,6 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); - /* return code is ignored as there is nothing a user * can do about failure to remove and a log message was * already printed from the other function @@ -2558,6 +2590,44 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, return 0; } +/** + * i40e_macaddr_init - explicitly write the mac address filters + * + * @vsi: pointer to the vsi + * @macaddr: the MAC address + * + * This is needed when the macaddr has been obtained by other + * means than the default, e.g., from Open Firmware or IDPROM. + * Returns 0 on success, negative on failure + **/ +static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) +{ + int ret; + struct i40e_aqc_add_macvlan_element_data element; + + ret = i40e_aq_mac_address_write(&vsi->back->hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + macaddr, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Addr change for VSI failed: %d\n", ret); + return -EADDRNOTAVAIL; + } + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add filter failed err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); + } + return ret; +} + /** * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up * @vsi: the vsi being brought back up @@ -3004,8 +3074,19 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) **/ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) { + struct i40e_pf *pf = vsi->back; + int err; + if (vsi->netdev) i40e_set_rx_mode(vsi->netdev); + + if (!!(pf->flags & I40E_FLAG_PF_MAC)) { + err = i40e_macaddr_init(vsi, pf->hw.mac.addr); + if (err) { + dev_warn(&pf->pdev->dev, + "could not set up macaddr; err %d\n", err); + } + } } /** @@ -3947,6 +4028,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(pf->msix_entries[vector].vector, NULL); + synchronize_irq(pf->msix_entries[vector].vector); free_irq(pf->msix_entries[vector].vector, vsi->q_vectors[i]); @@ -4472,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) **/ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) { + int i, tc_unused = 0; u8 num_tc = 0; - int i; + u8 ret = 0; /* Scan the ETS Config Priority Table to find * traffic class enabled for a given priority - * and use the traffic class index to get the - * number of traffic classes enabled + * and create a bitmask of enabled TCs */ - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - if (dcbcfg->etscfg.prioritytable[i] > num_tc) - num_tc = dcbcfg->etscfg.prioritytable[i]; - } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); - /* Traffic class index starts from zero so - * increment to return the actual count + /* Now scan the bitmask to check for + * contiguous TCs starting with TC0 */ - return num_tc + 1; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (num_tc & BIT(i)) { + if (!tc_unused) { + ret++; + } else { + pr_err("Non-contiguous TC - Disabling DCB\n"); + return 1; + } + } else { + tc_unused = 1; + 
} + } + + /* There is always at least TC0 */ + if (!ret) + ret = 1; + + return ret; } /** @@ -4953,7 +5050,6 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } - i40e_notify_client_of_l2_param_changes(pf->vsi[v]); } } @@ -5017,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) DCB_CAP_DCBX_VER_IEEE; pf->flags |= I40E_FLAG_DCB_CAPABLE; - /* Enable DCB tagging only when more than one TC */ + /* Enable DCB tagging only when more than one TC + * or explicitly disable if only one TC + */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; + else + pf->flags &= ~I40E_FLAG_DCB_ENABLED; dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } @@ -5178,12 +5278,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) usleep_range(1000, 2000); i40e_down(vsi); - /* Give a VF some time to respond to the reset. The - * two second wait is based upon the watchdog cycle in - * the VF driver. - */ - if (vsi->type == I40E_VSI_SRIOV) - msleep(2000); i40e_up(vsi); clear_bit(__I40E_CONFIG_BUSY, &pf->state); } @@ -5226,6 +5320,9 @@ void i40e_down(struct i40e_vsi *vsi) i40e_clean_tx_ring(vsi->tx_rings[i]); i40e_clean_rx_ring(vsi->rx_rings[i]); } + + i40e_notify_client_of_netdev_close(vsi, false); + } /** @@ -5337,15 +5434,7 @@ int i40e_open(struct net_device *netdev) TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); -#ifdef CONFIG_I40E_VXLAN - vxlan_get_rx_port(netdev); -#endif -#ifdef CONFIG_I40E_GENEVE - if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) - geneve_get_rx_port(netdev); -#endif - - i40e_notify_client_of_netdev_open(vsi); + udp_tunnel_get_rx_info(netdev); return 0; } @@ -5631,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, u8 type; /* Not DCB capable or capability disabled */ - if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return ret; /* Ignore if event is not for Nearest Bridge */ @@ -5711,6 +5800,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); + /* Notify the client for the DCB changes */ + i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]); } exit: @@ -5935,7 +6026,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } - } /** @@ -7052,7 +7142,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) **/ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; @@ -7087,7 +7176,6 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) } } } -#endif } /** @@ -7169,7 +7257,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) vsi->alloc_queue_pairs = 1; vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); - vsi->num_q_vectors = 1; + vsi->num_q_vectors = pf->num_fdsb_msix; break; case I40E_VSI_VMDQ2: @@ -7553,9 +7641,11 @@ static int i40e_init_msix(struct i40e_pf *pf) /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (vectors_left) { + pf->num_fdsb_msix = 1; v_budget++; vectors_left--; } else { + pf->num_fdsb_msix = 0; pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; } } @@ -7810,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) #endif 
I40E_FLAG_RSS_ENABLED | I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | @@ -7906,7 +7997,6 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *rss_lut; int ret, i; - memset(&rss_key, 0, sizeof(rss_key)); memcpy(&rss_key, seed, sizeof(rss_key)); rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); @@ -8580,7 +8670,9 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; - pf->flags |= I40E_FLAG_FD_SB_ENABLED; + /* enable FD_SB only if there is MSI-X vector */ + if (pf->num_fdsb_msix > 0) + pf->flags |= I40E_FLAG_FD_SB_ENABLED; } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { @@ -8629,7 +8721,6 @@ static int i40e_set_features(struct net_device *netdev, return 0; } -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure @@ -8649,21 +8740,18 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) return i; } -#endif - -#if IS_ENABLED(CONFIG_VXLAN) /** - * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ -static void i40e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +static void i40e_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 next_idx; u8 idx; @@ -8671,7 +8759,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "vxlan port %d already offloaded\n", + netdev_info(netdev, "port %d already offloaded\n", ntohs(port)); return; } @@ -8680,131 +8768,75 @@ static void i40e_add_vxlan_port(struct net_device *netdev, next_idx = i40e_get_udp_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", + netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", ntohs(port)); return; } - /* New port: add it and mark its index in the bitmap */ - pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; - pf->pending_udp_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; -} - -/** - * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away - * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to - **/ -static void i40e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 idx; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 
(mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - } else { - netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", - ntohs(port)); - } -} -#endif - -#if IS_ENABLED(CONFIG_GENEVE) -/** - * i40e_add_geneve_port - Get notifications about GENEVE ports that come up - * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: New UDP port number that GENEVE started listening to - **/ -static void i40e_add_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 next_idx; - u8 idx; - - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "udp port %d already offloaded\n", - ntohs(port)); - return; - } - - /* Now check if there is space to add the new port */ - next_idx = i40e_get_udp_port_idx(pf, 0); - - if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; + break; + default: return; } /* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); } /** - * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: UDP port number that GENEVE stopped listening to + * @ti: Tunnel endpoint information **/ -static void i40e_del_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +static void i40e_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 idx; - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 (mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) + goto not_found; - dev_info(&pf->pdev->dev, "deleting geneve port %d\n", - ntohs(port)); - } else { - netdev_warn(netdev, "geneve port %d was not found, not deleting\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) + goto not_found; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) + goto not_found; + break; + default: + 
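/* [Editor's note] The delete path above is stricter than the add path: the
 * slot found by port number is cleared only when the recorded tunnel type
 * (I40E_AQC_TUNNEL_TYPE_VXLAN vs. I40E_AQC_TUNNEL_TYPE_NGE) matches the
 * notification's ti->type, so a GENEVE delete can no longer release a port
 * that was offloaded for VXLAN; unknown types fall through to not_found
 * below.
 */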
goto not_found; } + + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + return; +not_found: + netdev_warn(netdev, "UDP port %d was not found, not deleting\n", + ntohs(port)); } -#endif static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) @@ -9034,14 +9066,8 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, .ndo_set_vf_trust = i40e_ndo_set_vf_trust, -#if IS_ENABLED(CONFIG_VXLAN) - .ndo_add_vxlan_port = i40e_add_vxlan_port, - .ndo_del_vxlan_port = i40e_del_vxlan_port, -#endif -#if IS_ENABLED(CONFIG_GENEVE) - .ndo_add_geneve_port = i40e_add_geneve_port, - .ndo_del_geneve_port = i40e_del_geneve_port, -#endif + .ndo_udp_tunnel_add = i40e_udp_tunnel_add, + .ndo_udp_tunnel_del = i40e_udp_tunnel_del, .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, @@ -9057,7 +9083,6 @@ static const struct net_device_ops i40e_netdev_ops = { **/ static int i40e_config_netdev(struct i40e_vsi *vsi) { - u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_netdev_priv *np; @@ -9121,18 +9146,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) * default a MAC-VLAN filter that accepts any tagged packet * which must be replaced by a normal filter. */ - if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { - spin_lock_bh(&vsi->mac_filter_list_lock); - i40e_add_filter(vsi, mac_addr, - I40E_VLAN_ANY, false, true); - spin_unlock_bh(&vsi->mac_filter_list_lock); - } - } else if ((pf->hw.aq.api_maj_ver > 1) || - ((pf->hw.aq.api_maj_ver == 1) && - (pf->hw.aq.api_min_ver > 4))) { - /* Supported in FW API version higher than 1.4 */ - pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + i40e_rm_default_mac_filter(vsi, mac_addr); + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); + spin_unlock_bh(&vsi->mac_filter_list_lock); } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", @@ -9144,10 +9161,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) spin_unlock_bh(&vsi->mac_filter_list_lock); } - spin_lock_bh(&vsi->mac_filter_list_lock); - i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); - spin_unlock_bh(&vsi->mac_filter_list_lock); - ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); @@ -9226,8 +9239,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; i40e_status aq_ret = 0; - u8 laa_macaddr[ETH_ALEN]; - bool found_laa_mac_filter = false; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; @@ -9428,41 +9439,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) } } + vsi->active_filters = 0; + clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); spin_lock_bh(&vsi->mac_filter_list_lock); /* If macvlan filters already exist, force them to get loaded */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - f->changed = true; + f->state = I40E_FILTER_NEW; f_count++; - - /* Expected to have only one MAC filter entry for LAA in list */ - if (f->is_laa && vsi->type == I40E_VSI_MAIN) { - 
ether_addr_copy(laa_macaddr, f->macaddr); - found_laa_mac_filter = true; - } } spin_unlock_bh(&vsi->mac_filter_list_lock); - if (found_laa_mac_filter) { - struct i40e_aqc_remove_macvlan_element_data element; - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, laa_macaddr); - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - ret = i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - if (ret) { - /* some older FW has a different default */ - element.flags |= - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - } - - i40e_aq_mac_address_write(hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - laa_macaddr, NULL); - } - if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; pf->flags |= I40E_FLAG_FILTER_SYNC; @@ -9673,6 +9659,8 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + if (vsi->type == I40E_VSI_MAIN) + i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); /* assign it some queues */ ret = i40e_alloc_rings(vsi); @@ -9697,44 +9685,6 @@ err_vsi: return NULL; } -/** - * i40e_macaddr_init - explicitly write the mac address filters. - * - * @vsi: pointer to the vsi. - * @macaddr: the MAC address - * - * This is needed when the macaddr has been obtained by other - * means than the default, e.g., from Open Firmware or IDPROM. - * Returns 0 on success, negative on failure - **/ -static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) -{ - int ret; - struct i40e_aqc_add_macvlan_element_data element; - - ret = i40e_aq_mac_address_write(&vsi->back->hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - macaddr, NULL); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "Addr change for VSI failed: %d\n", ret); - return -EADDRNOTAVAIL; - } - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, macaddr); - element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); - ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "add filter failed err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), - i40e_aq_str(&vsi->back->hw, - vsi->back->hw.aq.asq_last_status)); - } - return ret; -} - /** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure @@ -10147,14 +10097,14 @@ void i40e_veb_release(struct i40e_veb *veb) static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; - bool is_default = veb->pf->cur_promisc; bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret; - /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, - veb->enabled_tc, is_default, + veb->enabled_tc, false, &veb->seid, enable_stats, NULL); + + /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", @@ -10557,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_VMDQ_ENABLED); } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | @@ -10580,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) /* Not enough queues for all TCs */ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && (queues_left < I40E_MAX_TRAFFIC_CLASS)) { - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + pf->flags 
&= ~(I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_DCB_ENABLED); dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } pf->num_lan_qps = max_t(int, pf->rss_size_max, @@ -10703,12 +10655,8 @@ static void i40e_print_features(struct i40e_pf *pf) } if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += snprintf(&buf[i], REMAIN(i), " DCB"); -#if IS_ENABLED(CONFIG_VXLAN) i += snprintf(&buf[i], REMAIN(i), " VxLAN"); -#endif -#if IS_ENABLED(CONFIG_GENEVE) i += snprintf(&buf[i], REMAIN(i), " Geneve"); -#endif if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE @@ -10783,8 +10731,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* set up pci connections */ - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), i40e_driver_name); + err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); @@ -10982,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED); /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ @@ -11281,8 +11228,7 @@ err_ioremap: kfree(pf); err_pf_alloc: pci_disable_pcie_error_reporting(pdev); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -11393,8 +11339,7 @@ static void i40e_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); kfree(pf); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@ -11539,6 +11484,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; + int retval = 0; set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); @@ -11550,10 +11496,16 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); + i40e_stop_misc_vector(pf); + + retval = pci_save_state(pdev); + if (retval) + return retval; + pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); - return 0; + return retval; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 80403c6ee..4660c5abc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -98,6 +98,8 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a8868e1bf..df7ecc957 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -740,14 +740,12 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, tx_ring->q_vector->tx.total_packets += total_packets; if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - unsigned int j = 0; - /* check to see if there are < 4 descriptors * waiting to be written back, then kick the hardware to force * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - j = i40e_get_tx_pending(tx_ring, false); + unsigned int j = i40e_get_tx_pending(tx_ring, false); if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1fcafcfa8..6fcbf764f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -665,6 +665,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) goto error_alloc_vsi_res; } if (type == I40E_VSI_SRIOV) { + u64 hena = i40e_pf_get_default_rss_hena(pf); + vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; /* If the port VLAN has been configured and then the @@ -687,6 +689,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) vf->default_lan_addr.addr, vf->vf_id); } spin_unlock_bh(&vsi->mac_filter_list_lock); + i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), + (u32)hena); + i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), + (u32)(hena >> 32)); } /* program mac filter */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 8f6420400..4db0c0326 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -59,7 +59,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: - case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index d34972bab..702357069 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -45,7 +45,6 @@ #define 
I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF_HV 0x37D9 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 79d99cd91..a579193b2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -259,13 +259,12 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, tx_ring->q_vector->tx.total_packets += total_packets; if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - unsigned int j = 0; /* check to see if there are < 4 descriptors * waiting to be written back, then kick the hardware to force * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - j = i40evf_get_tx_pending(tx_ring, false); + unsigned int j = i40evf_get_tx_pending(tx_ring, false); if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 16c552952..600fb9c4a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -37,8 +37,8 @@ static const char i40evf_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 10 +#define DRV_VERSION_MINOR 6 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -57,7 +57,9 @@ static const char i40evf_copyright[] = */ static const struct pci_device_id i40evf_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0}, /* required last entry */ {0, } }; @@ -825,7 +827,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, ether_addr_copy(f->macaddr, macaddr); - list_add(&f->list, &adapter->mac_filter_list); + list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index f13445691..d76c221d4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -434,6 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) ether_addr_copy(veal->list[i].addr, f->macaddr); i++; f->add = false; + if (i == count) + break; } } if (!more) @@ -497,6 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) i++; list_del(&f->list); kfree(f); + if (i == count) + break; } } if (!more) @@ -560,6 +564,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) vvfl->vlan_id[i] = f->vlan; i++; f->add = false; + if (i == count) + break; } } if (!more) @@ -623,6 +629,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) i++; list_del(&f->list); kfree(f); + if (i == count) + break; } } if (!more) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index b9609afa5..5387b3a96 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -445,6 +445,7 @@ struct igb_adapter { unsigned long ptp_tx_start; unsigned long last_rx_ptp_check; unsigned long 
last_rx_timestamp; + unsigned int ptp_flags; spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; @@ -474,12 +475,15 @@ struct igb_adapter { u16 eee_advert; }; +/* flags controlling PTP/1588 function */ +#define IGB_PTP_ENABLED BIT(0) +#define IGB_PTP_OVERFLOW_CHECK BIT(1) + #define IGB_FLAG_HAS_MSI BIT(0) #define IGB_FLAG_DCA_ENABLED BIT(1) #define IGB_FLAG_QUAD_PORT_A BIT(2) #define IGB_FLAG_QUEUE_PAIRS BIT(3) #define IGB_FLAG_DMAC BIT(4) -#define IGB_FLAG_PTP BIT(5) #define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) #define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) #define IGB_FLAG_WOL_SUPPORTED BIT(8) @@ -546,6 +550,7 @@ void igb_set_fw_version(struct igb_adapter *); void igb_ptp_init(struct igb_adapter *adapter); void igb_ptp_stop(struct igb_adapter *adapter); void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_suspend(struct igb_adapter *adapter); void igb_ptp_rx_hang(struct igb_adapter *adapter); void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ef3d642f5..942a89fb0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2027,7 +2027,8 @@ void igb_reset(struct igb_adapter *adapter) wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); /* Re-enable PTP, where applicable. */ - igb_ptp_reset(adapter); + if (adapter->ptp_flags & IGB_PTP_ENABLED) + igb_ptp_reset(adapter); igb_get_phy_info(hw); } @@ -2323,9 +2324,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), - igb_driver_name); + err = pci_request_mem_regions(pdev, igb_driver_name); if (err) goto err_pci_reg; @@ -2749,8 +2748,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -2915,8 +2913,7 @@ static void igb_remove(struct pci_dev *pdev) pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); kfree(adapter->shadow_vfta); free_netdev(netdev); @@ -6855,12 +6852,12 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, **/ static bool igb_add_rx_frag(struct igb_ring *rx_ring, struct igb_rx_buffer *rx_buffer, + unsigned int size, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else @@ -6912,6 +6909,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); struct igb_rx_buffer *rx_buffer; struct page *page; @@ -6947,11 +6945,11 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - IGB_RX_BUFSZ, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { 
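/* [Editor's note] In the rework above, the frame length is read once from
 * the descriptor in igb_fetch_rx_buffer(), via
 * le16_to_cpu(rx_desc->wb.upper.length), and handed down to
 * igb_add_rx_frag(), so the preceding dma_sync_single_range_for_cpu()
 * syncs only the 'size' bytes the hardware actually wrote instead of the
 * full IGB_RX_BUFSZ buffer.
 */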
/* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -7527,6 +7525,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, if (netif_running(netdev)) __igb_close(netdev, true); + igb_ptp_suspend(adapter); + igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 3c7bcdf76..336c103ae 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -684,6 +684,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter) u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); unsigned long rx_event; + /* Other hardware uses per-packet timestamps */ if (hw->mac.type != e1000_82576) return; @@ -1062,6 +1063,13 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) -EFAULT : 0; } +/** + * igb_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */ void igb_ptp_init(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -1084,8 +1092,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->cc.mask = CYCLECOUNTER_MASK(64); adapter->cc.mult = 1; adapter->cc.shift = IGB_82576_TSYNC_SHIFT; - /* Dial the nominal frequency. */ - wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; break; case e1000_82580: case e1000_i354: @@ -1104,8 +1111,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); adapter->cc.mult = 1; adapter->cc.shift = 0; - /* Enable the timer functions by clearing bit 31. */ - wr32(E1000_TSAUXC, 0x0); + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; break; case e1000_i210: case e1000_i211: @@ -1130,44 +1136,24 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.settime64 = igb_ptp_settime_i210; adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; adapter->ptp_caps.verify = igb_ptp_verify_pin; - /* Enable the timer functions by clearing bit 31. */ - wr32(E1000_TSAUXC, 0x0); break; default: adapter->ptp_clock = NULL; return; } - wrfl(); - spin_lock_init(&adapter->tmreg_lock); INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); - /* Initialize the clock and overflow work for devices that need it. */ - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { - struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); - - igb_ptp_settime_i210(&adapter->ptp_caps, &ts); - } else { - timecounter_init(&adapter->tc, &adapter->cc, - ktime_to_ns(ktime_get_real())); - + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) INIT_DELAYED_WORK(&adapter->ptp_overflow_work, igb_ptp_overflow_check); - schedule_delayed_work(&adapter->ptp_overflow_work, - IGB_SYSTIM_OVERFLOW_PERIOD); - } - - /* Initialize the time sync interrupts for devices that support it. 
*/ - if (hw->mac.type >= e1000_82580) { - wr32(E1000_TSIM, TSYNC_INTERRUPTS); - wr32(E1000_IMS, E1000_IMS_TS); - } - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + igb_ptp_reset(adapter); + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { @@ -1176,32 +1162,24 @@ void igb_ptp_init(struct igb_adapter *adapter) } else { dev_info(&adapter->pdev->dev, "added PHC on %s\n", adapter->netdev->name); - adapter->flags |= IGB_FLAG_PTP; + adapter->ptp_flags |= IGB_PTP_ENABLED; } } /** - * igb_ptp_stop - Disable PTP device and stop the overflow check. - * @adapter: Board private structure. + * igb_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure * - * This function stops the PTP support and cancels the delayed work. - **/ -void igb_ptp_stop(struct igb_adapter *adapter) + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igb_ptp_suspend(struct igb_adapter *adapter) { - switch (adapter->hw.mac.type) { - case e1000_82576: - case e1000_82580: - case e1000_i354: - case e1000_i350: - cancel_delayed_work_sync(&adapter->ptp_overflow_work); - break; - case e1000_i210: - case e1000_i211: - /* No delayed work to cancel. */ - break; - default: + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) return; - } + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + cancel_delayed_work_sync(&adapter->ptp_overflow_work); cancel_work_sync(&adapter->ptp_tx_work); if (adapter->ptp_tx_skb) { @@ -1209,12 +1187,23 @@ void igb_ptp_stop(struct igb_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); } +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. 
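 * (Editor's note, not part of the patch: after this refactor the function
 * delegates cancellation of the overflow-check and Tx-timestamp work to the
 * new igb_ptp_suspend() and then only unregisters the PTP clock; the
 * overflow check is rescheduled from igb_ptp_reset() solely on MACs that
 * set IGB_PTP_OVERFLOW_CHECK.)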
+ **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ + igb_ptp_suspend(adapter); if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); dev_info(&adapter->pdev->dev, "removed PHC on %s\n", adapter->netdev->name); - adapter->flags &= ~IGB_FLAG_PTP; + adapter->ptp_flags &= ~IGB_PTP_ENABLED; } } @@ -1229,9 +1218,6 @@ void igb_ptp_reset(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; unsigned long flags; - if (!(adapter->flags & IGB_FLAG_PTP)) - return; - /* reset the tstamp_config */ igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@ -1268,4 +1254,10 @@ void igb_ptp_reset(struct igb_adapter *adapter) } out: spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 9f2db1855..9475ff905 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -804,8 +804,6 @@ struct ixgbe_adapter { #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; - - bool need_crosstalk_fix; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 47afed74a..63b25006a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1813,9 +1813,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) /* We need to run link autotry after the driver loads */ hw->mac.autotry_restart = true; - if (ret_val) - return ret_val; - return ixgbe_verify_fw_version_82599(hw); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 902d2061c..c47b605e8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -277,6 +277,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) { s32 ret_val; u32 ctrl_ext; + u16 device_caps; /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); @@ -301,6 +302,22 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) if (ret_val) return ret_val; + /* Cashe bit indicating need for crosstalk fix */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) + hw->need_crosstalk_fix = false; + else + hw->need_crosstalk_fix = true; + break; + default: + hw->need_crosstalk_fix = false; + break; + } + /* Clear adapter stopped flag */ hw->adapter_stopped = false; @@ -763,6 +780,9 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + if (index > 3) + return IXGBE_ERR_PARAM; + /* To turn on the LED, set mode to ON. */ led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); @@ -781,6 +801,9 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + if (index > 3) + return IXGBE_ERR_PARAM; + /* To turn off the LED, set mode to OFF. 
*/ led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); @@ -2657,7 +2680,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) { - int secrxreg; + u32 secrxreg; secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; @@ -2698,6 +2721,9 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) bool locked = false; s32 ret_val; + if (index > 3) + return IXGBE_ERR_PARAM; + /* * Link must be up to auto-blink the LEDs; * Force it if link is down. @@ -2741,6 +2767,9 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) bool locked = false; s32 ret_val; + if (index > 3) + return IXGBE_ERR_PARAM; + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); if (ret_val) return ret_val; @@ -2929,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) } /* was that the last pool using this rar? */ - if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + if (mpsar_lo == 0 && mpsar_hi == 0 && + rar != 0 && rar != hw->mac.san_mac_rar_index) hw->mac.ops.clear_rar(hw, rar); + return 0; } @@ -3187,6 +3218,31 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) return 0; } +/** + * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix + * @hw: pointer to hardware structure + * + * Contains the logic to identify if we need to verify link for the + * crosstalk fix + **/ +static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) +{ + /* Does FW say we need the fix */ + if (!hw->need_crosstalk_fix) + return false; + + /* Only consider SFP+ PHYs i.e. media type fiber */ + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_fiber_qsfp: + break; + default: + return false; + } + + return true; +} + /** * ixgbe_check_mac_link_generic - Determine link and speed status * @hw: pointer to hardware structure @@ -3202,6 +3258,35 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, u32 links_reg, links_orig; u32 i; + /* If Crosstalk fix enabled do the sanity check of making sure + * the SFP+ cage is full. 
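 * (Editor's note: with this check in ixgbe_check_mac_link_generic(), an
 * empty cage reports *link_up = false and *speed = IXGBE_LINK_SPEED_UNKNOWN
 * and returns 0, i.e. link down rather than an error, replacing the
 * watchdog and SFP-detection workarounds removed from ixgbe_main.c further
 * below.)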
+ */ + if (ixgbe_need_crosstalk_fix(hw)) { + u32 sfp_cage_full; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP0; + break; + default: + /* sanity check - No SFP+ devices here */ + sfp_cage_full = false; + break; + } + + if (!sfp_cage_full) { + *link_up = false; + *speed = IXGBE_LINK_SPEED_UNKNOWN; + return 0; + } + } + /* clear the old state */ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 59b771b9b..0d7209eb5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2204,11 +2204,11 @@ static int ixgbe_set_phys_id(struct net_device *netdev, return 2; case ETHTOOL_ID_ON: - hw->mac.ops.led_on(hw, IXGBE_LED_ON); + hw->mac.ops.led_on(hw, hw->bus.func); break; case ETHTOOL_ID_OFF: - hw->mac.ops.led_off(hw, IXGBE_LED_ON); + hw->mac.ops.led_off(hw, hw->bus.func); break; case ETHTOOL_ID_INACTIVE: @@ -2991,10 +2991,15 @@ static int ixgbe_get_ts_info(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); + /* we always support timestamping disabled */ + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + /* fallthrough */ case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -3014,8 +3019,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); - info->rx_filters = - BIT(HWTSTAMP_FILTER_NONE) | + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 58153e818..b4f03748a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -50,7 +50,7 @@ #include #include #include -#include +#include #include #include #include @@ -3084,7 +3084,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) free_irq(entry->vector, q_vector); } - free_irq(adapter->msix_entries[vector++].vector, adapter); + free_irq(adapter->msix_entries[vector].vector, adapter); } /** @@ -5631,7 +5631,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) struct pci_dev *pdev = adapter->pdev; unsigned int rss, fdir; u32 fwsm; - u16 device_caps; int i; /* PCI config space info */ @@ -5728,9 +5727,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif -#ifdef CONFIG_IXGBE_VXLAN adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; -#endif break; default: break; @@ -5779,22 +5776,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->tx_ring_count = IXGBE_DEFAULT_TXD; adapter->rx_ring_count = IXGBE_DEFAULT_RXD; - /* Cache bit indicating need for crosstalk fix */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - hw->mac.ops.get_device_caps(hw, &device_caps); - if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) - adapter->need_crosstalk_fix = false; - else - adapter->need_crosstalk_fix = true; - break; - default: - 
adapter->need_crosstalk_fix = false; - break; - } - /* set default work limits */ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; @@ -6164,9 +6145,7 @@ int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); ixgbe_clear_vxlan_port(adapter); -#ifdef CONFIG_IXGBE_VXLAN - vxlan_get_rx_port(netdev); -#endif + udp_tunnel_get_rx_info(netdev); return 0; @@ -6717,18 +6696,6 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) link_up = true; } - /* If Crosstalk fix enabled do the sanity check of making sure - * the SFP+ cage is empty. - */ - if (adapter->need_crosstalk_fix) { - u32 sfp_cage_full; - - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP2; - if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) - link_up = false; - } - if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); @@ -7075,16 +7042,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; s32 err; - /* If crosstalk fix enabled verify the SFP+ cage is full */ - if (adapter->need_crosstalk_fix) { - u32 sfp_cage_full; - - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP2; - if (!sfp_cage_full) - return; - } - /* not searching for SFP so there is nothing to do here */ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) @@ -7268,14 +7225,12 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } -#ifdef CONFIG_IXGBE_VXLAN - rtnl_lock(); if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; - vxlan_get_rx_port(adapter->netdev); + udp_tunnel_get_rx_info(adapter->netdev); + rtnl_unlock(); } - rtnl_unlock(); -#endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); @@ -7703,7 +7658,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring, /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); -#ifdef CONFIG_IXGBE_VXLAN if (skb->encapsulation && first->protocol == htons(ETH_P_IP) && hdr.ipv4->protocol != IPPROTO_UDP) { @@ -7714,7 +7668,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring, udp_hdr(skb)->dest == adapter->vxlan_port) hdr.network = skb_inner_network_header(skb); } -#endif /* CONFIG_IXGBE_VXLAN */ /* Currently only IPv4/IPv6 with TCP is supported */ switch (hdr.ipv4->version) { @@ -8314,14 +8267,53 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls) { + u32 hdl = cls->knode.handle; u32 uhtid = TC_U32_USERHTID(cls->knode.handle); - u32 loc; - int err; + u32 loc = cls->knode.handle & 0xfffff; + int err = 0, i, j; + struct ixgbe_jump_table *jump = NULL; + + if (loc > IXGBE_MAX_HW_ENTRIES) + return -EINVAL; if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) return -EINVAL; - loc = cls->knode.handle & 0xfffff; + /* Clear this filter in the link data it is associated with */ + if (uhtid != 0x800) { + jump = adapter->jump_tables[uhtid]; + if (!jump) + return -EINVAL; + if (!test_bit(loc - 1, jump->child_loc_map)) + return -EINVAL; + clear_bit(loc - 1, jump->child_loc_map); + } + + /* Check if the filter being deleted is a link */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + jump = adapter->jump_tables[i]; + if (jump && jump->link_hdl == hdl) { + /* Delete filters in the hardware in the child 
hash + * table associated with this link + */ + for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { + if (!test_bit(j, jump->child_loc_map)) + continue; + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, + NULL, + j + 1); + spin_unlock(&adapter->fdir_perfect_lock); + clear_bit(j, jump->child_loc_map); + } + /* Remove resources for this link */ + kfree(jump->input); + kfree(jump->mask); + kfree(jump); + adapter->jump_tables[i] = NULL; + return err; + } + } spin_lock(&adapter->fdir_perfect_lock); err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); @@ -8404,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, struct tcf_exts *exts, u64 *action, u8 *queue) { const struct tc_action *a; + LIST_HEAD(actions); int err; if (tc_no_actions(exts)) return -EINVAL; - tc_for_each_action(a, exts) { + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { /* Drop action */ if (is_tcf_gact_shot(a)) { @@ -8555,6 +8549,18 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, if (!test_bit(link_uhtid - 1, &adapter->tables)) return err; + /* Multiple filters as links to the same hash table are not + * supported. To add a new filter with the same next header + * but different match/jump conditions, create a new hash table + * and link to it. + */ + if (adapter->jump_tables[link_uhtid] && + (adapter->jump_tables[link_uhtid])->link_hdl) { + e_err(drv, "Link filter exists for link: %x\n", + link_uhtid); + return err; + } + for (i = 0; nexthdr[i].jump; i++) { if (nexthdr[i].o != cls->knode.sel->offoff || nexthdr[i].s != cls->knode.sel->offshift || @@ -8576,6 +8582,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, } jump->input = input; jump->mask = mask; + jump->link_hdl = cls->knode.handle; + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, &nexthdr[i]); if (!err) { @@ -8603,6 +8611,20 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, if ((adapter->jump_tables[uhtid])->mask) memcpy(mask, (adapter->jump_tables[uhtid])->mask, sizeof(*mask)); + + /* Lookup in all child hash tables if this location is already + * filled with a filter + */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + struct ixgbe_jump_table *link = adapter->jump_tables[i]; + + if (link && (test_bit(loc - 1, link->child_loc_map))) { + e_err(drv, "Filter exists in location: %x\n", + loc); + err = -EINVAL; + goto err_out; + } + } } err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); if (err) @@ -8634,6 +8656,9 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) + set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); + kfree(mask); return err; err_out_w_lock: @@ -8776,14 +8801,12 @@ static int ixgbe_set_features(struct net_device *netdev, netdev->features = features; -#ifdef CONFIG_IXGBE_VXLAN if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { if (features & NETIF_F_RXCSUM) adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; else ixgbe_clear_vxlan_port(adapter); } -#endif /* CONFIG_IXGBE_VXLAN */ if (need_reset) ixgbe_do_reset(netdev); @@ -8794,23 +8817,25 @@ static int ixgbe_set_features(struct net_device *netdev, return 0; } -#ifdef CONFIG_IXGBE_VXLAN /** * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is 
notifiying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ -static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) +static void ixgbe_add_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; + __be16 port = ti->port; - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) return; if (adapter->vxlan_port == port) @@ -8830,30 +8855,31 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, /** * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to + * @ti: Tunnel endpoint information **/ -static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) +static void ixgbe_del_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) return; - if (adapter->vxlan_port != port) { + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { netdev_info(dev, "Port %d was not found, not deleting\n", - ntohs(port)); + ntohs(ti->port)); return; } ixgbe_clear_vxlan_port(adapter); adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; } -#endif /* CONFIG_IXGBE_VXLAN */ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, @@ -9166,10 +9192,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, -#ifdef CONFIG_IXGBE_VXLAN - .ndo_add_vxlan_port = ixgbe_add_vxlan_port, - .ndo_del_vxlan_port = ixgbe_del_vxlan_port, -#endif /* CONFIG_IXGBE_VXLAN */ + .ndo_udp_tunnel_add = ixgbe_add_vxlan_port, + .ndo_udp_tunnel_del = ixgbe_del_vxlan_port, .ndo_features_check = ixgbe_features_check, }; @@ -9337,8 +9361,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_using_dac = 0; } - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), ixgbe_driver_name); + err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed 0x%x\n", err); @@ -9725,8 +9748,7 @@ err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: if (!adapter || disable_dev) @@ -9793,8 +9815,7 @@ static void ixgbe_remove(struct pci_dev *pdev) #endif iounmap(adapter->io_addr); - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_mem_regions(pdev); e_dev_info("complete\n"); @@ -10058,6 +10079,7 @@ static int __init ixgbe_init_module(void) ret = pci_register_driver(&ixgbe_driver); if (ret) { + destroy_workqueue(ixgbe_wq); ixgbe_dbg_exit(); return ret; } diff 
--git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index a8bed3d88..538a1c547 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -42,8 +42,12 @@ struct ixgbe_jump_table { struct ixgbe_mat_field *mat; struct ixgbe_fdir_filter *input; union ixgbe_atr_input *mask; + u32 link_hdl; + unsigned long child_loc_map[32]; }; +#define IXGBE_MAX_HW_ENTRIES 2045 + static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index c5caacdd1..8618599df 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -954,6 +954,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index da3d8358f..1248a9936 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3525,6 +3525,7 @@ struct ixgbe_hw { bool force_full_reset; bool allow_unsupported_sfp; bool wol_enabled; + bool need_crosstalk_fix; }; struct ixgbe_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 19b75cd98..4716ca499 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1618,6 +1618,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; + mac->ops.setup_fc = ixgbe_setup_fc_x550em; + switch (mac->ops.get_media_type(hw)) { case ixgbe_media_type_fiber: /* CS4227 does not support autoneg, so disable the laser control @@ -1627,7 +1629,6 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - mac->ops.setup_fc = ixgbe_setup_fc_x550em; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP_N: mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; @@ -1655,7 +1656,6 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.setup_link = ixgbe_setup_sgmii; break; default: - mac->ops.setup_fc = ixgbe_setup_fc_x550em; break; } } diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index ae09d60e7..8617cae2f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -32,6 +32,7 @@ #define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 #define IXGBE_DEV_ID_82599_VF_HV 0x152E #define IXGBE_DEV_ID_X540_VF_HV 0x1530 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index d5944c391..be52f5976 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -457,6 +457,7 @@ enum ixgbevf_boards { board_X550_vf_hv, board_X550EM_x_vf, board_X550EM_x_vf_hv, + board_x550em_a_vf, }; enum ixgbevf_xcast_modes { @@ -470,6 +471,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info; extern 
const struct ixgbevf_info ixgbevf_X550_vf_info; extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; +extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info; extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index acc24010c..d9d6616f0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -56,7 +56,7 @@ const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; -#define DRV_VERSION "2.12.1-k" +#define DRV_VERSION "3.2.2-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2015 Intel Corporation."; @@ -70,6 +70,7 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info, [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, + [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -89,6 +90,7 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, /* required last entry */ {0, } }; @@ -1800,16 +1802,19 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, **/ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) { - int i; struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; + int i, ret; ixgbevf_setup_psrtype(adapter); if (hw->mac.type >= ixgbe_mac_X550_vf) ixgbevf_setup_vfmrqc(adapter); /* notify the PF of our intent to use this size of frame */ - hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + if (ret) + dev_err(&adapter->pdev->dev, + "Failed to set MTU at %d\n", netdev->mtu); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring @@ -2772,12 +2777,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_REMOVING, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; adapter->tx_timeout_count++; + rtnl_lock(); ixgbevf_reinit_locked(adapter); + rtnl_unlock(); } /** @@ -3732,6 +3740,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; + int ret; switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: @@ -3748,14 +3757,17 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) if ((new_mtu < 68) || (max_frame > max_possible_frame)) return -EINVAL; + /* notify the PF of our intent to use this size of frame */ + ret = hw->mac.ops.set_rlpml(hw, max_frame); + if (ret) + return -EINVAL; + hw_dbg(hw, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + /* must set new MTU before calling 
down or up */ netdev->mtu = new_mtu; - /* notify the PF of our intent to use this size of frame */ - hw->mac.ops.set_rlpml(hw, max_frame); - return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index e670d3b19..a52f70ec4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -33,6 +33,18 @@ */ #define IXGBE_HV_RESET_OFFSET 0x201 +static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, + u32 *retmsg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 retval = mbx->ops.write_posted(hw, msg, size); + + if (retval) + return retval; + + return mbx->ops.read_posted(hw, retmsg, size); +} + /** * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure @@ -255,8 +267,7 @@ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) { - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 msgbuf[3]; + u32 msgbuf[3], msgbuf_chk; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; @@ -268,19 +279,18 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) */ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= IXGBE_VF_SET_MACVLAN; + msgbuf_chk = msgbuf[0]; + if (addr) ether_addr_copy(msg_addr, addr); - ret_val = mbx->ops.write_posted(hw, msgbuf, 3); - if (!ret_val) - ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); + if (!ret_val) { + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - if (!ret_val) - if (msgbuf[0] == - (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK)) - ret_val = -ENOMEM; + if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK)) + return -ENOMEM; + } return ret_val; } @@ -423,7 +433,6 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq) { - struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[3]; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; @@ -431,10 +440,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; ether_addr_copy(msg_addr, addr); - ret_val = mbx->ops.write_posted(hw, msgbuf, 3); - if (!ret_val) - ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -468,17 +475,6 @@ static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, return -EOPNOTSUPP; } -static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, - u32 *msg, u16 size) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 retmsg[IXGBE_VFMAILBOX_SIZE]; - s32 retval = mbx->ops.write_posted(hw, msg, size); - - if (!retval) - mbx->ops.read_posted(hw, retmsg, size); -} - /** * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses * @hw: pointer to the HW structure @@ -519,7 +515,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); } - ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); + ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE); return 0; } @@ -542,7 +538,6 @@ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, **/ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { - struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; s32 err; @@ -556,11 
+551,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; msgbuf[1] = xcast_mode; - err = mbx->ops.write_posted(hw, msgbuf, 2); - if (err) - return err; - - err = mbx->ops.read_posted(hw, msgbuf, 2); + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); if (err) return err; @@ -589,7 +580,6 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) { - struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; s32 err; @@ -598,11 +588,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; - err = mbx->ops.write_posted(hw, msgbuf, 2); - if (err) - goto mbx_err; - - err = mbx->ops.read_posted(hw, msgbuf, 2); + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); if (err) goto mbx_err; @@ -797,13 +783,22 @@ out: * @hw: pointer to the HW structure * @max_size: value to assign to max frame size **/ -static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 msgbuf[2]; + s32 ret_val; msgbuf[0] = IXGBE_VF_SET_LPE; msgbuf[1] = max_size; - ixgbevf_write_msg_read_ack(hw, msgbuf, 2); + + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + if (ret_val) + return ret_val; + if ((msgbuf[0] & IXGBE_VF_SET_LPE) && + (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_MBX; + + return 0; } /** @@ -812,7 +807,7 @@ static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) * @max_size: value to assign to max frame size * Hyper-V variant. **/ -static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 reg; @@ -823,6 +818,8 @@ static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) /* CRC == 4 */ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); + + return 0; } /** @@ -839,11 +836,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) msg[0] = IXGBE_VF_API_NEGOTIATE; msg[1] = api; msg[2] = 0; - err = hw->mbx.ops.write_posted(hw, msg, 3); - - if (!err) - err = hw->mbx.ops.read_posted(hw, msg, 3); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -892,11 +886,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, /* Fetch queue configuration from the PF */ msg[0] = IXGBE_VF_GET_QUEUE; msg[1] = msg[2] = msg[3] = msg[4] = 0; - err = hw->mbx.ops.write_posted(hw, msg, 5); - - if (!err) - err = hw->mbx.ops.read_posted(hw, msg, 5); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -1005,3 +996,8 @@ const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = { .mac = ixgbe_mac_X550EM_x_vf, .mac_ops = &ixgbevf_hv_mac_ops, }; + +const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { + .mac = ixgbe_mac_x550em_a_vf, + .mac_ops = &ixgbevf_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 2cac610f3..04d8d4ee4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -69,7 +69,7 @@ struct ixgbe_mac_operations { s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, 
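[Editor's note: the vf.c hunks above collapse every write_posted/read_posted pair into one ixgbevf_write_msg_read_ack helper and propagate its return value. A minimal user-space model of that control flow, not the kernel code; the _stub functions are hypothetical stand-ins for the posted mailbox operations:

#include <stdio.h>

typedef int s32;
typedef unsigned int u32;
typedef unsigned short u16;

static s32 write_posted_stub(u32 *msg, u16 size) { (void)msg; (void)size; return 0; }
static s32 read_posted_stub(u32 *msg, u16 size)  { (void)msg; (void)size; return 0; }

/* Mirror of the helper's shape: bail out on a failed write,
 * otherwise return whatever the posted read reports. */
static s32 write_msg_read_ack(u32 *msg, u32 *retmsg, u16 size)
{
	s32 ret = write_posted_stub(msg, size);

	if (ret)
		return ret;
	return read_posted_stub(retmsg, size);
}

int main(void)
{
	u32 msg[2] = { 0x1 /* opcode */, 1500 /* payload */ };

	if (write_msg_read_ack(msg, msg, 2))
		printf("mailbox transaction failed\n");
	else
		printf("peer replied: 0x%x\n", msg[0]);
	return 0;
}

Every caller now checks a single return value instead of duplicating the write/read error handling. End of editor's note.]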
bool); - void (*set_rlpml)(struct ixgbe_hw *, u16); + s32 (*set_rlpml)(struct ixgbe_hw *, u16); }; enum ixgbe_mac_type { @@ -78,6 +78,7 @@ enum ixgbe_mac_type { ixgbe_mac_X540_vf, ixgbe_mac_X550_vf, ixgbe_mac_X550EM_x_vf, + ixgbe_mac_x550em_a_vf, ixgbe_num_macs }; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index dc82b1b19..91e09d68b 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -102,7 +102,6 @@ struct ltq_etop_priv { struct resource *res; struct mii_bus *mii_bus; - struct phy_device *phydev; struct ltq_etop_chan ch[MAX_DMA_CHAN]; int tx_free[MAX_DMA_CHAN >> 1]; @@ -304,35 +303,17 @@ ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } -static int -ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_ethtool_gset(priv->phydev, cmd); -} - -static int -ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_ethtool_sset(priv->phydev, cmd); -} - static int ltq_etop_nway_reset(struct net_device *dev) { - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_start_aneg(priv->phydev); + return phy_start_aneg(dev->phydev); } static const struct ethtool_ops ltq_etop_ethtool_ops = { .get_drvinfo = ltq_etop_get_drvinfo, - .get_settings = ltq_etop_get_settings, - .set_settings = ltq_etop_set_settings, .nway_reset = ltq_etop_nway_reset, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int @@ -401,7 +382,6 @@ ltq_etop_mdio_probe(struct net_device *dev) | SUPPORTED_TP); phydev->advertising = phydev->supported; - priv->phydev = phydev; phy_attached_info(phydev); return 0; @@ -411,7 +391,6 @@ static int ltq_etop_mdio_init(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); - int i; int err; priv->mii_bus = mdiobus_alloc(); @@ -451,7 +430,7 @@ ltq_etop_mdio_cleanup(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); - phy_disconnect(priv->phydev); + phy_disconnect(dev->phydev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); } @@ -470,7 +449,7 @@ ltq_etop_open(struct net_device *dev) ltq_dma_open(&ch->dma); napi_enable(&ch->napi); } - phy_start(priv->phydev); + phy_start(dev->phydev); netif_tx_start_all_queues(dev); return 0; } @@ -482,7 +461,7 @@ ltq_etop_stop(struct net_device *dev) int i; netif_tx_stop_all_queues(dev); - phy_stop(priv->phydev); + phy_stop(dev->phydev); for (i = 0; i < MAX_DMA_CHAN; i++) { struct ltq_etop_chan *ch = &priv->ch[i]; @@ -557,10 +536,8 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu) static int ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct ltq_etop_priv *priv = netdev_priv(dev); - /* TODO: mii-toll reports "No MII transceiver present!." 
?!*/ - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static int diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f92018b13..d41c28d00 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4118,6 +4118,7 @@ static int mvneta_probe(struct platform_device *pdev) pp->bm_priv = NULL; } } + of_node_put(bm_node); err = mvneta_init(&pdev->dev, pp); if (err < 0) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 868a957f2..60227a345 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -699,7 +699,6 @@ struct mvpp2_port { u16 rx_ring_size; struct mvpp2_pcpu_stats __percpu *stats; - struct phy_device *phy_dev; phy_interface_t phy_interface; struct device_node *phy_node; unsigned int link; @@ -4850,7 +4849,7 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id) static void mvpp2_link_event(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); - struct phy_device *phydev = port->phy_dev; + struct phy_device *phydev = dev->phydev; int status_change = 0; u32 val; @@ -5416,6 +5415,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) /* Set hw internals when starting port */ static void mvpp2_start_dev(struct mvpp2_port *port) { + struct net_device *ndev = port->dev; + mvpp2_gmac_max_rx_size_set(port); mvpp2_txp_max_tx_size_set(port); @@ -5425,13 +5426,15 @@ static void mvpp2_start_dev(struct mvpp2_port *port) mvpp2_interrupts_enable(port); mvpp2_port_enable(port); - phy_start(port->phy_dev); + phy_start(ndev->phydev); netif_tx_start_all_queues(port->dev); } /* Set hw internals when stopping port */ static void mvpp2_stop_dev(struct mvpp2_port *port) { + struct net_device *ndev = port->dev; + /* Stop new packets from arriving to RXQs */ mvpp2_ingress_disable(port); @@ -5447,7 +5450,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) mvpp2_egress_disable(port); mvpp2_port_disable(port); - phy_stop(port->phy_dev); + phy_stop(ndev->phydev); } /* Return positive if MTU is valid */ @@ -5535,7 +5538,6 @@ static int mvpp2_phy_connect(struct mvpp2_port *port) phy_dev->supported &= PHY_GBIT_FEATURES; phy_dev->advertising = phy_dev->supported; - port->phy_dev = phy_dev; port->link = 0; port->duplex = 0; port->speed = 0; @@ -5545,8 +5547,9 @@ static int mvpp2_phy_connect(struct mvpp2_port *port) static void mvpp2_phy_disconnect(struct mvpp2_port *port) { - phy_disconnect(port->phy_dev); - port->phy_dev = NULL; + struct net_device *ndev = port->dev; + + phy_disconnect(ndev->phydev); } static int mvpp2_open(struct net_device *dev) @@ -5796,13 +5799,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct mvpp2_port *port = netdev_priv(dev); int ret; - if (!port->phy_dev) + if (!dev->phydev) return -ENOTSUPP; - ret = phy_mii_ioctl(port->phy_dev, ifr, cmd); + ret = phy_mii_ioctl(dev->phydev, ifr, cmd); if (!ret) mvpp2_link_event(dev); @@ -5811,28 +5813,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Ethtool methods */ -/* Get settings (phy address, speed) for ethtools */ -static int mvpp2_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phy_dev) - return -ENODEV; - return phy_ethtool_gset(port->phy_dev, cmd); -} - -/* Set settings (phy 
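[Editor's note: several hunks in this region (mvneta, mvpp2, pxa168, mtk) add of_node_put() calls on exit paths. A tiny model of the reference-counting rule they enforce, with hypothetical stubs rather than the real OF API: every lookup that returns a device-tree node takes a reference, and each exit path must drop exactly one.

#include <stdio.h>

struct dt_node_stub { int refcnt; };

static struct dt_node_stub *node_get_stub(struct dt_node_stub *n)
{
	n->refcnt++;
	return n;
}

static void node_put_stub(struct dt_node_stub *n)
{
	if (n)
		n->refcnt--;
}

static int probe_stub(struct dt_node_stub *parent)
{
	struct dt_node_stub *np = node_get_stub(parent);	/* lookup */
	int err = -1;						/* pretend init failed */

	if (err) {
		node_put_stub(np);	/* the drop the patches add on error paths */
		return err;
	}
	node_put_stub(np);
	return 0;
}

int main(void)
{
	struct dt_node_stub n = { .refcnt = 1 };

	probe_stub(&n);
	printf("refcnt back to %d\n", n.refcnt);		/* 1: balanced */
	return 0;
}

End of editor's note.]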
address, speed) for ethtools */ -static int mvpp2_ethtool_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phy_dev) - return -ENODEV; - return phy_ethtool_sset(port->phy_dev, cmd); -} - /* Set interrupt coalescing for ethtools */ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) @@ -5967,13 +5947,13 @@ static const struct net_device_ops mvpp2_netdev_ops = { static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_link = ethtool_op_get_link, - .get_settings = mvpp2_ethtool_get_settings, - .set_settings = mvpp2_ethtool_set_settings, .set_coalesce = mvpp2_ethtool_set_coalesce, .get_coalesce = mvpp2_ethtool_get_coalesce, .get_drvinfo = mvpp2_ethtool_get_drvinfo, .get_ringparam = mvpp2_ethtool_get_ringparam, .set_ringparam = mvpp2_ethtool_set_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /* Driver initialization */ @@ -6254,6 +6234,7 @@ err_free_stats: err_free_irq: irq_dispose_mapping(port->irq); err_free_netdev: + of_node_put(phy_node); free_netdev(dev); return err; } @@ -6264,6 +6245,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) int i; unregister_netdev(port->dev); + of_node_put(port->phy_node); free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < txq_number; i++) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 54d5154ac..5d5000c8e 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -247,7 +247,6 @@ struct pxa168_eth_private { */ struct timer_list timeout; struct mii_bus *smi_bus; - struct phy_device *phy; /* clock */ struct clk *clk; @@ -275,8 +274,8 @@ enum hash_table_entry { HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 }; -static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); -static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static int pxa168_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd); static int pxa168_init_hw(struct pxa168_eth_private *pep); static int pxa168_init_phy(struct net_device *dev); static void eth_port_reset(struct net_device *dev); @@ -644,7 +643,7 @@ static void eth_port_start(struct net_device *dev) struct pxa168_eth_private *pep = netdev_priv(dev); int tx_curr_desc, rx_curr_desc; - phy_start(pep->phy); + phy_start(dev->phydev); /* Assignment of Tx CTRP of given queue */ tx_curr_desc = pep->tx_curr_desc_q; @@ -700,7 +699,7 @@ static void eth_port_reset(struct net_device *dev) val &= ~PCR_EN; wrl(pep, PORT_CONFIG, val); - phy_stop(pep->phy); + phy_stop(dev->phydev); } /* @@ -943,7 +942,7 @@ static int set_port_config_ext(struct pxa168_eth_private *pep) static void pxa168_eth_adjust_link(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); - struct phy_device *phy = pep->phy; + struct phy_device *phy = dev->phydev; u32 cfg, cfg_o = rdl(pep, PORT_CONFIG); u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT); @@ -972,35 +971,37 @@ static void pxa168_eth_adjust_link(struct net_device *dev) static int pxa168_init_phy(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); - struct ethtool_cmd cmd; + struct ethtool_link_ksettings cmd; + struct phy_device *phy = NULL; int err; - if (pep->phy) + if (dev->phydev) return 0; - pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); - if (IS_ERR(pep->phy)) - return 
PTR_ERR(pep->phy); + phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); + if (IS_ERR(phy)) + return PTR_ERR(phy); - err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link, + err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link, pep->phy_intf); if (err) return err; - err = pxa168_get_settings(dev, &cmd); + err = pxa168_get_link_ksettings(dev, &cmd); if (err) return err; - cmd.phy_address = pep->phy_addr; - cmd.speed = pep->phy_speed; - cmd.duplex = pep->phy_duplex; - cmd.advertising = PHY_BASIC_FEATURES; - cmd.autoneg = AUTONEG_ENABLE; + cmd.base.phy_address = pep->phy_addr; + cmd.base.speed = pep->phy_speed; + cmd.base.duplex = pep->phy_duplex; + ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising, + PHY_BASIC_FEATURES); + cmd.base.autoneg = AUTONEG_ENABLE; - if (cmd.speed != 0) - cmd.autoneg = AUTONEG_DISABLE; + if (cmd.base.speed != 0) + cmd.base.autoneg = AUTONEG_DISABLE; - return pxa168_set_settings(dev, &cmd); + return phy_ethtool_set_link_ksettings(dev, &cmd); } static int pxa168_init_hw(struct pxa168_eth_private *pep) @@ -1366,32 +1367,24 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct pxa168_eth_private *pep = netdev_priv(dev); - if (pep->phy != NULL) - return phy_mii_ioctl(pep->phy, ifr, cmd); + if (dev->phydev != NULL) + return phy_mii_ioctl(dev->phydev, ifr, cmd); return -EOPNOTSUPP; } -static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int pxa168_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - struct pxa168_eth_private *pep = netdev_priv(dev); int err; - err = phy_read_status(pep->phy); + err = phy_read_status(dev->phydev); if (err == 0) - err = phy_ethtool_gset(pep->phy, cmd); + err = phy_ethtool_ksettings_get(dev->phydev, cmd); return err; } -static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct pxa168_eth_private *pep = netdev_priv(dev); - - return phy_ethtool_sset(pep->phy, cmd); -} - static void pxa168_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1402,11 +1395,11 @@ static void pxa168_get_drvinfo(struct net_device *dev, } static const struct ethtool_ops pxa168_ethtool_ops = { - .get_settings = pxa168_get_settings, - .set_settings = pxa168_set_settings, .get_drvinfo = pxa168_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = pxa168_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops pxa168_eth_netdev_ops = { @@ -1513,6 +1506,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) } of_property_read_u32(np, "reg", &pep->phy_addr); pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); + of_node_put(np); } /* Hardware supports only 3 ports */ @@ -1569,8 +1563,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) pep->htpr, pep->htpr_dma); pep->htpr = NULL; } - if (pep->phy) - phy_disconnect(pep->phy); + if (dev->phydev) + phy_disconnect(dev->phydev); if (pep->clk) { clk_disable_unprepare(pep->clk); } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index d1cdc2d76..3743af8f1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats { MTK_ETHTOOL_STAT(rx_flow_control_packets), }; +static const char * 
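[Editor's note: the lantiq_etop, mvpp2, and pxa168 conversions above all delete a driver-private PHY pointer in favor of the phydev handle the core already tracks on the net device. A minimal stand-alone model of why that helps; all names here are hypothetical stubs, not kernel types:

#include <stdio.h>

struct phy_stub { int addr; };

struct net_device_stub {
	struct phy_stub *phydev;	/* the one canonical location */
	void *priv;
};

struct driver_priv_stub {
	int ring_size;			/* no duplicate phy pointer anymore */
};

static void start_stub(struct net_device_stub *dev)
{
	/* Before the conversion this would have read priv->phy; now every
	 * call site uses dev->phydev directly, so the two copies can
	 * never disagree and generic ethtool ksettings helpers work. */
	printf("starting PHY at addr %d\n", dev->phydev->addr);
}

int main(void)
{
	struct phy_stub phy = { .addr = 3 };
	struct driver_priv_stub priv = { .ring_size = 256 };
	struct net_device_stub dev = { .phydev = &phy, .priv = &priv };

	start_stub(&dev);
	return 0;
}

End of editor's note.]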
const mtk_clks_source_name[] = { + "ethif", "esw", "gp1", "gp2" +}; + void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) { __raw_writel(val, eth->base + reg); @@ -76,8 +80,8 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth) return -1; } -u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, - u32 phy_register, u32 write_data) +static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, + u32 phy_register, u32 write_data) { if (mtk_mdio_busy_wait(eth)) return -1; @@ -95,7 +99,7 @@ u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, return 0; } -u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) +static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) { u32 d; @@ -245,12 +249,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) case PHY_INTERFACE_MODE_MII: ge_mode = 1; break; - case PHY_INTERFACE_MODE_RMII: + case PHY_INTERFACE_MODE_REVMII: ge_mode = 2; break; + case PHY_INTERFACE_MODE_RMII: + if (!mac->id) + goto err_phy; + ge_mode = 3; + break; default: - dev_err(eth->dev, "invalid phy_mode\n"); - return -1; + goto err_phy; } /* put the gmac into the right mode */ @@ -263,19 +271,31 @@ static int mtk_phy_connect(struct mtk_mac *mac) mac->phy_dev->autoneg = AUTONEG_ENABLE; mac->phy_dev->speed = 0; mac->phy_dev->duplex = 0; + + if (of_phy_is_fixed_link(mac->of_node)) + mac->phy_dev->supported |= + SUPPORTED_Pause | SUPPORTED_Asym_Pause; + mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause; mac->phy_dev->advertising = mac->phy_dev->supported | ADVERTISED_Autoneg; phy_start_aneg(mac->phy_dev); + of_node_put(np); + return 0; + +err_phy: + of_node_put(np); + dev_err(eth->dev, "invalid phy_mode\n"); + return -EINVAL; } static int mtk_mdio_init(struct mtk_eth *eth) { struct device_node *mii_np; - int err; + int ret; mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); if (!mii_np) { @@ -284,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth) } if (!of_device_is_available(mii_np)) { - err = 0; + ret = -ENODEV; goto err_put_node; } - eth->mii_bus = mdiobus_alloc(); + eth->mii_bus = devm_mdiobus_alloc(eth->dev); if (!eth->mii_bus) { - err = -ENOMEM; + ret = -ENOMEM; goto err_put_node; } @@ -301,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth) eth->mii_bus->parent = eth->dev; snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); - err = of_mdiobus_register(eth->mii_bus, mii_np); - if (err) - goto err_free_bus; - - return 0; - -err_free_bus: - mdiobus_free(eth->mii_bus); + ret = of_mdiobus_register(eth->mii_bus, mii_np); err_put_node: of_node_put(mii_np); - eth->mii_bus = NULL; - return err; + return ret; } static void mtk_mdio_cleanup(struct mtk_eth *eth) @@ -322,28 +334,28 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) return; mdiobus_unregister(eth->mii_bus); - of_node_put(eth->mii_bus->dev.of_node); - mdiobus_free(eth->mii_bus); } static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) { + unsigned long flags; u32 val; + spin_lock_irqsave(ð->irq_lock, flags); val = mtk_r32(eth, MTK_QDMA_INT_MASK); mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); - /* flush write */ - mtk_r32(eth, MTK_QDMA_INT_MASK); + spin_unlock_irqrestore(ð->irq_lock, flags); } static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) { + unsigned long flags; u32 val; + spin_lock_irqsave(ð->irq_lock, flags); val = mtk_r32(eth, MTK_QDMA_INT_MASK); mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); - /* flush write */ - mtk_r32(eth, MTK_QDMA_INT_MASK); + spin_unlock_irqrestore(ð->irq_lock, 
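[Editor's note: the mtk_irq_enable/mtk_irq_disable hunks above wrap the interrupt-mask update in a new irq_lock. A user-space model of why, not the driver code (pthread mutex standing in for spin_lock_irqsave, and all _stub names hypothetical): enable/disable are read-modify-write on one shared register, so without the lock two CPUs can lose each other's bits.

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static uint32_t int_mask;				/* stands in for MTK_QDMA_INT_MASK */
static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;

static void irq_enable_stub(uint32_t mask)
{
	pthread_mutex_lock(&irq_lock);			/* spin_lock_irqsave() in the driver */
	int_mask |= mask;
	pthread_mutex_unlock(&irq_lock);
}

static void irq_disable_stub(uint32_t mask)
{
	pthread_mutex_lock(&irq_lock);
	int_mask &= ~mask;
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	irq_enable_stub(0x3);				/* e.g. RX | TX done bits */
	irq_disable_stub(0x1);				/* RX poller masks itself */
	printf("mask now 0x%x\n", int_mask);		/* prints 0x2 */
	return 0;
}

The lock matters once TX and RX get separate pollers that mask their own bit concurrently, as the later hunks introduce. End of editor's note.]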
flags); } static int mtk_set_mac_address(struct net_device *dev, void *p) @@ -540,15 +552,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, return &ring->buf[idx]; } -static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) +static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) { if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { - dma_unmap_single(dev, + dma_unmap_single(eth->dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { - dma_unmap_page(dev, + dma_unmap_page(eth->dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); @@ -570,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, dma_addr_t mapped_addr; unsigned int nr_frags; int i, n_desc = 1; - u32 txd4 = 0; + u32 txd4 = 0, fport; itxd = ring->next_free; if (itxd == ring->last_free) return -ENOMEM; /* set the forward port */ - txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; + fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; + txd4 |= fport; tx_buf = mtk_desc_to_tx_buf(ring, itxd); memset(tx_buf, 0, sizeof(*tx_buf)); @@ -593,9 +606,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (skb_vlan_tag_present(skb)) txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); - mapped_addr = dma_map_single(&dev->dev, skb->data, + mapped_addr = dma_map_single(eth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) return -ENOMEM; WRITE_ONCE(itxd->txd1, mapped_addr); @@ -621,10 +634,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, n_desc++; frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); - mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, + mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, frag_map_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) goto err_dma; if (i == nr_frags - 1 && @@ -635,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(txd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(frag_map_size) | last_frag * TX_DMA_LS0)); - WRITE_ONCE(txd->txd4, 0); + WRITE_ONCE(txd->txd4, fport); tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; tx_buf = mtk_desc_to_tx_buf(ring, txd); @@ -677,7 +690,7 @@ err_dma: tx_buf = mtk_desc_to_tx_buf(ring, itxd); /* unmap dma */ - mtk_tx_unmap(&dev->dev, tx_buf); + mtk_tx_unmap(eth, tx_buf); itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); @@ -798,7 +811,7 @@ drop: } static int mtk_poll_rx(struct napi_struct *napi, int budget, - struct mtk_eth *eth, u32 rx_intr) + struct mtk_eth *eth) { struct mtk_rx_ring *ring = ð->rx_ring; int idx = ring->calc_idx; @@ -834,11 +847,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, netdev->stats.rx_dropped++; goto release_desc; } - dma_addr = dma_map_single(ð->netdev[mac]->dev, + dma_addr = dma_map_single(eth->dev, new_data + NET_SKB_PAD, ring->buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { skb_free_frag(new_data); netdev->stats.rx_dropped++; goto release_desc; @@ -847,13 +860,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, /* receive data */ skb = build_skb(data, ring->frag_size); if (unlikely(!skb)) { - 
put_page(virt_to_head_page(new_data)); + skb_free_frag(new_data); netdev->stats.rx_dropped++; goto release_desc; } skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); - dma_unmap_single(&netdev->dev, trxd.rxd1, + dma_unmap_single(eth->dev, trxd.rxd1, ring->buf_size, DMA_FROM_DEVICE); pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); skb->dev = netdev; @@ -886,22 +899,22 @@ release_desc: } if (done < budget) - mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); return done; } -static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) +static int mtk_poll_tx(struct mtk_eth *eth, int budget) { struct mtk_tx_ring *ring = ð->tx_ring; struct mtk_tx_dma *desc; struct sk_buff *skb; struct mtk_tx_buf *tx_buf; - int total = 0, done[MTK_MAX_DEVS]; + unsigned int done[MTK_MAX_DEVS]; unsigned int bytes[MTK_MAX_DEVS]; u32 cpu, dma; static int condition; - int i; + int total = 0, i; memset(done, 0, sizeof(done)); memset(bytes, 0, sizeof(bytes)); @@ -935,7 +948,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) done[mac]++; budget--; } - mtk_tx_unmap(eth->dev, tx_buf); + mtk_tx_unmap(eth, tx_buf); ring->last_free = desc; atomic_inc(&ring->free_count); @@ -952,15 +965,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) total += done[i]; } - /* read hw index again make sure no new tx packet */ - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR)) - *tx_again = true; - else - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); - - if (!total) - return 0; - if (mtk_queue_stopped(eth) && (atomic_read(&ring->free_count) > ring->thresh)) mtk_wake_queue(eth); @@ -968,49 +972,75 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) return total; } -static int mtk_poll(struct napi_struct *napi, int budget) +static void mtk_handle_status_irq(struct mtk_eth *eth) { - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); - u32 status, status2, mask, tx_intr, rx_intr, status_intr; - int tx_done, rx_done; - bool tx_again = false; + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - status2 = mtk_r32(eth, MTK_INT_STATUS2); - tx_intr = MTK_TX_DONE_INT; - rx_intr = MTK_RX_DONE_INT; - status_intr = (MTK_GDM1_AF | MTK_GDM2_AF); - tx_done = 0; - rx_done = 0; - tx_again = 0; + if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) { + mtk_stats_update(eth); + mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), + MTK_INT_STATUS2); + } +} - if (status & tx_intr) - tx_done = mtk_poll_tx(eth, budget, &tx_again); +static int mtk_napi_tx(struct napi_struct *napi, int budget) +{ + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); + u32 status, mask; + int tx_done = 0; - if (status & rx_intr) - rx_done = mtk_poll_rx(napi, budget, eth, rx_intr); + mtk_handle_status_irq(eth); + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); + tx_done = mtk_poll_tx(eth, budget); - if (unlikely(status2 & status_intr)) { - mtk_stats_update(eth); - mtk_w32(eth, status_intr, MTK_INT_STATUS2); + if (unlikely(netif_msg_intr(eth))) { + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + mask = mtk_r32(eth, MTK_QDMA_INT_MASK); + dev_info(eth->dev, + "done tx %d, intr 0x%08x/0x%x\n", + tx_done, status, mask); } + if (tx_done == budget) + return budget; + + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + if (status & MTK_TX_DONE_INT) + return budget; + + napi_complete(napi); + mtk_irq_enable(eth, MTK_TX_DONE_INT); + + return tx_done; +} + +static int mtk_napi_rx(struct napi_struct *napi, int 
budget) +{ + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); + u32 status, mask; + int rx_done = 0; + + mtk_handle_status_irq(eth); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); + rx_done = mtk_poll_rx(napi, budget, eth); + if (unlikely(netif_msg_intr(eth))) { + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); mask = mtk_r32(eth, MTK_QDMA_INT_MASK); - netdev_info(eth->netdev[0], - "done tx %d, rx %d, intr 0x%08x/0x%x\n", - tx_done, rx_done, status, mask); + dev_info(eth->dev, + "done rx %d, intr 0x%08x/0x%x\n", + rx_done, status, mask); } - if (tx_again || rx_done == budget) + if (rx_done == budget) return budget; status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - if (status & (tx_intr | rx_intr)) + if (status & MTK_RX_DONE_INT) return budget; napi_complete(napi); - mtk_irq_enable(eth, tx_intr | rx_intr); + mtk_irq_enable(eth, MTK_RX_DONE_INT); return rx_done; } @@ -1073,7 +1103,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) if (ring->buf) { for (i = 0; i < MTK_DMA_SIZE; i++) - mtk_tx_unmap(eth->dev, &ring->buf[i]); + mtk_tx_unmap(eth, &ring->buf[i]); kfree(ring->buf); ring->buf = NULL; } @@ -1246,22 +1276,26 @@ static void mtk_tx_timeout(struct net_device *dev) schedule_work(ð->pending_work); } -static irqreturn_t mtk_handle_irq(int irq, void *_eth) +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) { struct mtk_eth *eth = _eth; - u32 status; - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - if (unlikely(!status)) - return IRQ_NONE; + if (likely(napi_schedule_prep(ð->rx_napi))) { + __napi_schedule(ð->rx_napi); + mtk_irq_disable(eth, MTK_RX_DONE_INT); + } - if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) { - if (likely(napi_schedule_prep(ð->rx_napi))) - __napi_schedule(ð->rx_napi); - } else { - mtk_w32(eth, status, MTK_QMTK_INT_STATUS); + return IRQ_HANDLED; +} + +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + + if (likely(napi_schedule_prep(ð->tx_napi))) { + __napi_schedule(ð->tx_napi); + mtk_irq_disable(eth, MTK_TX_DONE_INT); } - mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT)); return IRQ_HANDLED; } @@ -1274,7 +1308,7 @@ static void mtk_poll_controller(struct net_device *dev) u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT; mtk_irq_disable(eth, int_mask); - mtk_handle_irq(dev->irq, dev); + mtk_handle_irq_rx(eth->irq[2], dev); mtk_irq_enable(eth, int_mask); } #endif @@ -1310,6 +1344,7 @@ static int mtk_open(struct net_device *dev) if (err) return err; + napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); } @@ -1358,6 +1393,7 @@ static int mtk_stop(struct net_device *dev) return 0; mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); @@ -1395,7 +1431,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth) /* Enable RX VLan Offloading */ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0, + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, + dev_name(eth->dev), eth); + if (err) + return err; + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, dev_name(eth->dev), eth); if (err) return err; @@ -1411,7 +1451,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth) mtk_w32(eth, 0, MTK_RST_GL); /* FE int grouping */ - mtk_w32(eth, 0, MTK_FE_INT_GRP); + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); + mtk_w32(eth, 
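[Editor's note: both new pollers above (mtk_napi_tx / mtk_napi_rx) follow the same completion protocol. A plain-C sketch of it, not kernel NAPI, with everything a hypothetical stub: ack the IRQ status first, poll, and only unmask after a final status re-check so work that lands between "done" and "unmask" cannot be lost.

#include <stdio.h>

static unsigned int hw_status;		/* stands in for MTK_QMTK_INT_STATUS */

static int poll_hw_stub(int budget)	/* returns packets processed */
{
	int done = budget > 2 ? 2 : budget;

	hw_status = 0;			/* work drained for this demo */
	return done;
}

static int napi_poll_stub(int budget, unsigned int done_bit)
{
	int done;

	hw_status &= ~done_bit;		/* ack before polling */
	done = poll_hw_stub(budget);

	if (done == budget)
		return budget;		/* budget exhausted: stay scheduled */
	if (hw_status & done_bit)
		return budget;		/* new work raced in: poll again */

	/* napi_complete() + interrupt unmask would go here */
	return done;
}

int main(void)
{
	hw_status = 0x1;
	printf("polled %d\n", napi_poll_stub(8, 0x1));	/* prints "polled 2" */
	return 0;
}

End of editor's note.]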
MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); for (i = 0; i < 2; i++) { u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); @@ -1457,9 +1501,7 @@ static void mtk_uninit(struct net_device *dev) struct mtk_eth *eth = mac->hw; phy_disconnect(mac->phy_dev); - mtk_mdio_cleanup(eth); mtk_irq_disable(eth, ~0); - free_irq(dev->irq, dev); } static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -1717,6 +1759,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) goto free_netdev; } spin_lock_init(&mac->hw_stats->stats_lock); + u64_stats_init(&mac->hw_stats->syncp); mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; SET_NETDEV_DEV(eth->netdev[id], eth->dev); @@ -1733,10 +1776,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) dev_err(eth->dev, "error bringing up device\n"); goto free_netdev; } - eth->netdev[id]->irq = eth->irq; + eth->netdev[id]->irq = eth->irq[0]; netif_info(eth, probe, eth->netdev[id], "mediatek frame engine at 0x%08lx, irq %d\n", - eth->netdev[id]->base_addr, eth->netdev[id]->irq); + eth->netdev[id]->base_addr, eth->irq[0]); return 0; @@ -1753,6 +1796,7 @@ static int mtk_probe(struct platform_device *pdev) struct mtk_soc_data *soc; struct mtk_eth *eth; int err; + int i; match = of_match_device(of_mtk_match, &pdev->dev); soc = (struct mtk_soc_data *)match->data; @@ -1761,11 +1805,13 @@ static int mtk_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; + eth->dev = &pdev->dev; eth->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(eth->base)) return PTR_ERR(eth->base); spin_lock_init(ð->page_lock); + spin_lock_init(ð->irq_lock); eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mediatek,ethsys"); @@ -1787,26 +1833,28 @@ static int mtk_probe(struct platform_device *pdev) return PTR_ERR(eth->rstc); } - eth->irq = platform_get_irq(pdev, 0); - if (eth->irq < 0) { - dev_err(&pdev->dev, "no IRQ resource found\n"); - return -ENXIO; + for (i = 0; i < 3; i++) { + eth->irq[i] = platform_get_irq(pdev, i); + if (eth->irq[i] < 0) { + dev_err(&pdev->dev, "no IRQ%d resource found\n", i); + return -ENXIO; + } + } + for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { + eth->clks[i] = devm_clk_get(eth->dev, + mtk_clks_source_name[i]); + if (IS_ERR(eth->clks[i])) { + if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) + return -EPROBE_DEFER; + return -ENODEV; + } } - eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif"); - eth->clk_esw = devm_clk_get(&pdev->dev, "esw"); - eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1"); - eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2"); - if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) || - IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif)) - return -ENODEV; - - clk_prepare_enable(eth->clk_ethif); - clk_prepare_enable(eth->clk_esw); - clk_prepare_enable(eth->clk_gp1); - clk_prepare_enable(eth->clk_gp2); + clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); + clk_prepare_enable(eth->clks[MTK_CLK_ESW]); + clk_prepare_enable(eth->clks[MTK_CLK_GP1]); + clk_prepare_enable(eth->clks[MTK_CLK_GP2]); - eth->dev = &pdev->dev; eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); INIT_WORK(ð->pending_work, mtk_pending_work); @@ -1831,7 +1879,9 @@ static int mtk_probe(struct platform_device *pdev) * for NAPI to work */ init_dummy_netdev(ð->dummy_dev); - netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_poll, + netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, + MTK_NAPI_WEIGHT); + netif_napi_add(ð->dummy_dev, 
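[Editor's note: the probe hunk above replaces four named clock fields with an enum-indexed array walked in a loop. A minimal stand-alone model of that conversion; struct clk_stub and the pool are hypothetical stand-ins for devm_clk_get() results:

#include <stdio.h>

enum clk_map { CLK_ETHIF, CLK_ESW, CLK_GP1, CLK_GP2, CLK_MAX };

static const char * const clk_names[CLK_MAX] = {
	"ethif", "esw", "gp1", "gp2"
};

struct clk_stub { const char *name; };

static struct clk_stub pool[CLK_MAX];	/* stands in for the clock provider */

int main(void)
{
	struct clk_stub *clks[CLK_MAX];
	int i;

	/* one loop replaces four near-identical devm_clk_get() calls,
	 * and the name table and handle array stay in lockstep */
	for (i = 0; i < CLK_MAX; i++) {
		pool[i].name = clk_names[i];
		clks[i] = &pool[i];
	}
	printf("first clock: %s\n", clks[CLK_ETHIF]->name);
	return 0;
}

End of editor's note.]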
ð->rx_napi, mtk_napi_rx, MTK_NAPI_WEIGHT); platform_set_drvdata(pdev, eth); @@ -1846,14 +1896,24 @@ err_free_dev: static int mtk_remove(struct platform_device *pdev) { struct mtk_eth *eth = platform_get_drvdata(pdev); + int i; - clk_disable_unprepare(eth->clk_ethif); - clk_disable_unprepare(eth->clk_esw); - clk_disable_unprepare(eth->clk_gp1); - clk_disable_unprepare(eth->clk_gp2); + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + mtk_stop(eth->netdev[i]); + } + clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); + clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); + clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); + clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); + + netif_napi_del(ð->tx_napi); netif_napi_del(ð->rx_napi); mtk_cleanup(eth); + mtk_mdio_cleanup(eth); platform_set_drvdata(pdev, NULL); return 0; @@ -1863,13 +1923,13 @@ const struct of_device_id of_mtk_match[] = { { .compatible = "mediatek,mt7623-eth" }, {}, }; +MODULE_DEVICE_TABLE(of, of_mtk_match); static struct platform_driver mtk_driver = { .probe = mtk_probe, .remove = mtk_remove, .driver = { .name = "mtk_soc_eth", - .owner = THIS_MODULE, .of_match_table = of_mtk_match, }, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index a5eb7c623..6e1ade7a2 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -68,6 +68,10 @@ /* Unicast Filter MAC Address Register - High */ #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) +/* PDMA Interrupt grouping registers */ +#define MTK_PDMA_INT_GRP1 0xa50 +#define MTK_PDMA_INT_GRP2 0xa54 + /* QDMA TX Queue Configuration Registers */ #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) #define QDMA_RES_THRES 4 @@ -125,6 +129,11 @@ #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) +/* QDMA Interrupt grouping registers */ +#define MTK_QDMA_INT_GRP1 0x1a20 +#define MTK_QDMA_INT_GRP2 0x1a24 +#define MTK_RLS_DONE_INT BIT(0) + /* QDMA Interrupt Status Register */ #define MTK_QDMA_INT_MASK 0x1A1C @@ -281,6 +290,17 @@ enum mtk_tx_flags { MTK_TX_FLAGS_PAGE0 = 0x02, }; +/* This enum allows us to identify how the clock is defined on the array of the + * clock in the order + */ +enum mtk_clks_map { + MTK_CLK_ETHIF, + MTK_CLK_ESW, + MTK_CLK_GP1, + MTK_CLK_GP2, + MTK_CLK_MAX +}; + /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at * by the TX descriptor s * @skb: The SKB pointer of the packet being sent @@ -356,14 +376,12 @@ struct mtk_rx_ring { * @dma_refcnt: track how many netdevs are using the DMA engine * @tx_ring: Pointer to the memore holding info about the TX ring * @rx_ring: Pointer to the memore holding info about the RX ring - * @rx_napi: The NAPI struct + * @tx_napi: The TX NAPI struct + * @rx_napi: The RX NAPI struct * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring * @phy_scratch_ring: physical address of scratch_ring * @scratch_head: The scratch memory that scratch_ring points to. 
- * @clk_ethif: The ethif clock - * @clk_esw: The switch clock - * @clk_gp1: The gmac1 clock - * @clk_gp2: The gmac2 clock + * @clks: clock array for all clocks required * @mii_bus: If there is a bus we need to create an instance for it * @pending_work: The workqueue used to reset the dma ring */ @@ -373,10 +391,11 @@ struct mtk_eth { void __iomem *base; struct reset_control *rstc; spinlock_t page_lock; + spinlock_t irq_lock; struct net_device dummy_dev; struct net_device *netdev[MTK_MAX_DEVS]; struct mtk_mac *mac[MTK_MAX_DEVS]; - int irq; + int irq[3]; u32 msg_enable; unsigned long sysclk; struct regmap *ethsys; @@ -384,14 +403,13 @@ struct mtk_eth { atomic_t dma_refcnt; struct mtk_tx_ring tx_ring; struct mtk_rx_ring rx_ring; + struct napi_struct tx_napi; struct napi_struct rx_napi; struct mtk_tx_dma *scratch_ring; dma_addr_t phy_scratch_ring; void *scratch_head; - struct clk *clk_ethif; - struct clk *clk_esw; - struct clk *clk_gp1; - struct clk *clk_gp2; + struct clk *clks[MTK_CLK_MAX]; + struct mii_bus *mii_bus; struct work_struct pending_work; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 9ca3734eb..5098e7f21 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -24,13 +24,6 @@ config MLX4_EN_DCB If unsure, set to Y -config MLX4_EN_VXLAN - bool "VXLAN offloads Support" - default y - depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m) - ---help--- - Say Y here if you want to use VXLAN offloads in the driver. - config MLX4_CORE tristate depends on PCI diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index f01918c63..b04760a50 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -37,6 +37,11 @@ #include "mlx4_en.h" #include "fw_qos.h" +enum { + MLX4_CEE_STATE_DOWN = 0, + MLX4_CEE_STATE_UP = 1, +}; + /* Definitions for QCN */ @@ -80,13 +85,205 @@ struct mlx4_congestion_control_mb_prio_802_1_qau_statistics { __be32 reserved3[4]; }; +static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + switch (capid) { + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcbx_cap; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 1 << mlx4_max_tc(priv->mdev->dev); + break; + default: + *cap = false; + break; + } + + return 0; +} + +static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + return priv->cee_config.pfc_state; +} + +static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + priv->cee_config.pfc_state = state; +} + +static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, + u8 *setting) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + *setting = priv->cee_config.dcb_pfc[priority]; +} + +static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, + u8 setting) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + priv->cee_config.dcb_pfc[priority] = setting; + priv->cee_config.pfc_state = true; +} + +static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) + return -EINVAL; + + if (tcid == DCB_NUMTCS_ATTR_PFC) + *num = 
mlx4_max_tc(priv->mdev->dev); + else + *num = 0; + + return 0; +} + +static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct mlx4_en_dev *mdev = priv->mdev; + + if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + if (priv->cee_config.pfc_state) { + int tc; + + priv->prof->rx_pause = 0; + priv->prof->tx_pause = 0; + for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { + u8 tc_mask = 1 << tc; + + switch (priv->cee_config.dcb_pfc[tc]) { + case pfc_disabled: + priv->prof->tx_ppp &= ~tc_mask; + priv->prof->rx_ppp &= ~tc_mask; + break; + case pfc_enabled_full: + priv->prof->tx_ppp |= tc_mask; + priv->prof->rx_ppp |= tc_mask; + break; + case pfc_enabled_tx: + priv->prof->tx_ppp |= tc_mask; + priv->prof->rx_ppp &= ~tc_mask; + break; + case pfc_enabled_rx: + priv->prof->tx_ppp &= ~tc_mask; + priv->prof->rx_ppp |= tc_mask; + break; + default: + break; + } + } + en_dbg(DRV, priv, "Set pfc on\n"); + } else { + priv->prof->rx_pause = 1; + priv->prof->tx_pause = 1; + en_dbg(DRV, priv, "Set pfc off\n"); + } + + if (mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + priv->prof->tx_pause, + priv->prof->tx_ppp, + priv->prof->rx_pause, + priv->prof->rx_ppp)) { + en_err(priv, "Failed setting pause params\n"); + return 1; + } + + return 0; +} + +static u8 mlx4_en_dcbnl_get_state(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED) + return MLX4_CEE_STATE_UP; + + return MLX4_CEE_STATE_DOWN; +} + +static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int num_tcs = 0; + + if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) + return 0; + + if (state) { + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + num_tcs = IEEE_8021QAZ_MAX_TCS; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + } + + if (mlx4_en_setup_tc(dev, num_tcs)) + return 1; + + return 0; +} + +/* On success returns a non-zero 802.1p user priority bitmap + * otherwise returns 0 as the invalid user priority bitmap to + * indicate an error. 
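[Editor's note: the mlx4_en_dcbnl_set_all() hunk above builds per-direction pause bitmaps from the CEE per-TC settings. A stand-alone model of that loop with hypothetical names, not the mlx4 code: each traffic class contributes one bit to the tx and/or rx pause-priority bitmaps depending on its configured direction.

#include <stdio.h>

enum pfc_mode { PFC_DISABLED, PFC_FULL, PFC_TX, PFC_RX };

int main(void)
{
	enum pfc_mode cfg[8] = { PFC_FULL, PFC_TX, PFC_RX, PFC_DISABLED,
				 PFC_FULL, PFC_DISABLED, PFC_DISABLED, PFC_DISABLED };
	unsigned char tx_ppp = 0, rx_ppp = 0;
	int tc;

	for (tc = 0; tc < 8; tc++) {
		unsigned char mask = 1 << tc;

		switch (cfg[tc]) {
		case PFC_FULL: tx_ppp |= mask; rx_ppp |= mask; break;
		case PFC_TX:   tx_ppp |= mask; rx_ppp &= ~mask; break;
		case PFC_RX:   rx_ppp |= mask; tx_ppp &= ~mask; break;
		default:       tx_ppp &= ~mask; rx_ppp &= ~mask; break;
		}
	}
	/* prints tx_ppp=0x13 rx_ppp=0x15 for the table above */
	printf("tx_ppp=0x%x rx_ppp=0x%x\n", tx_ppp, rx_ppp);
	return 0;
}

As in the driver, enabling any per-priority flow control also means disabling global rx/tx pause, since the two mechanisms are mutually exclusive. End of editor's note.]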
+ */ +static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 0; + + return dcb_getapp(netdev, &app); +} + +static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype, + u16 id, u8 up) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct dcb_app app; + + if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return -EINVAL; + + memset(&app, 0, sizeof(struct dcb_app)); + app.selector = idtype; + app.protocol = id; + app.priority = up; + + return dcb_setapp(netdev, &app); +} + static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct mlx4_en_priv *priv = netdev_priv(dev); struct ieee_ets *my_ets = &priv->ets; - /* No IEEE PFC settings available */ if (!my_ets) return -EINVAL; @@ -237,18 +434,51 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) { - return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->dcbx_cap; } static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + + if (mode == priv->dcbx_cap) + return 0; + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || - (mode & DCB_CAP_DCBX_VER_CEE) || - !(mode & DCB_CAP_DCBX_VER_IEEE) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && + (mode & DCB_CAP_DCBX_VER_CEE)) || !(mode & DCB_CAP_DCBX_HOST)) - return 1; + goto err; + + priv->dcbx_cap = mode; + + ets.ets_cap = IEEE_8021QAZ_MAX_TCS; + pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + if (mlx4_en_dcbnl_ieee_setets(dev, &ets)) + goto err; + if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc)) + goto err; + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + if (mlx4_en_dcbnl_set_all(dev)) + goto err; + } else { + if (mlx4_en_dcbnl_ieee_setets(dev, &ets)) + goto err; + if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc)) + goto err; + if (mlx4_en_setup_tc(dev, 0)) + goto err; + } return 0; +err: + return 1; } #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */ @@ -463,24 +693,46 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev, } const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { - .ieee_getets = mlx4_en_dcbnl_ieee_getets, - .ieee_setets = mlx4_en_dcbnl_ieee_setets, - .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, - .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, - .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, - .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + .ieee_getets = mlx4_en_dcbnl_ieee_getets, + .ieee_setets = mlx4_en_dcbnl_ieee_setets, + .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, + .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, + .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, + .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getstate = mlx4_en_dcbnl_get_state, + .setstate = mlx4_en_dcbnl_set_state, + .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg, + .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg, + .setall = mlx4_en_dcbnl_set_all, + .getcap = mlx4_en_dcbnl_getcap, + .getnumtcs = mlx4_en_dcbnl_getnumtcs, + .getpfcstate = mlx4_en_dcbnl_getpfcstate, + .setpfcstate = mlx4_en_dcbnl_setpfcstate, + .getapp = mlx4_en_dcbnl_getapp, + .setapp = mlx4_en_dcbnl_setapp, 
.getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, - .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, - .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, - .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, }; const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + .setstate = mlx4_en_dcbnl_set_state, + .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg, + .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg, + .setall = mlx4_en_dcbnl_set_all, + .getnumtcs = mlx4_en_dcbnl_getnumtcs, + .getpfcstate = mlx4_en_dcbnl_getpfcstate, + .setpfcstate = mlx4_en_dcbnl_setpfcstate, + .getapp = mlx4_en_dcbnl_getapp, + .setapp = mlx4_en_dcbnl_setapp, + .getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 44cf16d01..bdda17d2e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1112,7 +1112,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - return priv->rx_ring_num; + return rounddown_pow_of_two(priv->rx_ring_num); } static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev) @@ -1146,19 +1146,17 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, u8 *hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_rss_map *rss_map = &priv->rss_map; - int rss_rings; - size_t n = priv->rx_ring_num; + u32 n = mlx4_en_get_rxfh_indir_size(dev); + u32 i, rss_rings; int err = 0; - rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; - rss_rings = 1 << ilog2(rss_rings); + rss_rings = priv->prof->rss_rings ?: n; + rss_rings = rounddown_pow_of_two(rss_rings); - while (n--) { + for (i = 0; i < n; i++) { if (!ring_index) break; - ring_index[n] = rss_map->qps[n % rss_rings].qpn - - rss_map->base_qpn; + ring_index[i] = i % rss_rings; } if (key) memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); @@ -1171,6 +1169,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, const u8 *key, const u8 hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); + u32 n = mlx4_en_get_rxfh_indir_size(dev); struct mlx4_en_dev *mdev = priv->mdev; int port_up = 0; int err = 0; @@ -1180,18 +1179,18 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, /* Calculate RSS table size and make sure flows are spread evenly * between rings */ - for (i = 0; i < priv->rx_ring_num; i++) { + for (i = 0; i < n; i++) { if (!ring_index) - continue; + break; if (i > 0 && !ring_index[i] && !rss_rings) rss_rings = i; - if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num))) + if (ring_index[i] != (i % (rss_rings ?: n))) return -EINVAL; } if (!rss_rings) - rss_rings = priv->rx_ring_num; + rss_rings = n; /* RSS table size must be an order of 2 */ if (!is_power_of_2(rss_rings)) @@ -1730,6 +1729,12 @@ static int mlx4_en_set_channels(struct net_device *dev, !channel->tx_count || !channel->rx_count) return -EINVAL; + if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) { + en_err(priv, "Minimum %d tx channels required with XDP on\n", + priv->xdp_ring_num / MLX4_EN_NUM_UP + 1); + return -EINVAL; + } + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; @@ -1751,7 +1756,8 @@ static int mlx4_en_set_channels(struct net_device *dev, mlx4_en_safe_replace_resources(priv, tmp); - netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 
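[Editor's note: the en_ethtool.c hunks above make the reported RSS indirection table size a power of two and fill it as i % rss_rings. A quick stand-alone sketch of the table shape those hunks enforce; rounddown_pow_of_two_stub is a hypothetical stand-in for the kernel helper:

#include <stdio.h>

static unsigned int rounddown_pow_of_two_stub(unsigned int n)
{
	while (n & (n - 1))
		n &= n - 1;		/* clear lowest set bit */
	return n;
}

int main(void)
{
	unsigned int rx_rings = 10;
	unsigned int n = rounddown_pow_of_two_stub(rx_rings);	/* 8 */
	unsigned int table[8];
	unsigned int i, rss_rings = 4;

	/* every entry must equal i % rss_rings, which is what the
	 * rewritten set_rxfh validates before accepting a table */
	for (i = 0; i < n; i++)
		table[i] = i % rss_rings;

	for (i = 0; i < n; i++)
		printf("%u ", table[i]);	/* 0 1 2 3 0 1 2 3 */
	printf("\n");
	return 0;
}

End of editor's note.]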
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num - + priv->xdp_ring_num); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); if (dev->num_tc) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8359e9e51..fedb82927 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -31,6 +31,7 @@ * */ +#include #include #include #include @@ -67,6 +68,18 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) offset += priv->num_tx_rings_p_up; } +#ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (up) { + if (priv->dcbx_cap) + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + priv->cee_config.pfc_state = false; + } + } +#endif /* CONFIG_MLX4_EN_DCB */ + return 0; } @@ -1201,8 +1214,8 @@ static void mlx4_en_netpoll(struct net_device *dev) struct mlx4_en_cq *cq; int i; - for (i = 0; i < priv->rx_ring_num; i++) { - cq = priv->rx_cq[i]; + for (i = 0; i < priv->tx_ring_num; i++) { + cq = priv->tx_cq[i]; napi_schedule(&cq->napi); } } @@ -1510,6 +1523,24 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); } +static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv, + int tx_ring_idx) +{ + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx]; + int rr_index; + + rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx; + if (rr_index >= 0) { + tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; + tx_ring->recycle_ring = priv->rx_ring[rr_index]; + en_dbg(DRV, priv, + "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n", + tx_ring_idx, rr_index); + } else { + tx_ring->recycle_ring = NULL; + } +} + int mlx4_en_start_port(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -1632,6 +1663,8 @@ int mlx4_en_start_port(struct net_device *dev) } tx_ring->tx_queue = netdev_get_tx_queue(dev, i); + mlx4_en_init_recycle_ring(priv, i); + /* Arm CQ for TX completions */ mlx4_en_arm_cq(priv, cq); @@ -1696,10 +1729,9 @@ int mlx4_en_start_port(struct net_device *dev) /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task); -#ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) - vxlan_get_rx_port(dev); -#endif + udp_tunnel_get_rx_info(dev); + priv->port_up = true; netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -2177,6 +2209,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) en_err(priv, "Bad MTU size:%d.\n", new_mtu); return -EPERM; } + if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) { + en_err(priv, "MTU size:%d requires frags but XDP running\n", + new_mtu); + return -EOPNOTSUPP; + } dev->mtu = new_mtu; if (netif_running(dev)) { @@ -2434,7 +2471,6 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev, return 0; } -#ifdef CONFIG_MLX4_EN_VXLAN static void mlx4_en_add_vxlan_offloads(struct work_struct *work) { int ret; @@ -2484,15 +2520,19 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) } static void mlx4_en_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port; - if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 
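[Editor's note: mlx4_en_init_recycle_ring() above pairs the trailing XDP TX rings with RX rings via rr_index = (xdp_ring_num - tx_ring_num) + tx_ring_idx. A quick check of that arithmetic as stand-alone C; the ring counts are made-up example values:

#include <stdio.h>

int main(void)
{
	int tx_ring_num = 12, xdp_ring_num = 4;
	int tx;

	for (tx = 0; tx < tx_ring_num; tx++) {
		int rr = (xdp_ring_num - tx_ring_num) + tx;

		/* only the last xdp_ring_num TX rings get a recycle ring */
		if (rr >= 0)
			printf("tx_ring[%d] recycles into rx_ring[%d]\n", tx, rr);
	}
	/* prints pairings for tx rings 8..11 -> rx rings 0..3 */
	return 0;
}

End of editor's note.]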
return; - if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return; current_port = priv->vxlan_port; @@ -2507,15 +2547,19 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev, } static void mlx4_en_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port; - if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return; current_port = priv->vxlan_port; @@ -2550,7 +2594,6 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, return features; } -#endif static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) { @@ -2579,6 +2622,103 @@ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 m return err; } +static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct bpf_prog *old_prog; + int xdp_ring_num; + int port_up = 0; + int err; + int i; + + xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0; + + /* No need to reconfigure buffers when simply swapping the + * program for a new one. + */ + if (priv->xdp_ring_num == xdp_ring_num) { + if (prog) { + prog = bpf_prog_add(prog, priv->rx_ring_num - 1); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + for (i = 0; i < priv->rx_ring_num; i++) { + /* This xchg is paired with READ_ONCE in the fastpath */ + old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + } + return 0; + } + + if (priv->num_frags > 1) { + en_err(priv, "Cannot set XDP if MTU requires multiple frags\n"); + return -EOPNOTSUPP; + } + + if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) { + en_err(priv, + "Minimum %d tx channels required to run XDP\n", + (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP); + return -EINVAL; + } + + if (prog) { + prog = bpf_prog_add(prog, priv->rx_ring_num - 1); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + priv->xdp_ring_num = xdp_ring_num; + netif_set_real_num_tx_queues(dev, priv->tx_ring_num - + priv->xdp_ring_num); + + for (i = 0; i < priv->rx_ring_num; i++) { + old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + } + + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) { + en_err(priv, "Failed starting port %d for XDP change\n", + priv->port); + queue_work(mdev->workqueue, &priv->watchdog_task); + } + } + + mutex_unlock(&mdev->state_lock); + return 0; +} + +static bool mlx4_xdp_attached(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return !!priv->xdp_ring_num; +} + +static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return mlx4_xdp_set(dev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = mlx4_xdp_attached(dev); + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = 
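/* mlx4_xdp_set() above distinguishes a pure program swap (same ring
 * layout) from a full reconfiguration: for a swap it takes rx_ring_num - 1
 * extra references with bpf_prog_add() and publishes the new program per
 * ring with xchg(), paired with READ_ONCE() on the poll path. A loose
 * user-space analogue of that lockless publish, using C11 atomics in place
 * of the kernel primitives and eliding the refcounting:
 */
#include <stdatomic.h>
#include <stdio.h>

#define RX_RINGS 4      /* hypothetical */

static _Atomic(void *) ring_prog[RX_RINGS];

static void swap_prog(void *new_prog)
{
        int i;

        for (i = 0; i < RX_RINGS; i++) {
                void *old = atomic_exchange(&ring_prog[i], new_prog);

                if (old)
                        printf("ring %d: dropped ref on old prog %p\n", i, old);
        }
}

int main(void)
{
        int prog_a, prog_b;     /* stand-ins for struct bpf_prog */

        swap_prog(&prog_a);     /* attach */
        swap_prog(&prog_b);     /* replace */
        swap_prog(NULL);        /* detach */
        return 0;
}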
mlx4_en_close, @@ -2603,12 +2743,11 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, -#ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, -#endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, + .ndo_xdp = mlx4_xdp, }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -2641,12 +2780,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, -#ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, -#endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, + .ndo_xdp = mlx4_xdp, }; struct mlx4_en_bond { @@ -2936,10 +3074,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); -#ifdef CONFIG_MLX4_EN_VXLAN INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); -#endif #ifdef CONFIG_RFS_ACCEL INIT_LIST_HEAD(&priv->filters); spin_lock_init(&priv->filters_lock); @@ -2979,6 +3115,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; + priv->flags |= MLX4_EN_DCB_ENABLED; + priv->cee_config.pfc_state = false; + + for (i = 0; i < MLX4_EN_NUM_UP; i++) + priv->cee_config.dcb_pfc[i] = pfc_disabled; + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { dev->dcbnl_ops = &mlx4_en_dcbnl_ops; } else { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 99b5407f2..2040dad86 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -32,6 +32,7 @@ */ #include <net/busy_poll.h> +#include <linux/bpf.h> #include <linux/mlx4/cq.h> #include <linux/slab.h> #include <linux/mlx4/qp.h> @@ -57,7 +58,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, struct page *page; dma_addr_t dma; - for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { + for (order = frag_info->order; ;) { gfp_t gfp = _gfp; if (order) @@ -70,7 +71,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, return -ENOMEM; } dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, - PCI_DMA_FROMDEVICE); + frag_info->dma_dir); if (dma_mapping_error(priv->ddev, dma)) { put_page(page); return -ENOMEM; @@ -124,7 +125,8 @@ out: while (i--) { if (page_alloc[i].page != ring_alloc[i].page) { dma_unmap_page(priv->ddev, page_alloc[i].dma, - page_alloc[i].page_size, PCI_DMA_FROMDEVICE); + page_alloc[i].page_size, + priv->frag_info[i].dma_dir); page = page_alloc[i].page; /* Revert changes done by mlx4_alloc_pages */ page_ref_sub(page, page_alloc[i].page_size / @@ -145,7 +147,7 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv, if (next_frag_end > frags[i].page_size) dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, -
PCI_DMA_FROMDEVICE); + frag_info->dma_dir); if (frags[i].page) put_page(frags[i].page); @@ -176,7 +178,8 @@ out: page_alloc = &ring->page_alloc[i]; dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->page_size, PCI_DMA_FROMDEVICE); + page_alloc->page_size, + priv->frag_info[i].dma_dir); page = page_alloc->page; /* Revert changes done by mlx4_alloc_pages */ page_ref_sub(page, page_alloc->page_size / @@ -201,7 +204,7 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, i, page_count(page_alloc->page)); dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->page_size, PCI_DMA_FROMDEVICE); + page_alloc->page_size, frag_info->dma_dir); while (page_alloc->page_offset + frag_info->frag_stride < page_alloc->page_size) { put_page(page_alloc->page); @@ -244,6 +247,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_alloc *frags = ring->rx_info + (index << priv->log_rx_info); + if (ring->page_cache.index > 0) { + frags[0] = ring->page_cache.buf[--ring->page_cache.index]; + rx_desc->data[0].addr = cpu_to_be64(frags[0].dma); + return 0; + } + return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); } @@ -502,13 +511,35 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) } } +/* When the rx ring is running in page-per-packet mode, a released frame can go + * directly into a small cache, to avoid unmapping or touching the page + * allocator. In bpf prog performance scenarios, buffers are either forwarded + * or dropped, never converted to skbs, so every page can come directly from + * this cache when it is sized to be a multiple of the napi budget. + */ +bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, + struct mlx4_en_rx_alloc *frame) +{ + struct mlx4_en_page_cache *cache = &ring->page_cache; + + if (cache->index >= MLX4_EN_CACHE_SIZE) + return false; + + cache->buf[cache->index++] = *frame; + return true; +} + void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rx_ring *ring = *pring; + struct bpf_prog *old_prog; + old_prog = READ_ONCE(ring->xdp_prog); + if (old_prog) + bpf_prog_put(old_prog); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; @@ -519,6 +550,16 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { + int i; + + for (i = 0; i < ring->page_cache.index; i++) { + struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i]; + + dma_unmap_page(priv->ddev, frame->dma, frame->page_size, + priv->frag_info[0].dma_dir); + put_page(frame->page); + } + ring->page_cache.index = 0; mlx4_en_free_rx_buf(priv, ring); if (ring->stride <= TXBB_SIZE) ring->buf -= TXBB_SIZE; @@ -740,7 +781,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; struct mlx4_en_rx_alloc *frags; struct mlx4_en_rx_desc *rx_desc; + struct bpf_prog *xdp_prog; + int doorbell_pending; struct sk_buff *skb; + int tx_index; int index; int nr; unsigned int length; @@ -756,6 +800,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (budget <= 0) return polled; + xdp_prog = READ_ONCE(ring->xdp_prog); + doorbell_pending = 0; + tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring; + /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * 
descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ @@ -832,6 +880,43 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); + /* A bpf program gets first chance to drop the packet. It may + * read bytes but not past the end of the frag. + */ + if (xdp_prog) { + struct xdp_buff xdp; + dma_addr_t dma; + u32 act; + + dma = be64_to_cpu(rx_desc->data[0].addr); + dma_sync_single_for_cpu(priv->ddev, dma, + priv->frag_info[0].frag_size, + DMA_FROM_DEVICE); + + xdp.data = page_address(frags[0].page) + + frags[0].page_offset; + xdp.data_end = xdp.data + length; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + if (!mlx4_en_xmit_frame(frags, dev, + length, tx_index, + &doorbell_pending)) + goto consumed; + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + case XDP_DROP: + if (mlx4_en_rx_recycle(ring, frags)) + goto consumed; + goto next; + } + } + if (likely(dev->features & NETIF_F_RXCSUM)) { if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) { @@ -983,6 +1068,7 @@ next: for (nr = 0; nr < priv->num_frags; nr++) mlx4_en_free_frag(priv, frags, nr); +consumed: ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; @@ -991,6 +1077,9 @@ next: } out: + if (doorbell_pending) + mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]); + AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); mlx4_cq_set_ci(&cq->mcq); wmb(); /* ensure HW sees CQ consumer before we post new buffers */ @@ -1058,22 +1147,35 @@ static const int frag_sizes[] = { void mlx4_en_calc_rx_buf(struct net_device *dev) { + enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE; struct mlx4_en_priv *priv = netdev_priv(dev); - /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple - * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). - */ - int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN); + int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu); + int order = MLX4_EN_ALLOC_PREFER_ORDER; + u32 align = SMP_CACHE_BYTES; int buf_size = 0; int i = 0; + /* bpf requires buffers to be set up as 1 packet per page. + * This only works when num_frags == 1. + */ + if (priv->xdp_ring_num) { + dma_dir = PCI_DMA_BIDIRECTIONAL; + /* This will gain efficient xdp frame recycling at the expense + * of more costly truesize accounting + */ + align = PAGE_SIZE; + order = 0; + } + while (buf_size < eff_mtu) { + priv->frag_info[i].order = order; priv->frag_info[i].frag_size = (eff_mtu > buf_size + frag_sizes[i]) ? 
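/* The verdict switch above gives the attached program first look at each
 * frame: XDP_TX hands the page to the paired TX ring, XDP_DROP (and any
 * unknown verdict) recycles it, and only XDP_PASS continues to skb
 * construction. A reduced model of that dispatch; the action values match
 * the uapi enum, the handler strings are illustrative only:
 */
#include <stdio.h>

enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX };

static const char *handle_verdict(enum xdp_action act)
{
        switch (act) {
        case XDP_PASS:
                return "build skb and hand to the stack";
        case XDP_TX:
                return "xmit on the paired TX ring, buffer consumed";
        default:        /* unknown verdicts fall through to drop */
        case XDP_ABORTED:
        case XDP_DROP:
                return "recycle page into the ring cache";
        }
}

int main(void)
{
        int a;

        for (a = XDP_ABORTED; a <= XDP_TX; a++)
                printf("%d -> %s\n", a, handle_verdict(a));
        return 0;
}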
frag_sizes[i] : eff_mtu - buf_size; priv->frag_info[i].frag_prefix_size = buf_size; priv->frag_info[i].frag_stride = - ALIGN(priv->frag_info[i].frag_size, - SMP_CACHE_BYTES); + ALIGN(priv->frag_info[i].frag_size, align); + priv->frag_info[i].dma_dir = dma_dir; buf_size += priv->frag_info[i].frag_size; i++; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 76aa4d271..e2509bba3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -196,6 +196,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->last_nr_txbb = 1; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); + ring->free_tx_desc = mlx4_en_free_tx_desc; ring->qp_state = MLX4_QP_STATE_RST; ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); @@ -265,10 +266,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, } -static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, - struct mlx4_en_tx_ring *ring, - int index, u8 owner, u64 timestamp, - int napi_mode) +u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode) { struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; @@ -344,6 +345,27 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, return tx_info->nr_txbb; } +u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode) +{ + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + struct mlx4_en_rx_alloc frame = { + .page = tx_info->page, + .dma = tx_info->map0_dma, + .page_offset = 0, + .page_size = PAGE_SIZE, + }; + + if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { + dma_unmap_page(priv->ddev, tx_info->map0_dma, + PAGE_SIZE, priv->frag_info[0].dma_dir); + put_page(tx_info->page); + } + + return tx_info->nr_txbb; +} int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) { @@ -362,7 +384,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) } while (ring->cons != ring->prod) { - ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, + ring->last_nr_txbb = ring->free_tx_desc(priv, ring, ring->cons & ring->size_mask, !!(ring->cons & ring->size), 0, 0 /* Non-NAPI caller */); @@ -444,7 +466,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, timestamp = mlx4_en_get_cqe_ts(cqe); /* free next descriptor */ - last_nr_txbb = mlx4_en_free_tx_desc( + last_nr_txbb = ring->free_tx_desc( priv, ring, ring_index, !!((ring_cons + txbbs_skipped) & ring->size), timestamp, napi_budget); @@ -476,6 +498,9 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + if (ring->free_tx_desc == mlx4_en_recycle_tx_desc) + return done < budget; + netdev_tx_completed_queue(ring->tx_queue, packets, bytes); /* Wakeup Tx queue if this stopped, and ring is not full. 
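/* The ring->free_tx_desc indirection above lets TX completion run a
 * different handler per ring: skb rings unmap and consume the skb, XDP
 * rings hand the page straight back to their recycle ring (and, as the
 * early return above shows, skip BQL accounting entirely). A reduced
 * model of that dispatch:
 */
#include <stdio.h>

struct ring;
typedef unsigned int (*free_tx_desc_t)(struct ring *r, int index);

struct ring {
        const char *kind;
        free_tx_desc_t free_tx_desc;
};

static unsigned int free_skb_desc(struct ring *r, int index)
{
        printf("%s: unmap frags, free skb at %d\n", r->kind, index);
        return 1;       /* txbbs consumed */
}

static unsigned int recycle_desc(struct ring *r, int index)
{
        printf("%s: push page at %d into rx cache\n", r->kind, index);
        return 1;
}

int main(void)
{
        struct ring skb_ring = { "skb ring", free_skb_desc };
        struct ring xdp_ring = { "xdp ring", recycle_desc };

        skb_ring.free_tx_desc(&skb_ring, 0);
        xdp_ring.free_tx_desc(&xdp_ring, 0);
        return 0;
}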
@@ -631,8 +656,7 @@ static int get_real_size(const struct sk_buff *skb, static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, const struct sk_buff *skb, const struct skb_shared_info *shinfo, - int real_size, u16 *vlan_tag, - int tx_ind, void *fragptr) + void *fragptr) { struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; @@ -700,10 +724,66 @@ static void mlx4_bf_copy(void __iomem *dst, const void *src, __iowrite64_copy(dst, src, bytecnt / 8); } +void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring) +{ + wmb(); + /* Since there is no iowrite*_native() that writes the + * value as is, without byteswapping - using the one + * the doesn't do byteswapping in the relevant arch + * endianness. + */ +#if defined(__LITTLE_ENDIAN) + iowrite32( +#else + iowrite32be( +#endif + ring->doorbell_qpn, + ring->bf.uar->map + MLX4_SEND_DOORBELL); +} + +static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring, + struct mlx4_en_tx_desc *tx_desc, + union mlx4_wqe_qpn_vlan qpn_vlan, + int desc_size, int bf_index, + __be32 op_own, bool bf_ok, + bool send_doorbell) +{ + tx_desc->ctrl.qpn_vlan = qpn_vlan; + + if (bf_ok) { + op_own |= htonl((bf_index & 0xffff) << 8); + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + + wmb(); + + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, + desc_size); + + wmb(); + + ring->bf.offset ^= ring->bf.buf_size; + } else { + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + if (send_doorbell) + mlx4_en_xmit_doorbell(ring); + else + ring->xmit_more++; + } +} + netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) { struct skb_shared_info *shinfo = skb_shinfo(skb); struct mlx4_en_priv *priv = netdev_priv(dev); + union mlx4_wqe_qpn_vlan qpn_vlan = {}; struct device *ddev = priv->ddev; struct mlx4_en_tx_ring *ring; struct mlx4_en_tx_desc *tx_desc; @@ -715,7 +795,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) int real_size; u32 index, bf_index; __be32 op_own; - u16 vlan_tag = 0; u16 vlan_proto = 0; int i_frag; int lso_header_size; @@ -725,6 +804,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) bool stop_queue; bool inline_ok; u32 ring_cons; + bool bf_ok; tx_ind = skb_get_queue_mapping(skb); ring = priv->tx_ring[tx_ind]; @@ -738,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) real_size = get_real_size(skb, shinfo, dev, &lso_header_size, &inline_ok, &fragptr); if (unlikely(!real_size)) - goto tx_drop; + goto tx_drop_count; /* Align descriptor to TXBB size */ desc_size = ALIGN(real_size, TXBB_SIZE); @@ -746,12 +826,20 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { if (netif_msg_tx_err(priv)) en_warn(priv, "Oversized header or SG list\n"); - goto tx_drop; + goto tx_drop_count; } + bf_ok = ring->bf_enabled; if (skb_vlan_tag_present(skb)) { - vlan_tag = skb_vlan_tag_get(skb); + qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb)); vlan_proto = be16_to_cpu(skb->vlan_proto); + if (vlan_proto == ETH_P_8021AD) + qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; + else if (vlan_proto == ETH_P_8021Q) + qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; + else + qpn_vlan.ins_vlan = 0; + bf_ok = false; } netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); @@ -771,6 
+859,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) else { tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; bounce = true; + bf_ok = false; } /* Save skb in tx_info ring */ @@ -907,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); if (tx_info->inl) - build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag, - tx_ind, fragptr); + build_inline_wqe(tx_desc, skb, shinfo, fragptr); if (skb->encapsulation) { union { @@ -946,60 +1034,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) real_size = (real_size / 16) & 0x3f; - if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && - !skb_vlan_tag_present(skb) && send_doorbell) { - tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | - cpu_to_be32(real_size); + bf_ok &= desc_size <= MAX_BF && send_doorbell; - op_own |= htonl((bf_index & 0xffff) << 8); - /* Ensure new descriptor hits memory - * before setting ownership of this descriptor to HW - */ - dma_wmb(); - tx_desc->ctrl.owner_opcode = op_own; - - wmb(); - - mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, - desc_size); - - wmb(); - - ring->bf.offset ^= ring->bf.buf_size; - } else { - tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); - if (vlan_proto == ETH_P_8021AD) - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; - else if (vlan_proto == ETH_P_8021Q) - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; - else - tx_desc->ctrl.ins_vlan = 0; + if (bf_ok) + qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size); + else + qpn_vlan.fence_size = real_size; - tx_desc->ctrl.fence_size = real_size; - - /* Ensure new descriptor hits memory - * before setting ownership of this descriptor to HW - */ - dma_wmb(); - tx_desc->ctrl.owner_opcode = op_own; - if (send_doorbell) { - wmb(); - /* Since there is no iowrite*_native() that writes the - * value as is, without byteswapping - using the one - * the doesn't do byteswapping in the relevant arch - * endianness. 
- */ -#if defined(__LITTLE_ENDIAN) - iowrite32( -#else - iowrite32be( -#endif - ring->doorbell_qpn, - ring->bf.uar->map + MLX4_SEND_DOORBELL); - } else { - ring->xmit_more++; - } - } + mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, desc_size, bf_index, + op_own, bf_ok, send_doorbell); if (unlikely(stop_queue)) { /* If queue was emptied after the if (stop_queue) , and before @@ -1028,9 +1071,114 @@ tx_drop_unmap: PCI_DMA_TODEVICE); } +tx_drop_count: + ring->tx_dropped++; tx_drop: dev_kfree_skb_any(skb); - ring->tx_dropped++; return NETDEV_TX_OK; } +netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, + struct net_device *dev, unsigned int length, + int tx_ind, int *doorbell_pending) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + union mlx4_wqe_qpn_vlan qpn_vlan = {}; + struct mlx4_en_tx_ring *ring; + struct mlx4_en_tx_desc *tx_desc; + struct mlx4_wqe_data_seg *data; + struct mlx4_en_tx_info *tx_info; + int index, bf_index; + bool send_doorbell; + int nr_txbb = 1; + bool stop_queue; + dma_addr_t dma; + int real_size; + __be32 op_own; + u32 ring_cons; + bool bf_ok; + + BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE, + "mlx4_en_xmit_frame requires minimum size tx desc"); + + ring = priv->tx_ring[tx_ind]; + + if (!priv->port_up) + goto tx_drop; + + if (mlx4_en_is_tx_ring_full(ring)) + goto tx_drop_count; + + /* fetch ring->cons far ahead before needing it to avoid stall */ + ring_cons = READ_ONCE(ring->cons); + + index = ring->prod & ring->size_mask; + tx_info = &ring->tx_info[index]; + + bf_ok = ring->bf_enabled; + + /* Track current inflight packets for performance analysis */ + AVG_PERF_COUNTER(priv->pstats.inflight_avg, + (u32)(ring->prod - ring_cons - 1)); + + bf_index = ring->prod; + tx_desc = ring->buf + index * TXBB_SIZE; + data = &tx_desc->data; + + dma = frame->dma; + + tx_info->page = frame->page; + frame->page = NULL; + tx_info->map0_dma = dma; + tx_info->map0_byte_count = length; + tx_info->nr_txbb = nr_txbb; + tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN); + tx_info->data_offset = (void *)data - (void *)tx_desc; + tx_info->ts_requested = 0; + tx_info->nr_maps = 1; + tx_info->linear = 1; + tx_info->inl = 0; + + dma_sync_single_for_device(priv->ddev, dma, length, PCI_DMA_TODEVICE); + + data->addr = cpu_to_be64(dma); + data->lkey = ring->mr_key; + dma_wmb(); + data->byte_count = cpu_to_be32(length); + + /* tx completion can avoid cache line miss for common cases */ + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; + + op_own = cpu_to_be32(MLX4_OPCODE_SEND) | + ((ring->prod & ring->size) ? + cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + + ring->packets++; + ring->bytes += tx_info->nr_bytes; + AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length); + + ring->prod += nr_txbb; + + stop_queue = mlx4_en_is_tx_ring_full(ring); + send_doorbell = stop_queue || + *doorbell_pending > MLX4_EN_DOORBELL_BUDGET; + bf_ok &= send_doorbell; + + real_size = ((CTRL_SIZE + nr_txbb * DS_SIZE) / 16) & 0x3f; + + if (bf_ok) + qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size); + else + qpn_vlan.fence_size = real_size; + + mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, bf_index, + op_own, bf_ok, send_doorbell); + *doorbell_pending = send_doorbell ? 
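/* mlx4_en_xmit_frame() above batches doorbells for XDP_TX: it only rings
 * when the pending count exceeds MLX4_EN_DOORBELL_BUDGET (or the ring
 * fills), and mlx4_en_process_rx_cq() flushes the remainder once per NAPI
 * poll. The same bookkeeping in a standalone loop:
 */
#include <stdbool.h>
#include <stdio.h>

#define DOORBELL_BUDGET 8       /* mirrors MLX4_EN_DOORBELL_BUDGET */

static void post_frame(int *pending, bool ring_full)
{
        bool send_doorbell = ring_full || *pending > DOORBELL_BUDGET;

        if (send_doorbell) {
                printf("doorbell, %d frames were pending\n", *pending);
                *pending = 0;
        } else {
                (*pending)++;
        }
}

int main(void)
{
        int pending = 0;
        int i;

        for (i = 0; i < 25; i++)
                post_frame(&pending, false);
        if (pending)    /* end of poll: flush leftovers */
                printf("final doorbell flushes %d frames\n", pending);
        return 0;
}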
0 : *doorbell_pending + 1; + + return NETDEV_TX_OK; + +tx_drop_count: + ring->tx_dropped++; +tx_drop: + return NETDEV_TX_BUSY; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index f61397745..cf8f8a72a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) return 0; err_out_unmap: - while (i >= 0) - mlx4_free_eq(dev, &priv->eq_table.eq[i--]); + while (i > 0) + mlx4_free_eq(dev, &priv->eq_table.eq[--i]); #ifdef CONFIG_RFS_ACCEL for (i = 1; i <= dev->caps.num_ports; i++) { if (mlx4_priv(dev)->port[i].rmap) { diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index e97094598..d728704d0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -721,6 +721,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c +#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d #define QUERY_DEV_CAP_VXLAN 0x9e #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 @@ -935,6 +936,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; if (field32 & (1 << 7)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; + MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT); + if (field32 & (1 << 17)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT; MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); if (field & 1<<6) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; @@ -1128,6 +1132,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c port_cap->max_pkeys = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); port_cap->max_vl = field & 0xf; + port_cap->max_tc_eth = field >> 4; MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); port_cap->log_max_macs = field & 0xf; port_cap->log_max_vlans = field >> 4; @@ -2456,6 +2461,42 @@ int mlx4_NOP(struct mlx4_dev *dev) MLX4_CMD_NATIVE); } +int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, + const u32 offset[], + u32 value[], size_t array_len, u8 port) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + size_t i; + int ret; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + outbox = mailbox->buf; + + ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier, + MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (ret) + goto out; + + for (i = 0; i < array_len; i++) { + if (offset[i] > MLX4_MAILBOX_SIZE) { + ret = -EINVAL; + goto out; + } + + MLX4_GET(value[i], outbox, offset[i]); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} +EXPORT_SYMBOL(mlx4_query_diag_counters); + int mlx4_get_phys_port_id(struct mlx4_dev *dev) { u8 port; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 7ea258af6..cdbd76f10 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -53,6 +53,7 @@ struct mlx4_port_cap { int ib_mtu; int max_port_width; int max_vl; + int max_tc_eth; int max_gids; int max_pkeys; u64 def_mac; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 
dec77d6f0..0e8b7c449 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -147,7 +147,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable) if (enable) { dev->flags |= MLX4_FLAG_BONDED; } else { - ret = mlx4_virt2phy_port_map(dev, 1, 2); + ret = mlx4_virt2phy_port_map(dev, 1, 2); if (ret) { mlx4_err(dev, "Fail to reset port map\n"); return ret; @@ -218,6 +218,9 @@ void mlx4_unregister_device(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_interface *intf; + if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)) + return; + mlx4_stop_catas_poll(dev); mutex_lock(&intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 546fab0ec..7183ac413 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -292,6 +292,7 @@ static int _mlx4_dev_port(struct mlx4_dev *dev, int port, dev->caps.pkey_table_len[port] = port_cap->max_pkeys; dev->caps.port_width_cap[port] = port_cap->max_port_width; dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; + dev->caps.max_tc_eth = port_cap->max_tc_eth; dev->caps.def_mac[port] = port_cap->def_mac; dev->caps.supported_type[port] = port_cap->supported_port_types; dev->caps.suggested_type[port] = port_cap->suggested_type; @@ -2599,7 +2600,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) err = mlx4_init_uar_table(dev); if (err) { mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); - return err; + return err; } err = mlx4_uar_alloc(dev, &priv->driver_uar); @@ -2969,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) mlx4_err(dev, "Failed to create mtu file for port %d\n", port); device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); + devlink_port_unregister(&info->devlink_port); info->port = -1; } @@ -2983,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); device_remove_file(&info->dev->persist->pdev->dev, &info->port_mtu_attr); + devlink_port_unregister(&info->devlink_port); + #ifdef CONFIG_RFS_ACCEL free_irq_cpu_rmap(info->rmap); info->rmap = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index f2d092001..94b891c11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -618,8 +618,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, err = mlx4_READ_ENTRY(dev, entry->index, mailbox); - if (err) - goto out_mailbox; + if (err) + goto out_mailbox; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; @@ -657,8 +657,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); - if (err) - goto out_mailbox; + if (err) + goto out_mailbox; } } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 13d297ee3..9099dbd04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -132,6 +132,7 @@ enum { MLX4_EN_NUM_UP) #define MLX4_EN_DEFAULT_TX_WORK 256 +#define MLX4_EN_DOORBELL_BUDGET 8 /* Target number of packets to coalesce with interrupt moderation */ #define MLX4_EN_RX_COAL_TARGET 44 @@ -164,6 +165,10 @@ enum { #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) #define MLX4_EN_MIN_MTU 46 +/* VLAN_HLEN is added twice,to 
support skb vlan tagged with multiple + * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). + */ +#define MLX4_EN_EFF_MTU(mtu) ((mtu) + ETH_HLEN + (2 * VLAN_HLEN)) #define ETH_BCAST 0xffffffffffffULL #define MLX4_EN_LOOPBACK_RETRIES 5 @@ -215,7 +220,10 @@ enum cq_type { struct mlx4_en_tx_info { - struct sk_buff *skb; + union { + struct sk_buff *skb; + struct page *page; + }; dma_addr_t map0_dma; u32 map0_byte_count; u32 nr_txbb; @@ -255,6 +263,14 @@ struct mlx4_en_rx_alloc { u32 page_size; }; +#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT) +struct mlx4_en_page_cache { + u32 index; + struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE]; +}; + +struct mlx4_en_priv; + struct mlx4_en_tx_ring { /* cache line used and dirtied in tx completion * (mlx4_en_free_tx_buf()) @@ -288,6 +304,11 @@ struct mlx4_en_tx_ring { __be32 mr_key; void *buf; struct mlx4_en_tx_info *tx_info; + struct mlx4_en_rx_ring *recycle_ring; + u32 (*free_tx_desc)(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, + u64 timestamp, int napi_mode); u8 *bounce_buf; struct mlx4_qp_context context; int qpn; @@ -319,6 +340,8 @@ struct mlx4_en_rx_ring { u8 fcs_del; void *buf; void *rx_info; + struct bpf_prog *xdp_prog; + struct mlx4_en_page_cache page_cache; unsigned long bytes; unsigned long packets; unsigned long csum_ok; @@ -440,7 +463,9 @@ struct mlx4_en_mc_list { struct mlx4_en_frag_info { u16 frag_size; u16 frag_prefix_size; - u16 frag_stride; + u32 frag_stride; + enum dma_data_direction dma_dir; + int order; }; #ifdef CONFIG_MLX4_EN_DCB @@ -450,6 +475,17 @@ struct mlx4_en_frag_info { #define MLX4_EN_TC_ETS 7 +enum dcb_pfc_type { + pfc_disabled = 0, + pfc_enabled_full, + pfc_enabled_tx, + pfc_enabled_rx +}; + +struct mlx4_en_cee_config { + bool pfc_state; + enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP]; +}; #endif struct ethtool_flow_id { @@ -469,6 +505,9 @@ enum { MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4), MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), +#ifdef CONFIG_MLX4_EN_DCB + MLX4_EN_FLAG_DCB_ENABLED = (1 << 6), +#endif }; #define PORT_BEACON_MAX_LIMIT (65535) @@ -536,6 +575,7 @@ struct mlx4_en_priv { struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; u16 num_frags; u16 log_rx_info; + int xdp_ring_num; struct mlx4_en_tx_ring **tx_ring; struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; @@ -547,10 +587,8 @@ struct mlx4_en_priv { struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; -#ifdef CONFIG_MLX4_EN_VXLAN struct work_struct vxlan_add_task; struct work_struct vxlan_del_task; -#endif struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_counter_stats pf_stats; @@ -572,9 +610,12 @@ struct mlx4_en_priv { u32 counter_index; #ifdef CONFIG_MLX4_EN_DCB +#define MLX4_EN_DCB_ENABLED 0x3 struct ieee_ets ets; u16 maxrate[IEEE_8021QAZ_MAX_TCS]; enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; + struct mlx4_en_cee_config cee_config; + u8 dcbx_cap; #endif #ifdef CONFIG_RFS_ACCEL spinlock_t filters_lock; @@ -644,6 +685,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, + struct net_device *dev, unsigned int length, + int tx_ind, int *doorbell_pending); +void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring); +bool 
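/* struct mlx4_en_page_cache above is a bare LIFO: mlx4_en_rx_recycle()
 * pushes while there is room and mlx4_en_prepare_rx_desc() pops before
 * falling back to the page allocator. It is sized at twice the NAPI
 * budget so a full poll of XDP_TX/XDP_DROP frames can always be
 * reabsorbed. A self-contained model of the push/pop discipline:
 */
#include <stdbool.h>
#include <stdio.h>

#define NAPI_POLL_WEIGHT 64
#define CACHE_SIZE (2 * NAPI_POLL_WEIGHT)       /* MLX4_EN_CACHE_SIZE */

struct frame { void *page; };

static struct {
        unsigned int index;
        struct frame buf[CACHE_SIZE];
} cache;

static bool cache_push(struct frame f)
{
        if (cache.index >= CACHE_SIZE)
                return false;   /* caller unmaps and frees the page */
        cache.buf[cache.index++] = f;
        return true;
}

static bool cache_pop(struct frame *f)
{
        if (cache.index == 0)
                return false;   /* caller allocates a fresh page */
        *f = cache.buf[--cache.index];
        return true;
}

int main(void)
{
        static int dummy;
        struct frame f = { .page = &dummy };    /* hypothetical page */

        printf("push ok: %d\n", cache_push(f));
        printf("pop ok:  %d\n", cache_pop(&f));
        return 0;
}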
mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, + struct mlx4_en_rx_alloc *frame); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, @@ -672,6 +719,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, int budget); int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget); +u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode); +u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode); void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context); diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 93195191f..395b5463c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -248,7 +248,7 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) offset, order); return; } - __mlx4_free_mtt_range(dev, offset, order); + __mlx4_free_mtt_range(dev, offset, order); } void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index b3cc3ab63..6fc156a39 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) goto free_uar; } - uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, + uar->index << PAGE_SHIFT, + PAGE_SIZE); if (!uar->bf_map) { err = -ENOMEM; goto unamp_uar; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 087b23b32..c5b206429 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -52,6 +52,7 @@ #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 #define MLX4_IGNORE_FCS_MASK 0x1 +#define MLX4_TC_MAX_NUMBER 8 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) { @@ -2015,3 +2016,14 @@ out: return ret; } EXPORT_SYMBOL(mlx4_get_module_info); + +int mlx4_max_tc(struct mlx4_dev *dev) +{ + u8 num_tc = dev->caps.max_tc_eth; + + if (!num_tc) + num_tc = MLX4_TC_MAX_NUMBER; + + return num_tc; +} +EXPORT_SYMBOL(mlx4_max_tc); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index cd9b2b28d..8b81114bd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2372,16 +2372,15 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, __mlx4_mpt_release(dev, index); break; case RES_OP_MAP_ICM: - index = get_param_l(&in_param); - id = index & mpt_mask(dev); - err = mr_res_start_move_to(dev, slave, id, - RES_MPT_RESERVED, &mpt); - if (err) - return err; - - __mlx4_mpt_free_icm(dev, mpt->key); - res_end_move(dev, slave, RES_MPT, id); + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, + RES_MPT_RESERVED, &mpt); + if (err) return err; + + __mlx4_mpt_free_icm(dev, mpt->key); + res_end_move(dev, slave, RES_MPT, id); break; default: err = -EINVAL; @@ -4253,9 +4252,8 @@ int 
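/* mlx4_max_tc() above trusts the firmware-reported max_tc_eth when it is
 * non-zero and falls back to the 802.1 maximum of 8 traffic classes
 * otherwise. The same fallback pattern, standalone:
 */
#include <stdio.h>

#define TC_MAX_NUMBER 8 /* mirrors MLX4_TC_MAX_NUMBER */

static int max_tc(unsigned char fw_max_tc)
{
        return fw_max_tc ? fw_max_tc : TC_MAX_NUMBER;
}

int main(void)
{
        printf("fw reports 4 -> %d\n", max_tc(4));
        printf("fw reports 0 -> %d\n", max_tc(0));
        return 0;
}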
mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) && !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) { - mlx4_warn(dev, - "Src check LB for slave %d isn't supported\n", - slave); + mlx4_warn(dev, "Src check LB for slave %d isn't supported\n", + slave); return -ENOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 1cf722eba..aae46884b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -4,6 +4,7 @@ config MLX5_CORE tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" + depends on MAY_USE_DEVLINK depends on PCI default n ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9ea7b5830..05cc1effc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -1,11 +1,13 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ - health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o + health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ + mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ + fs_counters.o rl.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ - en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ + en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ + en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ + en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 943b1bd43..bf722aa88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -44,6 +44,7 @@ #include <linux/mlx5/vport.h> #include <linux/mlx5/transobj.h> #include <linux/rhashtable.h> +#include <net/switchdev.h> #include "wq.h" #include "mlx5_core.h" #include "en_stats.h" @@ -72,13 +73,18 @@ #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) #define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ MLX5_MPWRQ_WQE_PAGE_ORDER) -#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \ - BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)) + +#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) +#define MLX5E_REQUIRED_MTTS(rqs, wqes)\ + (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) +#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX) + #define MLX5_UMR_ALIGN (2048) #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 @@ -88,6 +94,7 @@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) +#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_BF_BUDGET 16 @@ -126,6 +133,12 @@ static inline int
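/* The MLX5_MTT_OCTW()/MLX5E_REQUIRED_MTTS() macros above size the UMR
 * translation table per WQE page count and check that the octword count
 * still fits a 16-bit field (two 8-byte MTT entries per 16-byte octword).
 * A standalone recomputation with hypothetical sizing values:
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define MTT_OCTW(n)     (ALIGN((n), 8) / 2)

int main(void)
{
        uint64_t pages_per_wqe = 16, rqs = 4, wqes = 1 << 8;    /* hypothetical */
        uint64_t num_mtts = rqs * wqes * ALIGN(pages_per_wqe, 8);
        uint64_t octw = MTT_OCTW(num_mtts);

        printf("mtts=%llu octwords=%llu fits_u16=%d\n",
               (unsigned long long)num_mtts, (unsigned long long)octw,
               octw <= UINT16_MAX);
        return 0;
}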
mlx5_max_log_rq_size(int wq_type) } } +enum { + MLX5E_INLINE_MODE_L2, + MLX5E_INLINE_MODE_VPORT_CONTEXT, + MLX5_INLINE_MODE_NOT_REQUIRED, +}; + struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; @@ -143,10 +156,31 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_data_seg data; }; +static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { + "rx_cqe_moder", +}; + +enum mlx5e_priv_flag { + MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), +}; + +#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \ + do { \ + if (enable) \ + priv->pflags |= pflag; \ + else \ + priv->pflags &= ~pflag; \ + } while (0) + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #endif +struct mlx5e_cq_moder { + u16 usec; + u16 pkts; +}; + struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@ -155,16 +189,16 @@ struct mlx5e_params { u8 log_rq_size; u16 num_channels; u8 num_tc; + u8 rx_cq_period_mode; bool rx_cqe_compress_admin; bool rx_cqe_compress; - u16 rx_cq_moderation_usec; - u16 rx_cq_moderation_pkts; - u16 tx_cq_moderation_usec; - u16 tx_cq_moderation_pkts; + struct mlx5e_cq_moder rx_cq_moderation; + struct mlx5e_cq_moder tx_cq_moderation; u16 min_rx_wqes; bool lro_en; u32 lro_wqe_sz; u16 tx_max_inline; + u8 tx_min_inline_mode; u8 rss_hfunc; u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; @@ -172,6 +206,7 @@ struct mlx5e_params { #ifdef CONFIG_MLX5_CORE_EN_DCB struct ieee_ets ets; #endif + bool rx_am_enabled; }; struct mlx5e_tstamp { @@ -188,9 +223,9 @@ struct mlx5e_tstamp { }; enum { - MLX5E_RQ_STATE_POST_WQES_ENABLE, + MLX5E_RQ_STATE_FLUSH, MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, - MLX5E_RQ_STATE_FLUSH_TIMEOUT, + MLX5E_RQ_STATE_AM, }; struct mlx5e_cq { @@ -198,6 +233,7 @@ struct mlx5e_cq { struct mlx5_cqwq wq; /* data path - accessed per napi poll */ + u16 event_ctr; struct napi_struct *napi; struct mlx5_core_cq mcq; struct mlx5e_channel *channel; @@ -227,6 +263,30 @@ struct mlx5e_dma_info { dma_addr_t addr; }; +struct mlx5e_rx_am_stats { + int ppms; /* packets per msec */ + int epms; /* events per msec */ +}; + +struct mlx5e_rx_am_sample { + ktime_t time; + unsigned int pkt_ctr; + u16 event_ctr; +}; + +struct mlx5e_rx_am { /* Adaptive Moderation */ + u8 state; + struct mlx5e_rx_am_stats prev_stats; + struct mlx5e_rx_am_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@ -247,6 +307,9 @@ struct mlx5e_rq { unsigned long state; int ix; + u32 mpwqe_mtt_offset; + + struct mlx5e_rx_am am; /* Adaptive Moderation */ /* control */ struct mlx5_wq_ctrl wq_ctrl; @@ -306,9 +369,8 @@ struct mlx5e_sq_dma { }; enum { - MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, + MLX5E_SQ_STATE_FLUSH, MLX5E_SQ_STATE_BF_ENABLE, - MLX5E_SQ_STATE_TX_TIMEOUT, }; struct mlx5e_ico_wqe_info { @@ -346,6 +408,7 @@ struct mlx5e_sq { u32 sqn; u16 bf_buf_size; u16 max_inline; + u8 min_inline_mode; u16 edge; struct device *pdev; struct mlx5e_tstamp *tstamp; @@ -358,6 +421,7 @@ struct mlx5e_sq { struct mlx5e_channel *channel; int tc; struct mlx5e_ico_wqe_info *ico_wqe_info; + u32 rate_limit; } ____cacheline_aligned_in_smp; static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) @@ -495,8 +559,24 @@ enum { MLX5E_ARFS_FT_LEVEL }; +struct mlx5e_ethtool_table { + struct mlx5_flow_table *ft; + int num_rules; +}; + +#define ETHTOOL_NUM_L3_L4_FTS 7 +#define ETHTOOL_NUM_L2_FTS 4 + +struct mlx5e_ethtool_steering { + struct 
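/* MLX5E_SET_PRIV_FLAG() above keeps each ethtool private flag as one bit
 * in priv->pflags, with a single macro doing both set and clear. The
 * identical pattern in isolation:
 */
#include <stdint.h>
#include <stdio.h>

#define PFLAG_RX_CQE_BASED_MODER (1 << 0)

#define SET_PRIV_FLAG(pflags, pflag, enable)    \
        do {                                    \
                if (enable)                     \
                        (pflags) |= (pflag);    \
                else                            \
                        (pflags) &= ~(pflag);   \
        } while (0)

int main(void)
{
        uint32_t pflags = 0;

        SET_PRIV_FLAG(pflags, PFLAG_RX_CQE_BASED_MODER, 1);
        printf("cqe-based moderation on:  0x%x\n", pflags);
        SET_PRIV_FLAG(pflags, PFLAG_RX_CQE_BASED_MODER, 0);
        printf("cqe-based moderation off: 0x%x\n", pflags);
        return 0;
}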
mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS]; + struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS]; + struct list_head rules; + int tot_num_rules; +}; + struct mlx5e_flow_steering { struct mlx5_flow_namespace *ns; + struct mlx5e_ethtool_steering ethtool; struct mlx5e_tc_table tc; struct mlx5e_vlan_table vlan; struct mlx5e_l2_table l2; @@ -504,9 +584,15 @@ struct mlx5e_flow_steering { struct mlx5e_arfs_tables arfs; }; -struct mlx5e_direct_tir { - u32 tirn; +struct mlx5e_rqt { u32 rqtn; + bool enabled; +}; + +struct mlx5e_tir { + u32 tirn; + struct mlx5e_rqt rqt; + struct list_head list; }; enum { @@ -514,6 +600,22 @@ enum { MLX5E_NIC_PRIO }; +struct mlx5e_profile { + void (*init)(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, void *ppriv); + void (*cleanup)(struct mlx5e_priv *priv); + int (*init_rx)(struct mlx5e_priv *priv); + void (*cleanup_rx)(struct mlx5e_priv *priv); + int (*init_tx)(struct mlx5e_priv *priv); + void (*cleanup_tx)(struct mlx5e_priv *priv); + void (*enable)(struct mlx5e_priv *priv); + void (*disable)(struct mlx5e_priv *priv); + void (*update_stats)(struct mlx5e_priv *priv); + int (*max_nch)(struct mlx5_core_dev *mdev); + int max_tc; +}; + struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_sq **txq_to_sq_map; @@ -522,18 +624,15 @@ struct mlx5e_priv { unsigned long state; struct mutex state_lock; /* Protects Interface state */ - struct mlx5_uar cq_uar; - u32 pdn; - u32 tdn; - struct mlx5_core_mkey mkey; struct mlx5_core_mkey umr_mkey; struct mlx5e_rq drop_rq; struct mlx5e_channel **channel; u32 tisn[MLX5E_MAX_NUM_TC]; - u32 indir_rqtn; - u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; + struct mlx5e_rqt indir_rqt; + struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; + u32 tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; @@ -545,11 +644,14 @@ struct mlx5e_priv { struct work_struct tx_timeout_work; struct delayed_work update_stats_work; + u32 pflags; struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; struct mlx5e_tstamp tstamp; u16 q_counter; + const struct mlx5e_profile *profile; + void *ppriv; }; enum mlx5e_link_mode { @@ -567,6 +669,7 @@ enum mlx5e_link_mode { MLX5E_10GBASE_ER = 14, MLX5E_40GBASE_SR4 = 15, MLX5E_40GBASE_LR4 = 16, + MLX5E_50GBASE_SR2 = 18, MLX5E_100GBASE_CR4 = 20, MLX5E_100GBASE_SR4 = 21, MLX5E_100GBASE_KR4 = 22, @@ -584,6 +687,9 @@ enum mlx5e_link_mode { #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) + +void mlx5e_build_ptys2ethtool_map(void); + void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); @@ -595,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); void mlx5e_free_tx_descs(struct mlx5e_sq *sq); -void mlx5e_free_rx_descs(struct mlx5e_rq *rq); void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); @@ -621,12 +726,26 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); +void mlx5e_rx_am(struct mlx5e_rq *rq); +void mlx5e_rx_am_work(struct work_struct *work); 
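/* struct mlx5e_profile above turns netdev bring-up into a table of
 * function pointers so the same core code can drive both the native NIC
 * and (per the en_rep.o addition in the Makefile) eswitch vport
 * representors. A reduced analogue of the pattern; the per-profile
 * behaviours named here are illustrative, not the driver's exact split:
 */
#include <stdio.h>

struct profile {
        const char *name;
        int (*init_rx)(void);
        void (*update_stats)(void);
        int max_tc;
};

static int nic_init_rx(void) { puts("  nic: indirect + direct TIRs"); return 0; }
static void nic_stats(void) { puts("  nic: hw and sw counters"); }
static int rep_init_rx(void) { puts("  rep: direct TIRs only"); return 0; }
static void rep_stats(void) { puts("  rep: vport counters"); }

static const struct profile nic_profile = { "nic", nic_init_rx, nic_stats, 8 };
static const struct profile rep_profile = { "rep", rep_init_rx, rep_stats, 1 };

static void bring_up(const struct profile *p)
{
        printf("%s (max_tc=%d)\n", p->name, p->max_tc);
        p->init_rx();
        p->update_stats();
}

int main(void)
{
        bring_up(&nic_profile);
        bring_up(&rep_profile);
        return 0;
}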
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode); + void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); +int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, + int location); +int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, + struct ethtool_rxnfc *info, u32 *rule_locs); +int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs); +int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, + int location); +void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); +void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, @@ -656,6 +775,9 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, int num_channels); int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, + u8 cq_period_mode); + static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { @@ -694,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) MLX5E_MAX_NUM_CHANNELS); } -static inline int mlx5e_get_mtt_octw(int npages) -{ - return ALIGN(npages, 8) / 2; -} - extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; @@ -732,5 +849,39 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, #endif u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); +int mlx5e_create_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir, u32 *in, int inlen); +void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir); +int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); +void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); +int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev); + +struct mlx5_eswitch_rep; +int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); +void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); +int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep); +void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); +int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); +void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); +int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr); + +int mlx5e_create_direct_rqts(struct mlx5e_priv *priv); +void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); +int mlx5e_create_direct_tirs(struct mlx5e_priv *priv); +void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); +int mlx5e_create_tises(struct mlx5e_priv *priv); +void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv); +int mlx5e_close(struct net_device *netdev); +int mlx5e_open(struct net_device *netdev); +void mlx5e_update_stats_work(struct work_struct *work); +void *mlx5e_create_netdev(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile, void *ppriv); +void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv); +struct rtnl_link_stats64 * +mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); #endif 
/* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 3515e78ba..a8cb38789 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -93,14 +93,14 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type) static int arfs_disable(struct mlx5e_priv *priv) { struct mlx5_flow_destination dest; - u32 *tirn = priv->indir_tirn; + struct mlx5e_tir *tir = priv->indir_tir; int err = 0; int tt; int i; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; for (i = 0; i < ARFS_NUM_TYPES; i++) { - dest.tir_num = tirn[i]; + dest.tir_num = tir[i].tirn; tt = arfs_get_tt(i); /* Modify ttc rules destination to bypass the aRFS tables*/ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], @@ -175,15 +175,12 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, { struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; struct mlx5_flow_destination dest; - u8 match_criteria_enable = 0; - u32 *tirn = priv->indir_tirn; - u32 *match_criteria; - u32 *match_value; + struct mlx5e_tir *tir = priv->indir_tir; + struct mlx5_flow_spec *spec; int err = 0; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); err = -ENOMEM; goto out; @@ -192,24 +189,23 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; switch (type) { case ARFS_IPV4_TCP: - dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; + dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn; break; case ARFS_IPV4_UDP: - dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; + dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn; break; case ARFS_IPV6_TCP: - dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; + dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn; break; case ARFS_IPV6_UDP: - dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; + dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn; break; default: err = -EINVAL; goto out; } - arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable, - match_criteria, match_value, + arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); @@ -220,8 +216,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, __func__, type); } out: - kvfree(match_criteria); - kvfree(match_value); + kvfree(spec); return err; } @@ -475,23 +470,20 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, struct mlx5_flow_rule *rule = NULL; struct mlx5_flow_destination dest; struct arfs_table *arfs_table; - u8 match_criteria_enable = 0; + struct mlx5_flow_spec *spec; struct mlx5_flow_table *ft; - u32 *match_criteria; - u32 *match_value; int err = 0; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); err = -ENOMEM; goto out; } - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + 
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ntohs(tuple->etype)); arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype); if (!arfs_table) { @@ -501,59 +493,58 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, ft = arfs_table->ft.t; if (tuple->ip_proto == IPPROTO_TCP) { - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.tcp_dport); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.tcp_sport); - MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport, ntohs(tuple->dst_port)); - MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport, ntohs(tuple->src_port)); } else { - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_sport); - MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, ntohs(tuple->dst_port)); - MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport, ntohs(tuple->src_port)); } if (tuple->etype == htons(ETH_P_IP)) { - memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), &tuple->src_ipv4, 4); - memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), &tuple->dst_ipv4, 4); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); } else { - memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), &tuple->src_ipv6, 16); - memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), &tuple->dst_ipv6, 16); - memset(MLX5_ADDR_OF(fte_match_param, match_criteria, + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16); - memset(MLX5_ADDR_OF(fte_match_param, match_criteria, + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16); } dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn; - rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, - match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); if (IS_ERR(rule)) { @@ -563,8 +554,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, } out: - kvfree(match_criteria); - kvfree(match_value); + kvfree(spec); return err ? 
ERR_PTR(err) : rule; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c new file mode 100644 index 000000000..9cce153e1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "en.h" + +/* mlx5e global resources should be placed in this file. + * Global resources are common to all the netdevices created on the same NIC.
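+ *
+ * A minimal usage sketch (illustrative only, not a code path in this file):
+ *
+ *	err = mlx5e_create_mdev_resources(mdev);
+ *	if (err)
+ *		return err;
+ *	... create netdevs that share mdev->mlx5e_res ...
+ *	mlx5e_destroy_mdev_resources(mdev);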
+ */ + +int mlx5e_create_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir, u32 *in, int inlen) +{ + int err; + + err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn); + if (err) + return err; + + list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); + + return 0; +} + +void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir) +{ + mlx5_core_destroy_tir(mdev, tir->tirn); + list_del(&tir->list); +} + +static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, + struct mlx5_core_mkey *mkey) +{ + struct mlx5_create_mkey_mbox_in *in; + int err; + + in = mlx5_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + in->seg.flags = MLX5_PERM_LOCAL_WRITE | + MLX5_PERM_LOCAL_READ | + MLX5_ACCESS_MODE_PA; + in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + + err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, + NULL); + + kvfree(in); + + return err; +} + +int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) +{ + struct mlx5e_resources *res = &mdev->mlx5e_res; + int err; + + err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false); + if (err) { + mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); + return err; + } + + err = mlx5_core_alloc_pd(mdev, &res->pdn); + if (err) { + mlx5_core_err(mdev, "alloc pd failed, %d\n", err); + goto err_unmap_free_uar; + } + + err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn); + if (err) { + mlx5_core_err(mdev, "alloc td failed, %d\n", err); + goto err_dealloc_pd; + } + + err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey); + if (err) { + mlx5_core_err(mdev, "create mkey failed, %d\n", err); + goto err_dealloc_transport_domain; + } + + INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); + + return 0; + +err_dealloc_transport_domain: + mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); +err_dealloc_pd: + mlx5_core_dealloc_pd(mdev, res->pdn); +err_unmap_free_uar: + mlx5_unmap_free_uar(mdev, &res->cq_uar); + + return err; +} + +void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) +{ + struct mlx5e_resources *res = &mdev->mlx5e_res; + + mlx5_core_destroy_mkey(mdev, &res->mkey); + mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); + mlx5_core_dealloc_pd(mdev, res->pdn); + mlx5_unmap_free_uar(mdev, &res->cq_uar); +} + +int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev) +{ + struct mlx5e_tir *tir; + void *in; + int inlen; + int err = 0; + + inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + + list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { + err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen); + if (err) + goto out; + } + +out: + kvfree(in); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index c585349e0..762af16ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw); } -static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets) +static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, + struct ieee_ets *ets) { int bw_sum = 0; int i; /* Validate Priority */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) + if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) { + 
netdev_err(netdev, + "Failed to validate ETS: priority value greater than max(%d)\n", + MLX5E_MAX_PRIORITY); return -EINVAL; + } } /* Validate Bandwidth Sum */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { - if (!ets->tc_tx_bw[i]) + if (!ets->tc_tx_bw[i]) { + netdev_err(netdev, + "Failed to validate ETS: BW 0 is illegal\n"); return -EINVAL; + } bw_sum += ets->tc_tx_bw[i]; } } - if (bw_sum != 0 && bw_sum != 100) + if (bw_sum != 0 && bw_sum != 100) { + netdev_err(netdev, + "Failed to validate ETS: BW sum is illegal\n"); return -EINVAL; + } return 0; } @@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); int err; - err = mlx5e_dbcnl_validate_ets(ets); + err = mlx5e_dbcnl_validate_ets(netdev, ets); if (err) return err; @@ -195,7 +206,6 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - enum mlx5_port_status ps; u8 curr_pfc_en; int ret; @@ -204,14 +214,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, if (pfc->pfc_en == curr_pfc_en) return 0; - mlx5_query_port_admin_status(mdev, &ps); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); - - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + mlx5_toggle_port_link(mdev); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index e667a870e..7a346bb2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -48,123 +48,85 @@ static void mlx5e_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); } -static const struct { - u32 supported; - u32 advertised; +struct ptys2ethtool_config { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); u32 speed; -} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = { - [MLX5E_1000BASE_CX_SGMII] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_1000BASE_KX] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_CX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_20GBASE_KR2] = { - .supported = SUPPORTED_20000baseKR2_Full, - .advertised = ADVERTISED_20000baseKR2_Full, - .speed = 20000, - }, - [MLX5E_40GBASE_CR4] = { - .supported = SUPPORTED_40000baseCR4_Full, - .advertised = ADVERTISED_40000baseCR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_KR4] = { - .supported = SUPPORTED_40000baseKR4_Full, - .advertised = ADVERTISED_40000baseKR4_Full, - .speed = 40000, - }, - [MLX5E_56GBASE_R4] = { - .supported = SUPPORTED_56000baseKR4_Full, - .advertised = ADVERTISED_56000baseKR4_Full, - .speed = 56000, - }, - [MLX5E_10GBASE_CR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_SR] = { - .supported = 
SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_ER] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_40GBASE_SR4] = { - .supported = SUPPORTED_40000baseSR4_Full, - .advertised = ADVERTISED_40000baseSR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_LR4] = { - .supported = SUPPORTED_40000baseLR4_Full, - .advertised = ADVERTISED_40000baseLR4_Full, - .speed = 40000, - }, - [MLX5E_100GBASE_CR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_SR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_KR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_LR4] = { - .speed = 100000, - }, - [MLX5E_100BASE_TX] = { - .speed = 100, - }, - [MLX5E_1000BASE_T] = { - .supported = SUPPORTED_1000baseT_Full, - .advertised = ADVERTISED_1000baseT_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_T] = { - .supported = SUPPORTED_10000baseT_Full, - .advertised = ADVERTISED_10000baseT_Full, - .speed = 1000, - }, - [MLX5E_25GBASE_CR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_KR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_SR] = { - .speed = 25000, - }, - [MLX5E_50GBASE_CR2] = { - .speed = 50000, - }, - [MLX5E_50GBASE_KR2] = { - .speed = 50000, - }, }; +static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; + +#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \ + ({ \ + struct ptys2ethtool_config *cfg; \ + const unsigned int modes[] = { __VA_ARGS__ }; \ + unsigned int i; \ + cfg = &ptys2ethtool_table[reg_]; \ + cfg->speed = speed_; \ + bitmap_zero(cfg->supported, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + bitmap_zero(cfg->advertised, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ + __set_bit(modes[i], cfg->supported); \ + __set_bit(modes[i], cfg->advertised); \ + } \ + }) + +void mlx5e_build_ptys2ethtool_map(void) +{ + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); + 
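	/* A sketch of what one invocation above expands to (the SR2 entry,
	 * for illustration), per the statement-expression macro defined above:
	 *
	 *	cfg = &ptys2ethtool_table[MLX5E_50GBASE_SR2];
	 *	cfg->speed = SPEED_50000;
	 *	bitmap_zero(cfg->supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
	 *	bitmap_zero(cfg->advertised, __ETHTOOL_LINK_MODE_MASK_NBITS);
	 *	__set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, cfg->supported);
	 *	__set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, cfg->advertised);
	 */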
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); +} + static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -177,6 +139,18 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) return err ? 0 : pfc_en_tx | pfc_en_rx; } +static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 rx_pause; + u32 tx_pause; + int err; + + err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); + + return err ? false : rx_pause | tx_pause; +} + #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) #define MLX5E_NUM_RQ_STATS(priv) \ (NUM_RQ_STATS * priv->params.num_channels * \ @@ -185,8 +159,8 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ test_bit(MLX5E_STATE_OPENED, &priv->state)) #define MLX5E_NUM_PFC_COUNTERS(priv) \ - (hweight8(mlx5e_query_pfc_combined(priv)) * \ - NUM_PPORT_PER_PRIO_PFC_COUNTERS) + ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \ + NUM_PPORT_PER_PRIO_PFC_COUNTERS) static int mlx5e_get_sset_count(struct net_device *dev, int sset) { @@ -200,6 +174,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv); + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(mlx5e_priv_flags); /* fallthrough */ default: return -EOPNOTSUPP; @@ -245,9 +221,19 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) pfc_combined = mlx5e_query_pfc_combined(priv); for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + char pfc_string[ETH_GSTRING_LEN]; + + snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_pfc_stats_desc[i].format, pfc_string); + } + } + + if (mlx5e_query_global_pause_combined(priv)) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_prio_pfc_stats_desc[i].format, prio); + pport_per_prio_pfc_stats_desc[i].format, "global"); } } @@ -272,9 +258,12 @@ static void mlx5e_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct mlx5e_priv *priv = netdev_priv(dev); + int i; switch (stringset) { case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); 
i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]); break; case ETH_SS_TEST: @@ -339,6 +328,13 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, } } + if (mlx5e_query_global_pause_combined(priv)) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], + pport_per_prio_pfc_stats_desc, i); + } + } + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return; @@ -356,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, sq_stats_desc, j); } +static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type, + int num_wqe) +{ + int packets_per_wqe; + int stride_size; + int num_strides; + int wqe_size; + + if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + return num_wqe; + + stride_size = 1 << priv->params.mpwqe_log_stride_sz; + num_strides = 1 << priv->params.mpwqe_log_num_strides; + wqe_size = stride_size * num_strides; + + packets_per_wqe = wqe_size / + ALIGN(ETH_DATA_LEN, stride_size); + return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1)); +} + +static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type, + int num_packets) +{ + int packets_per_wqe; + int stride_size; + int num_strides; + int wqe_size; + int num_wqes; + + if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + return num_packets; + + stride_size = 1 << priv->params.mpwqe_log_stride_sz; + num_strides = 1 << priv->params.mpwqe_log_num_strides; + wqe_size = stride_size * num_strides; + + num_packets = (1 << order_base_2(num_packets)); + + packets_per_wqe = wqe_size / + ALIGN(ETH_DATA_LEN, stride_size); + num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe); + return 1 << (order_base_2(num_wqes)); +} + static void mlx5e_get_ringparam(struct net_device *dev, struct ethtool_ringparam *param) { struct mlx5e_priv *priv = netdev_priv(dev); int rq_wq_type = priv->params.rq_wq_type; - param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type); + param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, + 1 << mlx5_max_log_rq_size(rq_wq_type)); param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; - param->rx_pending = 1 << priv->params.log_rq_size; + param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, + 1 << priv->params.log_rq_size); param->tx_pending = 1 << priv->params.log_sq_size; } @@ -374,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); bool was_opened; int rq_wq_type = priv->params.rq_wq_type; + u32 rx_pending_wqes; + u32 min_rq_size; + u32 max_rq_size; u16 min_rx_wqes; u8 log_rq_size; u8 log_sq_size; + u32 num_mtts; int err = 0; if (param->rx_jumbo_pending) { @@ -389,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev, __func__); return -EINVAL; } - if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) { + + min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, + 1 << mlx5_min_log_rq_size(rq_wq_type)); + max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, + 1 << mlx5_max_log_rq_size(rq_wq_type)); + rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type, + param->rx_pending); + + if (param->rx_pending < min_rq_size) { netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", __func__, param->rx_pending, - 1 << mlx5_min_log_rq_size(rq_wq_type)); + min_rq_size); return -EINVAL; } - if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) { + if (param->rx_pending > max_rq_size) { netdev_info(dev, "%s: rx_pending (%d) > max 
(%d)\n", __func__, param->rx_pending, - 1 << mlx5_max_log_rq_size(rq_wq_type)); + max_rq_size); return -EINVAL; } + + num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels, + rx_pending_wqes); + if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && + !MLX5E_VALID_NUM_MTTS(num_mtts)) { + netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n", + __func__, param->rx_pending); + return -EINVAL; + } + if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n", __func__, param->tx_pending, @@ -414,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev, return -EINVAL; } - log_rq_size = order_base_2(param->rx_pending); + log_rq_size = order_base_2(rx_pending_wqes); log_sq_size = order_base_2(param->tx_pending); - min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending); + min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes); if (log_rq_size == priv->params.log_rq_size && log_sq_size == priv->params.log_sq_size && @@ -458,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev, unsigned int count = ch->combined_count; bool arfs_enabled; bool was_opened; + u32 num_mtts; int err = 0; if (!count) { @@ -476,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev, return -EINVAL; } + num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size)); + if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && + !MLX5E_VALID_NUM_MTTS(num_mtts)) { + netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n", + __func__, count); + return -EINVAL; + } + if (priv->params.num_channels == count) return 0; @@ -519,10 +592,11 @@ static int mlx5e_get_coalesce(struct net_device *netdev, if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) return -ENOTSUPP; - coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; - coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; - coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; - coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; + coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; + coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec; + coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts; + coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled; return 0; } @@ -533,6 +607,10 @@ static int mlx5e_set_coalesce(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channel *c; + bool restart = + !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled; + bool was_opened; + int err = 0; int tc; int i; @@ -540,12 +618,19 @@ static int mlx5e_set_coalesce(struct net_device *netdev, return -ENOTSUPP; mutex_lock(&priv->state_lock); - priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; - priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; - priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; - priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened && restart) { + mlx5e_close_locked(netdev); + priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; + } + + priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; + priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; + 
priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; + priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; + + if (!was_opened || restart) goto out; for (i = 0; i < priv->params.num_channels; ++i) { @@ -564,35 +649,39 @@ static int mlx5e_set_coalesce(struct net_device *netdev, } out: + if (was_opened && restart) + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); - return 0; + return err; } -static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) +static void ptys2ethtool_supported_link(unsigned long *supported_modes, + u32 eth_proto_cap) { - int i; - u32 supported_modes = 0; + unsigned long proto_cap = eth_proto_cap; + int proto; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - supported_modes |= ptys2ethtool_table[i].supported; - } - return supported_modes; + for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(supported_modes, supported_modes, + ptys2ethtool_table[proto].supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); } -static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) +static void ptys2ethtool_adver_link(unsigned long *advertising_modes, + u32 eth_proto_cap) { - int i; - u32 advertising_modes = 0; + unsigned long proto_cap = eth_proto_cap; + int proto; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - advertising_modes |= ptys2ethtool_table[i].advertised; - } - return advertising_modes; + for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(advertising_modes, advertising_modes, + ptys2ethtool_table[proto].advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS); } -static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) +static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings, + u32 eth_proto_cap) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) @@ -600,7 +689,7 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { - return SUPPORTED_FIBRE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE); } if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) @@ -608,9 +697,8 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { - return SUPPORTED_Backplane; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane); } - return 0; } int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) @@ -634,7 +722,7 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, - struct ethtool_cmd *cmd) + struct ethtool_link_ksettings *link_ksettings) { int i; u32 speed = SPEED_UNKNOWN; @@ -651,23 +739,32 @@ static void get_speed_duplex(struct net_device *netdev, } } out: - ethtool_cmd_speed_set(cmd, speed); - cmd->duplex = duplex; + link_ksettings->base.speed = speed; + link_ksettings->base.duplex = duplex; } -static void get_supported(u32 eth_proto_cap, u32 *supported) +static void get_supported(u32 eth_proto_cap, + struct ethtool_link_ksettings *link_ksettings) { - *supported |= ptys2ethtool_supported_port(eth_proto_cap); - *supported |= ptys2ethtool_supported_link(eth_proto_cap); - *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + unsigned long *supported = 
link_ksettings->link_modes.supported; + + ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); + ptys2ethtool_supported_link(supported, eth_proto_cap); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause); } static void get_advertising(u32 eth_proto_cap, u8 tx_pause, - u8 rx_pause, u32 *advertising) + u8 rx_pause, + struct ethtool_link_ksettings *link_ksettings) { - *advertising |= ptys2ethtool_adver_link(eth_proto_cap); - *advertising |= tx_pause ? ADVERTISED_Pause : 0; - *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0; + unsigned long *advertising = link_ksettings->link_modes.advertising; + + ptys2ethtool_adver_link(advertising, eth_proto_cap); + if (tx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); + if (tx_pause ^ rx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); } static u8 get_connector_port(u32 eth_proto) @@ -695,13 +792,16 @@ static u8 get_connector_port(u32 eth_proto) return PORT_OTHER; } -static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) +static void get_lp_advertising(u32 eth_proto_lp, + struct ethtool_link_ksettings *link_ksettings) { - *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); + unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; + + ptys2ethtool_adver_link(lp_advertising, eth_proto_lp); } -static int mlx5e_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int mlx5e_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@ -710,6 +810,8 @@ static int mlx5e_get_settings(struct net_device *netdev, u32 eth_proto_admin; u32 eth_proto_lp; u32 eth_proto_oper; + u8 an_disable_admin; + u8 an_status; int err; err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); @@ -720,35 +822,49 @@ static int mlx5e_get_settings(struct net_device *netdev, goto err_query_ptys; } - eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); + an_status = MLX5_GET(ptys_reg, out, an_status); - cmd->supported = 0; - cmd->advertising = 0; + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - get_supported(eth_proto_cap, &cmd->supported); - get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); - get_speed_duplex(netdev, eth_proto_oper, cmd); + get_supported(eth_proto_cap, link_ksettings); + get_advertising(eth_proto_admin, 0, 0, link_ksettings); + get_speed_duplex(netdev, eth_proto_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? 
eth_proto_oper : eth_proto_cap; - cmd->port = get_connector_port(eth_proto_oper); - get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); + link_ksettings->base.port = get_connector_port(eth_proto_oper); + get_lp_advertising(eth_proto_lp, link_ksettings); + + if (an_status == MLX5_AN_COMPLETE) + ethtool_link_ksettings_add_link_mode(link_ksettings, + lp_advertising, Autoneg); - cmd->transceiver = XCVR_INTERNAL; + link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE : + AUTONEG_ENABLE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Autoneg); + if (!an_disable_admin) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); err_query_ptys: return err; } -static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) +static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) { u32 i, ptys_modes = 0; for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (ptys2ethtool_table[i].advertised & link_modes) + if (bitmap_intersects(ptys2ethtool_table[i].advertised, + link_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= MLX5E_PROT_MASK(i); } @@ -767,21 +883,25 @@ static u32 mlx5e_ethtool2ptys_speed_link(u32 speed) return speed_links; } -static int mlx5e_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int mlx5e_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + u32 eth_proto_cap, eth_proto_admin; + bool an_changes = false; + u8 an_disable_admin; + u8 an_disable_cap; + bool an_disable; u32 link_modes; + u8 an_status; u32 speed; - u32 eth_proto_cap, eth_proto_admin; - enum mlx5_port_status ps; int err; - speed = ethtool_cmd_speed(cmd); + speed = link_ksettings->base.speed; - link_modes = cmd->autoneg == AUTONEG_ENABLE ? - mlx5e_ethtool2ptys_adver_link(cmd->advertising) : + link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? 
+ mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : mlx5e_ethtool2ptys_speed_link(speed); err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); @@ -806,15 +926,18 @@ static int mlx5e_set_settings(struct net_device *netdev, goto out; } - if (link_modes == eth_proto_admin) + mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status, + &an_disable_cap, &an_disable_admin); + + an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; + an_changes = ((!an_disable && an_disable_admin) || + (an_disable && !an_disable_admin)); + + if (!an_changes && link_modes == eth_proto_admin) goto out; - mlx5_query_port_admin_status(mdev, &ps); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN); + mlx5_toggle_port_link(mdev); out: return err; @@ -861,7 +984,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) mlx5e_build_tir_ctx_hash(tirc, priv); for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen); + mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen); } static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, @@ -883,7 +1006,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mutex_lock(&priv->state_lock); if (indir) { - u32 rqtn = priv->indir_rqtn; + u32 rqtn = priv->indir_rqt.rqtn; memcpy(priv->params.indirection_rqt, indir, sizeof(priv->params.indirection_rqt)); @@ -916,6 +1039,15 @@ static int mlx5e_get_rxnfc(struct net_device *netdev, case ETHTOOL_GRXRINGS: info->data = priv->params.num_channels; break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = priv->fs.ethtool.tot_num_rules; + break; + case ETHTOOL_GRXCLSRULE: + err = mlx5e_ethtool_get_flow(priv, info, info->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs); + break; default: err = -EOPNOTSUPP; break; @@ -1272,6 +1404,107 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, return 0; } +typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); + +static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + bool rx_mode_changed; + u8 rx_cq_period_mode; + int err = 0; + bool reset; + + rx_cq_period_mode = enable ? 
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode; + + if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && + !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) + return -ENOTSUPP; + + if (!rx_mode_changed) + return 0; + + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (reset) + mlx5e_close_locked(netdev); + + mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode); + + if (reset) + err = mlx5e_open_locked(netdev); + + return err; +} + +static int mlx5e_handle_pflag(struct net_device *netdev, + u32 wanted_flags, + enum mlx5e_priv_flag flag, + mlx5e_pflag_handler pflag_handler) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + bool enable = !!(wanted_flags & flag); + u32 changes = wanted_flags ^ priv->pflags; + int err; + + if (!(changes & flag)) + return 0; + + err = pflag_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s private flag 0x%x failed err %d\n", + enable ? "Enable" : "Disable", flag, err); + return err; + } + + MLX5E_SET_PRIV_FLAG(priv, flag, enable); + return 0; +} + +static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_CQE_BASED_MODER, + set_pflag_rx_cqe_based_moder); + + mutex_unlock(&priv->state_lock); + return err ? -EINVAL : 0; +} + +static u32 mlx5e_get_priv_flags(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return priv->pflags; +} + +static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = mlx5e_ethtool_flow_replace(priv, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1284,13 +1517,14 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_channels = mlx5e_set_channels, .get_coalesce = mlx5e_get_coalesce, .set_coalesce = mlx5e_set_coalesce, - .get_settings = mlx5e_get_settings, - .set_settings = mlx5e_set_settings, + .get_link_ksettings = mlx5e_get_link_ksettings, + .set_link_ksettings = mlx5e_set_link_ksettings, .get_rxfh_key_size = mlx5e_get_rxfh_key_size, .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, .get_rxnfc = mlx5e_get_rxnfc, + .set_rxnfc = mlx5e_set_rxnfc, .get_tunable = mlx5e_get_tunable, .set_tunable = mlx5e_set_tunable, .get_pauseparam = mlx5e_get_pauseparam, @@ -1301,4 +1535,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_wol = mlx5e_set_wol, .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, + .get_priv_flags = mlx5e_get_priv_flags, + .set_priv_flags = mlx5e_set_priv_flags }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index b32740092..1587a9fd5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -156,19 +156,18 @@ enum mlx5e_vlan_rule_type { static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, - u16 vid, u32 *mc, u32 *mv) + u16 vid, struct 
mlx5_flow_spec *spec) { struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; struct mlx5_flow_destination dest; - u8 match_criteria_enable = 0; struct mlx5_flow_rule **rule_p; int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = priv->fs.l2.ft.t; - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: @@ -176,17 +175,19 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, break; case MLX5E_VLAN_RULE_TYPE_ANY_VID: rule_p = &priv->fs.vlan.any_vlan_rule; - MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ rule_p = &priv->fs.vlan.active_vlans_rule[vid]; - MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); - MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, + vid); break; } - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + *rule_p = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); @@ -203,27 +204,21 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, u16 vid) { - u32 *match_criteria; - u32 *match_value; + struct mlx5_flow_spec *spec; int err = 0; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_vlan_rule_out; + return -ENOMEM; } if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) mlx5e_vport_context_update_vlans(priv); - err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria, - match_value); + err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec); -add_vlan_rule_out: - kvfree(match_criteria); - kvfree(match_value); + kvfree(spec); return err; } @@ -598,32 +593,27 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, u8 proto) { struct mlx5_flow_rule *rule; - u8 match_criteria_enable = 0; - u32 *match_criteria; - u32 *match_value; + struct mlx5_flow_spec *spec; int err = 0; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto out; + return ERR_PTR(-ENOMEM); } if (proto) { - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol); - MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, 
spec->match_value, outer_headers.ip_protocol, proto); } if (etype) { - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype); - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); } - rule = mlx5_add_flow_rule(ft, match_criteria_enable, - match_criteria, match_value, + rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, dest); @@ -631,9 +621,8 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, err = PTR_ERR(rule); netdev_err(priv->netdev, "%s: add rule failed\n", __func__); } -out: - kvfree(match_criteria); - kvfree(match_value); + + kvfree(spec); return err ? ERR_PTR(err) : rule; } @@ -655,7 +644,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) if (tt == MLX5E_TT_ANY) dest.tir_num = priv->direct_tir[0].tirn; else - dest.tir_num = priv->indir_tirn[tt]; + dest.tir_num = priv->indir_tir[tt].tirn; rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, ttc_rules[tt].etype, ttc_rules[tt].proto); @@ -792,24 +781,20 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, { struct mlx5_flow_table *ft = priv->fs.l2.ft.t; struct mlx5_flow_destination dest; - u8 match_criteria_enable = 0; - u32 *match_criteria; - u32 *match_value; + struct mlx5_flow_spec *spec; int err = 0; u8 *mc_dmac; u8 *mv_dmac; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_l2_rule_out; + return -ENOMEM; } - mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.dmac_47_16); - mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value, + mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dmac_47_16); dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; @@ -817,13 +802,13 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, switch (type) { case MLX5E_FULLMATCH: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; eth_broadcast_addr(mc_dmac); ether_addr_copy(mv_dmac, ai->addr); break; case MLX5E_ALLMULTI: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; mc_dmac[0] = 0x01; mv_dmac[0] = 0x01; break; @@ -832,8 +817,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, break; } - ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, - match_value, + ai->rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); if (IS_ERR(ai->rule)) { @@ -843,9 +827,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, ai->rule = NULL; } -add_l2_rule_out: - kvfree(match_criteria); - kvfree(match_value); + kvfree(spec); return err; } @@ -1102,6 +1084,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) goto err_destroy_l2_table; } + mlx5e_ethtool_init_steering(priv); + return 0; err_destroy_l2_table: @@ -1121,4 +1105,5 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv 
*priv) mlx5e_destroy_l2_table(priv); mlx5e_destroy_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); + mlx5e_ethtool_cleanup_steering(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c new file mode 100644 index 000000000..d17c24227 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -0,0 +1,586 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/mlx5/fs.h> +#include "en.h" + +struct mlx5e_ethtool_rule { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; + struct mlx5_flow_rule *rule; + struct mlx5e_ethtool_table *eth_ft; +}; + +static void put_flow_table(struct mlx5e_ethtool_table *eth_ft) +{ + if (!--eth_ft->num_rules) { + mlx5_destroy_flow_table(eth_ft->ft); + eth_ft->ft = NULL; + } +} + +#define MLX5E_ETHTOOL_L3_L4_PRIO 0 +#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS) +#define MLX5E_ETHTOOL_NUM_ENTRIES 64000 +#define MLX5E_ETHTOOL_NUM_GROUPS 10 +static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs, + int num_tuples) +{ + struct mlx5e_ethtool_table *eth_ft; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; + int max_tuples; + int table_size; + int prio; + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + max_tuples = ETHTOOL_NUM_L3_L4_FTS; + prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); + eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; + break; + case IP_USER_FLOW: + max_tuples = ETHTOOL_NUM_L3_L4_FTS; + prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); + eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; + break; + case ETHER_FLOW: + max_tuples = ETHTOOL_NUM_L2_FTS; + prio = max_tuples - num_tuples; + eth_ft = &priv->fs.ethtool.l2_ft[prio]; + prio += MLX5E_ETHTOOL_L2_PRIO; + break; + default: + return ERR_PTR(-EINVAL); + } + + eth_ft->num_rules++; + if (eth_ft->ft) + return eth_ft; + + ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_ETHTOOL); + if (!ns) + return ERR_PTR(-ENOTSUPP); + + table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev, + flow_table_properties_nic_receive.log_max_ft_size)), + MLX5E_ETHTOOL_NUM_ENTRIES); + ft = mlx5_create_auto_grouped_flow_table(ns, prio, + table_size, + MLX5E_ETHTOOL_NUM_GROUPS, 0); + if (IS_ERR(ft)) + return (void *)ft; + + eth_ft->ft = ft; + return eth_ft; +} + +static void mask_spec(u8 *mask, u8 *val, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++, mask++, val++) + *((u8 *)val) = *((u8 *)mask) & *((u8 *)val); +} + +static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m, + __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v) +{ + if (ip4src_m) { + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + &ip4src_v, sizeof(ip4src_v)); + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + 0xff, sizeof(ip4src_m)); + } + if (ip4dst_m) { + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &ip4dst_v, sizeof(ip4dst_v)); + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + 0xff, sizeof(ip4dst_m)); + } + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + ethertype, ETH_P_IP); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + ethertype, 0xffff); +} + +static int set_flow_attrs(u32 *match_c, u32 *match_v, + struct ethtool_rx_flow_spec *fs) +{ + void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers); + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + struct ethtool_tcpip4_spec *l4_mask; + struct ethtool_tcpip4_spec *l4_val; + struct ethtool_usrip4_spec *l3_mask; + struct ethtool_usrip4_spec *l3_val; + struct ethhdr *eth_val; + struct ethhdr 
*eth_mask; + + switch (flow_type) { + case TCP_V4_FLOW: + l4_mask = &fs->m_u.tcp_ip4_spec; + l4_val = &fs->h_u.tcp_ip4_spec; + set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src, + l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst); + + if (l4_mask->psrc) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport, + ntohs(l4_val->psrc)); + } + if (l4_mask->pdst) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport, + ntohs(l4_val->pdst)); + } + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, + IPPROTO_TCP); + break; + case UDP_V4_FLOW: + l4_mask = &fs->m_u.tcp_ip4_spec; + l4_val = &fs->h_u.tcp_ip4_spec; + set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src, + l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst); + + if (l4_mask->psrc) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport, + ntohs(l4_val->psrc)); + } + if (l4_mask->pdst) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport, + ntohs(l4_val->pdst)); + } + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, + IPPROTO_UDP); + break; + case IP_USER_FLOW: + l3_mask = &fs->m_u.usr_ip4_spec; + l3_val = &fs->h_u.usr_ip4_spec; + set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src, + l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst); + break; + case ETHER_FLOW: + eth_mask = &fs->m_u.ether_spec; + eth_val = &fs->h_u.ether_spec; + + mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask)); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_c, smac_47_16), + eth_mask->h_source); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_v, smac_47_16), + eth_val->h_source); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_c, dmac_47_16), + eth_mask->h_dest); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_v, dmac_47_16), + eth_val->h_dest); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype, + ntohs(eth_mask->h_proto)); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype, + ntohs(eth_val->h_proto)); + break; + default: + return -EINVAL; + } + + if ((fs->flow_type & FLOW_EXT) && + (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + first_vid, 0xfff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + first_vid, ntohs(fs->h_ext.vlan_tci)); + } + if (fs->flow_type & FLOW_MAC_EXT && + !is_zero_ether_addr(fs->m_ext.h_dest)) { + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_c, dmac_47_16), + fs->m_ext.h_dest); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_v, dmac_47_16), + fs->h_ext.h_dest); + } + + return 0; +} + +static void add_rule_to_list(struct mlx5e_priv *priv, + struct mlx5e_ethtool_rule *rule) +{ + struct mlx5e_ethtool_rule *iter; + struct list_head *head = &priv->fs.ethtool.rules; + + list_for_each_entry(iter, &priv->fs.ethtool.rules, list) { + if (iter->flow_spec.location > rule->flow_spec.location) + 
break;
+		head = &iter->list;
+	}
+	priv->fs.ethtool.tot_num_rules++;
+	list_add(&rule->list, head);
+}
+
+static bool outer_header_zero(u32 *match_criteria)
+{
+	int size = MLX5_ST_SZ_BYTES(fte_match_param);
+	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+					     outer_headers);
+
+	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
+						  outer_headers_c + 1,
+						  size - 1);
+}
+
+static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
+						    struct mlx5_flow_table *ft,
+						    struct ethtool_rx_flow_spec *fs)
+{
+	struct mlx5_flow_destination *dst = NULL;
+	struct mlx5_flow_spec *spec;
+	struct mlx5_flow_rule *rule;
+	int err = 0;
+	u32 action;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec)
+		return ERR_PTR(-ENOMEM);
+	err = set_flow_attrs(spec->match_criteria, spec->match_value,
+			     fs);
+	if (err)
+		goto free;
+
+	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+		action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+	} else {
+		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+		if (!dst) {
+			err = -ENOMEM;
+			goto free;
+		}
+
+		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+		dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
+		action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	}
+
+	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
+	rule = mlx5_add_flow_rule(ft, spec, action,
+				  MLX5_FS_DEFAULT_FLOW_TAG, dst);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
+			   __func__, err);
+		goto free;
+	}
+free:
+	kvfree(spec);
+	kfree(dst);
+	return err ? ERR_PTR(err) : rule;
+}
+
+static void del_ethtool_rule(struct mlx5e_priv *priv,
+			     struct mlx5e_ethtool_rule *eth_rule)
+{
+	if (eth_rule->rule)
+		mlx5_del_flow_rule(eth_rule->rule);
+	list_del(&eth_rule->list);
+	priv->fs.ethtool.tot_num_rules--;
+	put_flow_table(eth_rule->eth_ft);
+	kfree(eth_rule);
+}
+
+static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
+						    int location)
+{
+	struct mlx5e_ethtool_rule *iter;
+
+	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+		if (iter->flow_spec.location == location)
+			return iter;
+	}
+	return NULL;
+}
+
+static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
+						   int location)
+{
+	struct mlx5e_ethtool_rule *eth_rule;
+
+	eth_rule = find_ethtool_rule(priv, location);
+	if (eth_rule)
+		del_ethtool_rule(priv, eth_rule);
+
+	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
+	if (!eth_rule)
+		return ERR_PTR(-ENOMEM);
+
+	add_rule_to_list(priv, eth_rule);
+	return eth_rule;
+}
+
+#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
+
+#define all_ones(field) (field == (__force typeof(field))-1)
+#define all_zeros_or_all_ones(field) \
+	((field) == 0 || (field) == (__force typeof(field))-1)
+
+static int validate_flow(struct mlx5e_priv *priv,
+			 struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip4_spec *l4_mask;
+	struct ethtool_usrip4_spec *l3_mask;
+	struct ethhdr *eth_mask;
+	int num_tuples = 0;
+
+	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
+		return -EINVAL;
+
+	if (fs->ring_cookie >= priv->params.num_channels &&
+	    fs->ring_cookie != RX_CLS_FLOW_DISC)
+		return -EINVAL;
+
+	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+	case ETHER_FLOW:
+		eth_mask = &fs->m_u.ether_spec;
+		if (!is_zero_ether_addr(eth_mask->h_dest))
+			num_tuples++;
+		if (!is_zero_ether_addr(eth_mask->h_source))
+			num_tuples++;
+		if (eth_mask->h_proto)
+			num_tuples++;
+		break;
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+		if (fs->m_u.tcp_ip4_spec.tos)
+			return -EINVAL;
+		l4_mask =
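/* Aside: outer_header_zero() above uses a compact "is this buffer all
 * zeros" idiom: check the first byte, then memcmp() the buffer against
 * itself shifted by one, so a zero first byte propagates across the
 * whole buffer. The same trick in standalone form: */

#include <stdio.h>
#include <string.h>

static int all_zero(const char *buf, size_t size)
{
	/* buf[0] == 0 plus buf[i] == buf[i + 1] for every i
	 * together force every byte to be zero */
	return buf[0] == 0 && !memcmp(buf, buf + 1, size - 1);
}

int main(void)
{
	char a[16] = { 0 };
	char b[16] = { 0 };

	b[9] = 1;
	printf("a all-zero: %d, b all-zero: %d\n",
	       all_zero(a, sizeof(a)), all_zero(b, sizeof(b)));
	return 0;
}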
&fs->m_u.tcp_ip4_spec; + if (l4_mask->ip4src) { + if (!all_ones(l4_mask->ip4src)) + return -EINVAL; + num_tuples++; + } + if (l4_mask->ip4dst) { + if (!all_ones(l4_mask->ip4dst)) + return -EINVAL; + num_tuples++; + } + if (l4_mask->psrc) { + if (!all_ones(l4_mask->psrc)) + return -EINVAL; + num_tuples++; + } + if (l4_mask->pdst) { + if (!all_ones(l4_mask->pdst)) + return -EINVAL; + num_tuples++; + } + /* Flow is TCP/UDP */ + num_tuples++; + break; + case IP_USER_FLOW: + l3_mask = &fs->m_u.usr_ip4_spec; + if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || + fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + if (l3_mask->ip4src) { + if (!all_ones(l3_mask->ip4src)) + return -EINVAL; + num_tuples++; + } + if (l3_mask->ip4dst) { + if (!all_ones(l3_mask->ip4dst)) + return -EINVAL; + num_tuples++; + } + /* Flow is IPv4 */ + num_tuples++; + break; + default: + return -EINVAL; + } + if ((fs->flow_type & FLOW_EXT)) { + if (fs->m_ext.vlan_etype || + (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))) + return -EINVAL; + + if (fs->m_ext.vlan_tci) { + if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + } + num_tuples++; + } + + if (fs->flow_type & FLOW_MAC_EXT && + !is_zero_ether_addr(fs->m_ext.h_dest)) + num_tuples++; + + return num_tuples; +} + +int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs) +{ + struct mlx5e_ethtool_table *eth_ft; + struct mlx5e_ethtool_rule *eth_rule; + struct mlx5_flow_rule *rule; + int num_tuples; + int err; + + num_tuples = validate_flow(priv, fs); + if (num_tuples <= 0) { + netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__); + return -EINVAL; + } + + eth_ft = get_flow_table(priv, fs, num_tuples); + if (IS_ERR(eth_ft)) + return PTR_ERR(eth_ft); + + eth_rule = get_ethtool_rule(priv, fs->location); + if (IS_ERR(eth_rule)) { + put_flow_table(eth_ft); + return PTR_ERR(eth_rule); + } + + eth_rule->flow_spec = *fs; + eth_rule->eth_ft = eth_ft; + if (!eth_ft->ft) { + err = -EINVAL; + goto del_ethtool_rule; + } + rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto del_ethtool_rule; + } + + eth_rule->rule = rule; + + return 0; + +del_ethtool_rule: + del_ethtool_rule(priv, eth_rule); + + return err; +} + +int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, + int location) +{ + struct mlx5e_ethtool_rule *eth_rule; + int err = 0; + + if (location >= MAX_NUM_OF_ETHTOOL_RULES) + return -ENOSPC; + + eth_rule = find_ethtool_rule(priv, location); + if (!eth_rule) { + err = -ENOENT; + goto out; + } + + del_ethtool_rule(priv, eth_rule); +out: + return err; +} + +int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, + int location) +{ + struct mlx5e_ethtool_rule *eth_rule; + + if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES) + return -EINVAL; + + list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) { + if (eth_rule->flow_spec.location == location) { + info->fs = eth_rule->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, + u32 *rule_locs) +{ + int location = 0; + int idx = 0; + int err = 0; + + while ((!err || err == -ENOENT) && idx < info->rule_cnt) { + err = mlx5e_ethtool_get_flow(priv, info, location); + if (!err) + rule_locs[idx++] = location; + location++; + } + return err; +} + +void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) +{ + struct mlx5e_ethtool_rule *iter; + struct mlx5e_ethtool_rule 
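/* Aside: the all_ones() macro used by validate_flow() casts -1 to the
 * field's own type, so the check adapts to the field's width. A quick
 * standalone illustration (__force is only a kernel sparse annotation
 * and is dropped here): */

#include <stdio.h>
#include <stdint.h>

#define all_ones(field) ((field) == (typeof(field))-1)

int main(void)
{
	uint16_t port_mask = 0xffff;	/* exact match */
	uint32_t ip_mask = 0xffffff00;	/* a /24 mask: not exact */

	printf("port mask all-ones: %d\n", all_ones(port_mask));	/* 1 */
	printf("ip mask all-ones:   %d\n", all_ones(ip_mask));		/* 0 */
	return 0;
}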
*temp; + + list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list) + del_ethtool_rule(priv, iter); +} + +void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) +{ + INIT_LIST_HEAD(&priv->fs.ethtool.rules); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a4d88c2c..2459c7f3d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -39,22 +39,17 @@ #include "eswitch.h" #include "vxlan.h" -enum { - MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000, - MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20, - MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS / - MLX5_EN_QP_FLUSH_MSLEEP_QUANT, -}; - struct mlx5e_rq_param { - u32 rqc[MLX5_ST_SZ_DW(rqc)]; - struct mlx5_wq_param wq; + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; + bool am_enabled; }; struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; u16 max_inline; + u8 min_inline_mode; bool icosq; }; @@ -62,6 +57,7 @@ struct mlx5e_cq_param { u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; u16 eq_ix; + u8 cq_period_mode; }; struct mlx5e_channel_param { @@ -159,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; + s->tx_xmit_more += sq_stats->xmit_more; s->tx_csum_partial_inner += sq_stats->csum_partial_inner; tx_offload_none += sq_stats->csum_none; } @@ -254,14 +251,14 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) mlx5e_update_sw_counters(priv); } -static void mlx5e_update_stats_work(struct work_struct *work) +void mlx5e_update_stats_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, update_stats_work); mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - mlx5e_update_stats(priv); + priv->profile->update_stats(priv); queue_delayed_work(priv->wq, dwork, msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); } @@ -337,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; + rq->mpwqe_mtt_offset = c->ix * + MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size)); + rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; @@ -367,6 +367,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, wqe->data.byte_count = cpu_to_be32(byte_count); } + INIT_WORK(&rq->am.work, mlx5e_rx_am_work); + rq->am.mode = priv->params.rx_cq_period_mode; + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@ -422,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); - MLX5_SET(rqc, rqc, flush_in_error_en, 1); MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); @@ -519,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) return -ETIMEDOUT; } +static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5e_rx_wqe *wqe; + __be16 wqe_ix_be; + u16 wqe_ix; + + /* UMR WQE (if in progress) is always at wq->head */ + if 
(test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) + mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]); + + while (!mlx5_wq_ll_is_empty(wq)) { + wqe_ix_be = *wq->tail_next; + wqe_ix = be16_to_cpu(wqe_ix_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); + rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, + &wqe->next.next_wqe_index); + } +} + static int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) @@ -539,7 +562,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (err) goto err_disable_rq; - set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + if (param->am_enabled) + set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; sq->ico_wqe_info[pi].num_wqebbs = 1; @@ -557,22 +581,9 @@ err_destroy_rq: static void mlx5e_close_rq(struct mlx5e_rq *rq) { - int tout = 0; - int err; - - clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state); napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ - - err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); - while (!mlx5_wq_ll_is_empty(&rq->wq) && !err && - tout++ < MLX5_EN_QP_FLUSH_MAX_ITER) - msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); - - if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER) - set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state); - - /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ - napi_synchronize(&rq->channel->napi); + cancel_work_sync(&rq->am.work); mlx5e_disable_rq(rq); mlx5e_free_rx_descs(rq); @@ -639,6 +650,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, } sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; + sq->min_inline_mode = + MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ? + param->min_inline_mode : 0; err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -721,6 +735,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]); MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); + MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 
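/* Aside: the rewritten RQ/SQ close paths in this hunk replace the old
 * timeout-polled flush with a fixed ordering. Distilled (names mirror
 * the driver, but this is an outline rather than the exact code):
 *
 *	set_bit(MLX5E_*_STATE_FLUSH, &q->state); // new work refuses to run
 *	napi_synchronize(&c->napi);              // in-flight poll finished
 *	cancel_work_sync(&rq->am.work);          // RQ only: stop moderation
 *	mlx5e_disable_*(q);                      // destroy the HW object
 *	mlx5e_free_*_descs(q);                   // only now walk/free rings
 *
 * The descriptor free must come last: until napi_synchronize() returns,
 * a softirq on another CPU may still be completing WQEs. */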
0 : 1); MLX5_SET(sqc, sqc, flush_in_error_en, 1); @@ -741,7 +756,8 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) return err; } -static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) +static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, + int next_state, bool update_rl, int rl_index) { struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; @@ -761,6 +777,10 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); + if (update_rl && next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); + } err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen); @@ -776,6 +796,8 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq) struct mlx5_core_dev *mdev = priv->mdev; mlx5_core_destroy_sq(mdev, sq->sqn); + if (sq->rate_limit) + mlx5_rl_remove_rate(mdev, sq->rate_limit); } static int mlx5e_open_sq(struct mlx5e_channel *c, @@ -793,12 +815,12 @@ static int mlx5e_open_sq(struct mlx5e_channel *c, if (err) goto err_destroy_sq; - err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + false, 0); if (err) goto err_disable_sq; if (sq->txq) { - set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); } @@ -822,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_close_sq(struct mlx5e_sq *sq) { - int tout = 0; - int err; + set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); + /* prevent netif_tx_wake_queue */ + napi_synchronize(&sq->channel->napi); if (sq->txq) { - clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); - /* prevent netif_tx_wake_queue */ - napi_synchronize(&sq->channel->napi); netif_tx_disable_queue(sq->txq); - /* ensure hw is notified of all pending wqes */ + /* last doorbell out, godspeed .. 
*/ if (mlx5e_sq_has_room_for(sq, 1)) mlx5e_send_nop(sq, true); - - err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, - MLX5_SQC_STATE_ERR); - if (err) - set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); } - /* wait till sq is empty, unless a TX timeout occurred on this SQ */ - while (sq->cc != sq->pc && - !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) { - msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); - if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER) - set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); - } - - /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ - napi_synchronize(&sq->channel->napi); - - mlx5e_free_tx_descs(sq); mlx5e_disable_sq(sq); + mlx5e_free_tx_descs(sq); mlx5e_destroy_sq(sq); } @@ -891,7 +895,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &priv->cq_uar; + mcq->uar = &mdev->mlx5e_res.cq_uar; for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); @@ -938,6 +942,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@ -967,8 +972,7 @@ static void mlx5e_disable_cq(struct mlx5e_cq *cq) static int mlx5e_open_cq(struct mlx5e_channel *c, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, - u16 moderation_usecs, - u16 moderation_frames) + struct mlx5e_cq_moder moderation) { int err; struct mlx5e_priv *priv = c->priv; @@ -984,8 +988,8 @@ static int mlx5e_open_cq(struct mlx5e_channel *c, if (MLX5_CAP_GEN(mdev, cq_moderation)) mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); + moderation.usec, + moderation.pkts); return 0; err_destroy_cq: @@ -1014,8 +1018,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, - priv->params.tx_cq_moderation_usec, - priv->params.tx_cq_moderation_pkts); + priv->params.tx_cq_moderation); if (err) goto err_close_tx_cqs; } @@ -1070,19 +1073,96 @@ static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix) { int i; - for (i = 0; i < MLX5E_MAX_NUM_TC; i++) + for (i = 0; i < priv->profile->max_tc; i++) priv->channeltc_to_txq_map[ix][i] = ix + i * priv->params.num_channels; } +static int mlx5e_set_sq_maxrate(struct net_device *dev, + struct mlx5e_sq *sq, u32 rate) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 rl_index = 0; + int err; + + if (rate == sq->rate_limit) + /* nothing to do */ + return 0; + + if (sq->rate_limit) + /* remove current rl index to free space to next ones */ + mlx5_rl_remove_rate(mdev, sq->rate_limit); + + sq->rate_limit = 0; + + if (rate) { + err = mlx5_rl_add_rate(mdev, rate, &rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + return err; + } + } + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RDY, true, rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + /* remove the rate from the table */ + if (rate) + mlx5_rl_remove_rate(mdev, rate); + return err; + } + + sq->rate_limit = rate; + return 0; +} + +static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) +{ + struct 
mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_sq *sq = priv->txq_to_sq_map[index]; + int err = 0; + + if (!mlx5_rl_is_supported(mdev)) { + netdev_err(dev, "Rate limiting is not supported on this device\n"); + return -EINVAL; + } + + /* rate is given in Mb/sec, HW config is in Kb/sec */ + rate = rate << 10; + + /* Check whether rate in valid range, 0 is always valid */ + if (rate && !mlx5_rl_is_in_range(mdev, rate)) { + netdev_err(dev, "TX rate %u, is not in range\n", rate); + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_set_sq_maxrate(dev, sq, rate); + if (!err) + priv->tx_rates[index] = rate; + mutex_unlock(&priv->state_lock); + + return err; +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { + struct mlx5e_cq_moder icosq_cq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + struct mlx5e_cq_moder rx_cq_profile; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; + struct mlx5e_sq *sq; int err; + int i; c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) @@ -1093,14 +1173,19 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = priv->params.num_tc; + if (priv->params.rx_am_enabled) + rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); + else + rx_cq_profile = priv->params.rx_cq_moderation; + mlx5e_build_channeltc_to_txq_map(priv, ix); netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); - err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder); if (err) goto err_napi_del; @@ -1109,8 +1194,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, goto err_close_icosq_cq; err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, - priv->params.rx_cq_moderation_usec, - priv->params.rx_cq_moderation_pkts); + rx_cq_profile); if (err) goto err_close_tx_cqs; @@ -1124,6 +1208,16 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_icosq; + for (i = 0; i < priv->params.num_tc; i++) { + u32 txq_ix = priv->channeltc_to_txq_map[ix][i]; + + if (priv->tx_rates[txq_ix]) { + sq = priv->txq_to_sq_map[txq_ix]; + mlx5e_set_sq_maxrate(priv->netdev, sq, + priv->tx_rates[txq_ix]); + } + } + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@ -1195,11 +1289,13 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); - MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; + + param->am_enabled = priv->params.rx_am_enabled; } static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) @@ -1218,7 +1314,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, void *wq = MLX5_ADDR_OF(sqc, sqc, wq); MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); - MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); 
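/* Aside: mlx5e_set_tx_maxrate() above is the backend of the per-queue
 * tx_maxrate knob, typically written through
 * /sys/class/net/<dev>/queues/tx-<n>/tx_maxrate. The one unit subtlety
 * is that the stack passes Mb/s while the device rate table is
 * programmed in Kb/s. A standalone sketch of the conversion and range
 * check, with made-up bounds standing in for mlx5_rl_is_in_range(): */

#include <stdio.h>
#include <stdint.h>

#define RL_MIN_KBPS 1000u	/* assumed illustrative bounds; the real */
#define RL_MAX_KBPS 100000000u	/* ones come from the device rate table  */

static int check_tx_maxrate(uint32_t rate_mbps)
{
	uint32_t rate_kbps = rate_mbps << 10;	/* Mb/s -> Kb/s, as in the driver */

	if (rate_kbps && (rate_kbps < RL_MIN_KBPS || rate_kbps > RL_MAX_KBPS))
		return -1;	/* the driver returns -ERANGE; 0 means "no limit" */
	return 0;
}

int main(void)
{
	printf("unlimited ok: %d, 25 Mb/s ok: %d\n",
	       check_tx_maxrate(0), check_tx_maxrate(25));
	return 0;
}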
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); } @@ -1233,6 +1329,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); param->max_inline = priv->params.tx_max_inline; + param->min_inline_mode = priv->params.tx_min_inline_mode; } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, @@ -1240,7 +1337,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, { void *cqc = param->cqc; - MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); + MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index); } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, @@ -1265,6 +1362,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, } mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = priv->params.rx_cq_period_mode; } static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, @@ -1275,6 +1374,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; } static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, @@ -1286,6 +1387,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; } static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@ -1432,7 +1535,8 @@ static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc, MLX5_SET(rqtc, rqtc, rq_num[0], rqn); } -static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) +static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, + int ix, struct mlx5e_rqt *rqt) { struct mlx5_core_dev *mdev = priv->mdev; void *rqtc; @@ -1455,34 +1559,36 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) else mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); - err = mlx5_core_create_rqt(mdev, in, inlen, rqtn); + err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); + if (!err) + rqt->enabled = true; kvfree(in); return err; } -static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn) +void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) { - mlx5_core_destroy_rqt(priv->mdev, rqtn); + rqt->enabled = false; + mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); } -static int mlx5e_create_rqts(struct mlx5e_priv *priv) +static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); - u32 *rqtn; + struct mlx5e_rqt *rqt = &priv->indir_rqt; + + return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt); +} + +int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) +{ + struct mlx5e_rqt *rqt; int err; int ix; - /* Indirect RQT */ - rqtn = &priv->indir_rqtn; - err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn); - if (err) - return err; - - /* Direct RQTs */ - for (ix = 0; ix < nch; ix++) { - rqtn = &priv->direct_tir[ix].rqtn; - err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn); + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { + rqt = &priv->direct_tir[ix].rqt; + err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt); if (err) goto err_destroy_rqts; } @@ -1491,24 +1597,11 @@ static int mlx5e_create_rqts(struct mlx5e_priv *priv) err_destroy_rqts: for (ix--; ix >= 0; ix--) - mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn); - - 
mlx5e_destroy_rqt(priv, priv->indir_rqtn); + mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt); return err; } -static void mlx5e_destroy_rqts(struct mlx5e_priv *priv) -{ - int nch = mlx5e_get_max_num_channels(priv->mdev); - int i; - - for (i = 0; i < nch; i++) - mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn); - - mlx5e_destroy_rqt(priv, priv->indir_rqtn); -} - int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1544,10 +1637,15 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) u32 rqtn; int ix; - rqtn = priv->indir_rqtn; - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + if (priv->indir_rqt.enabled) { + rqtn = priv->indir_rqt.rqtn; + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + } + for (ix = 0; ix < priv->params.num_channels; ix++) { - rqtn = priv->direct_tir[ix].rqtn; + if (!priv->direct_tir[ix].rqt.enabled) + continue; + rqtn = priv->direct_tir[ix].rqt.rqtn; mlx5e_redirect_rqt(priv, rqtn, 1, ix); } } @@ -1607,13 +1705,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) mlx5e_build_tir_ctx_lro(tirc, priv); for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in, + err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); if (err) goto free_in; } - for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) { + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in, inlen); if (err) @@ -1626,40 +1724,6 @@ free_in: return err; } -static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) -{ - void *in; - int inlen; - int err; - int i; - - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { - err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in, - inlen); - if (err) - return err; - } - - for (i = 0; i < priv->params.num_channels; i++) { - err = mlx5_core_modify_tir(priv->mdev, - priv->direct_tir[i].tirn, in, - inlen); - if (err) - return err; - } - - kvfree(in); - - return 0; -} - static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1731,6 +1795,7 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; int num_txqs; int err; @@ -1742,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev) netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->params.num_channels); - err = mlx5e_set_dev_port_mtu(netdev); - if (err) - goto err_clear_state_opened_flag; - err = mlx5e_open_channels(priv); if (err) { netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", @@ -1753,7 +1814,7 @@ int mlx5e_open_locked(struct net_device *netdev) goto err_clear_state_opened_flag; } - err = mlx5e_refresh_tirs_self_loopback_enable(priv); + err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev); if (err) { netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", __func__, err); @@ -1766,9 +1827,14 @@ int mlx5e_open_locked(struct net_device *netdev) #ifdef CONFIG_RFS_ACCEL priv->netdev->rx_cpu_rmap = priv->mdev->rmap; #endif + if (priv->profile->update_stats) + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - 
queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + err = mlx5e_add_sqs_fwd_rules(priv); + if (err) + goto err_close_channels; + } return 0; err_close_channels: @@ -1778,7 +1844,7 @@ err_clear_state_opened_flag: return err; } -static int mlx5e_open(struct net_device *netdev) +int mlx5e_open(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@ -1793,6 +1859,7 @@ static int mlx5e_open(struct net_device *netdev) int mlx5e_close_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; /* May already be CLOSED in case a previous configuration operation * (e.g RX/TX queue size change) that involves close&open failed. @@ -1802,6 +1869,9 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5e_remove_sqs_fwd_rules(priv); + mlx5e_timestamp_cleanup(priv); netif_carrier_off(priv->netdev); mlx5e_redirect_rqts(priv); @@ -1810,7 +1880,7 @@ int mlx5e_close_locked(struct net_device *netdev) return 0; } -static int mlx5e_close(struct net_device *netdev) +int mlx5e_close(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@ -1869,7 +1939,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &priv->cq_uar; + mcq->uar = &mdev->mlx5e_res.cq_uar; cq->priv = priv; @@ -1935,7 +2005,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) memset(in, 0, sizeof(in)); MLX5_SET(tisc, tisc, prio, tc << 1); - MLX5_SET(tisc, tisc, transport_domain, priv->tdn); + MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); } @@ -1945,12 +2015,12 @@ static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc) mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); } -static int mlx5e_create_tises(struct mlx5e_priv *priv) +int mlx5e_create_tises(struct mlx5e_priv *priv) { int err; int tc; - for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) { + for (tc = 0; tc < priv->profile->max_tc; tc++) { err = mlx5e_create_tis(priv, tc); if (err) goto err_close_tises; @@ -1965,11 +2035,11 @@ err_close_tises: return err; } -static void mlx5e_destroy_tises(struct mlx5e_priv *priv) +void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { int tc; - for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) + for (tc = 0; tc < priv->profile->max_tc; tc++) mlx5e_destroy_tis(priv, tc); } @@ -1978,7 +2048,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); - MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) @@ -1995,7 +2065,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, mlx5e_build_tir_ctx_lro(tirc, priv); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn); + MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); mlx5e_build_tir_ctx_hash(tirc, priv); switch (tt) { @@ -2085,7 +2155,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, u32 rqtn) { - 
MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); mlx5e_build_tir_ctx_lro(tirc, priv); @@ -2094,15 +2164,13 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); } -static int mlx5e_create_tirs(struct mlx5e_priv *priv) +static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); + struct mlx5e_tir *tir; void *tirc; int inlen; - u32 *tirn; int err; u32 *in; - int ix; int tt; inlen = MLX5_ST_SZ_BYTES(create_tir_in); @@ -2110,25 +2178,51 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv) if (!in) return -ENOMEM; - /* indirect tirs */ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { memset(in, 0, inlen); - tirn = &priv->indir_tirn[tt]; + tir = &priv->indir_tir[tt]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_indir_tir_ctx(priv, tirc, tt); - err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_tirs; } - /* direct tirs */ + kvfree(in); + + return 0; + +err_destroy_tirs: + for (tt--; tt >= 0; tt--) + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); + + kvfree(in); + + return err; +} + +int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) +{ + int nch = priv->profile->max_nch(priv->mdev); + struct mlx5e_tir *tir; + void *tirc; + int inlen; + int err; + u32 *in; + int ix; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + for (ix = 0; ix < nch; ix++) { memset(in, 0, inlen); - tirn = &priv->direct_tir[ix].tirn; + tir = &priv->direct_tir[ix]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_direct_tir_ctx(priv, tirc, - priv->direct_tir[ix].rqtn); - err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + priv->direct_tir[ix].rqt.rqtn); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_ch_tirs; } @@ -2139,27 +2233,28 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv) err_destroy_ch_tirs: for (ix--; ix >= 0; ix--) - mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn); - -err_destroy_tirs: - for (tt--; tt >= 0; tt--) - mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]); + mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]); kvfree(in); return err; } -static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) +static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); int i; - for (i = 0; i < nch; i++) - mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn); - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]); + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); +} + +void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv) +{ + int nch = priv->profile->max_nch(priv->mdev); + int i; + + for (i = 0; i < nch; i++) + mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]); } int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) @@ -2233,7 +2328,7 @@ mqprio: return mlx5e_setup_tc(dev, tc->tc); } -static struct rtnl_link_stats64 * +struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -2455,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) u16 max_mtu; u16 min_mtu; int err = 0; + bool reset; mlx5_query_port_max_mtu(mdev, &max_mtu, 1); @@ 
-2470,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) mutex_lock(&priv->state_lock); + reset = !priv->params.lro_en && + (priv->params.rq_wq_type != + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) + if (was_opened && reset) mlx5e_close_locked(netdev); netdev->mtu = new_mtu; + mlx5e_set_dev_port_mtu(netdev); - if (was_opened) + if (was_opened && reset) err = mlx5e_open_locked(netdev); mutex_unlock(&priv->state_lock); @@ -2585,25 +2686,31 @@ static int mlx5e_get_vf_stats(struct net_device *dev, } static void mlx5e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return; - mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); } static void mlx5e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return; - mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); } static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, @@ -2670,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev) if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) continue; sched_work = true; - set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); + set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); } @@ -2693,6 +2800,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -2713,8 +2821,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, - .ndo_add_vxlan_port = mlx5e_add_vxlan_port, - .ndo_del_vxlan_port = mlx5e_del_vxlan_port, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, @@ -2844,13 +2953,48 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) (pci_bw < 40000) && (pci_bw < link_speed)); } -static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, - struct net_device *netdev, - int num_channels) +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + params->rx_cq_period_mode = cq_period_mode; + + params->rx_cq_moderation.pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; +} + +static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) +{ + switch 
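/* Aside: the vxlan-specific ndos above were converted to the generic
 * udp_tunnel pair, so the handler must now filter on ti->type itself.
 * A hypothetical driver following the same pattern (the foo_* names are
 * placeholders, not mlx5 symbols, and this is a sketch rather than
 * compilable code): */

static void foo_udp_tunnel_add(struct net_device *netdev,
			       struct udp_tunnel_info *ti)
{
	struct foo_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;	/* e.g. GENEVE notifications: not offloaded here */

	/* hypothetical helper; mlx5e queues a work item instead */
	foo_vxlan_add_port(priv, ti->sa_family, be16_to_cpu(ti->port));
}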
(MLX5_CAP_ETH(mdev, wqe_inline_mode)) { + case MLX5E_INLINE_MODE_L2: + *min_inline_mode = MLX5_INLINE_MODE_L2; + break; + case MLX5E_INLINE_MODE_VPORT_CONTEXT: + mlx5_query_nic_vport_min_inline(mdev, + min_inline_mode); + break; + case MLX5_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = MLX5_INLINE_MODE_NONE; + break; + } +} + +static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); u32 link_speed = 0; u32 pci_bw = 0; + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? + MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -2896,15 +3040,16 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); - priv->params.rx_cq_moderation_usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - priv->params.rx_cq_moderation_pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - priv->params.tx_cq_moderation_usec = + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - priv->params.tx_cq_moderation_pkts = + priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); + mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode); priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; @@ -2912,14 +3057,20 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, sizeof(priv->params.toeplitz_hash_key)); mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, num_channels); + MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev)); priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + /* Initialize pflags */ + MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + priv->mdev = mdev; priv->netdev = netdev; - priv->params.num_channels = num_channels; + priv->params.num_channels = profile->max_nch(mdev); + priv->profile = profile; + priv->ppriv = ppriv; #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_ets_init(priv); @@ -2945,7 +3096,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } -static void mlx5e_build_netdev(struct net_device *netdev) +static const struct switchdev_ops mlx5e_switchdev_ops = { + .switchdev_port_attr_get = mlx5e_attr_get, +}; + +static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@ -3026,31 +3181,11 @@ static void mlx5e_build_netdev(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; mlx5e_set_netdev_dev_addr(netdev); -} -static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, - struct mlx5_core_mkey *mkey) -{ - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_create_mkey_mbox_in *in; - int err; - - in = mlx5_vzalloc(sizeof(*in)); - if (!in) - return -ENOMEM; - - in->seg.flags = MLX5_PERM_LOCAL_WRITE | - MLX5_PERM_LOCAL_READ | - MLX5_ACCESS_MODE_PA; - in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - - err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, - 
NULL); - - kvfree(in); - - return err; +#ifdef CONFIG_NET_SWITCHDEV + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + netdev->switchdev_ops = &mlx5e_switchdev_ops; +#endif } static void mlx5e_create_q_counter(struct mlx5e_priv *priv) @@ -3079,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) struct mlx5_create_mkey_mbox_in *in; struct mlx5_mkey_seg *mkc; int inlen = sizeof(*in); - u64 npages = - mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; + u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev), + BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)); int err; in = mlx5_vzalloc(inlen); @@ -3094,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) MLX5_PERM_LOCAL_WRITE | MLX5_ACCESS_MODE_MTT; + npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages); + mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - mkc->flags_pd = cpu_to_be32(priv->pdn); + mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn); mkc->len = cpu_to_be64(npages << PAGE_SHIFT); - mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages)); + mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages)); mkc->log2_page_size = PAGE_SHIFT; err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL, @@ -3108,160 +3245,236 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) return err; } -static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) +static void mlx5e_nic_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { - struct net_device *netdev; - struct mlx5e_priv *priv; - int nch = mlx5e_get_max_num_channels(mdev); - int err; - - if (mlx5e_check_required_hca_cap(mdev)) - return NULL; + struct mlx5e_priv *priv = netdev_priv(netdev); - netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), - nch * MLX5E_MAX_NUM_TC, - nch); - if (!netdev) { - mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); - return NULL; - } + mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); + mlx5e_build_nic_netdev(netdev); + mlx5e_vxlan_init(priv); +} - mlx5e_build_netdev_priv(mdev, netdev, nch); - mlx5e_build_netdev(netdev); +static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; - netif_carrier_off(netdev); + mlx5e_vxlan_cleanup(priv); - priv = netdev_priv(netdev); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5_eswitch_unregister_vport_rep(esw, 0); +} - priv->wq = create_singlethread_workqueue("mlx5e"); - if (!priv->wq) - goto err_free_netdev; +static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int err; + int i; - err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); + err = mlx5e_create_indirect_rqts(priv); if (err) { - mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); - goto err_destroy_wq; + mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err); + return err; } - err = mlx5_core_alloc_pd(mdev, &priv->pdn); + err = mlx5e_create_direct_rqts(priv); if (err) { - mlx5_core_err(mdev, "alloc pd failed, %d\n", err); - goto err_unmap_free_uar; + mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err); + goto err_destroy_indirect_rqts; } - err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn); + err = mlx5e_create_indirect_tirs(priv); if (err) { - mlx5_core_err(mdev, "alloc td failed, %d\n", err); - goto err_dealloc_pd; + mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err); + goto err_destroy_direct_rqts; } - err = mlx5e_create_mkey(priv, 
priv->pdn, &priv->mkey); + err = mlx5e_create_direct_tirs(priv); if (err) { - mlx5_core_err(mdev, "create mkey failed, %d\n", err); - goto err_dealloc_transport_domain; + mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err); + goto err_destroy_indirect_tirs; } - err = mlx5e_create_umr_mkey(priv); + err = mlx5e_create_flow_steering(priv); if (err) { - mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); - goto err_destroy_mkey; + mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); + goto err_destroy_direct_tirs; } + err = mlx5e_tc_init(priv); + if (err) + goto err_destroy_flow_steering; + + return 0; + +err_destroy_flow_steering: + mlx5e_destroy_flow_steering(priv); +err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv); +err_destroy_indirect_tirs: + mlx5e_destroy_indirect_tirs(priv); +err_destroy_direct_rqts: + for (i = 0; i < priv->profile->max_nch(mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); +err_destroy_indirect_rqts: + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + return err; +} + +static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) +{ + int i; + + mlx5e_tc_cleanup(priv); + mlx5e_destroy_flow_steering(priv); + mlx5e_destroy_direct_tirs(priv); + mlx5e_destroy_indirect_tirs(priv); + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); +} + +static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) +{ + int err; + err = mlx5e_create_tises(priv); if (err) { - mlx5_core_warn(mdev, "create tises failed, %d\n", err); - goto err_destroy_umr_mkey; + mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); + return err; } - err = mlx5e_open_drop_rq(priv); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_tises; +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); +#endif + return 0; +} + +static void mlx5e_nic_enable(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_eswitch_rep rep; + + if (mlx5e_vxlan_allowed(mdev)) { + rtnl_lock(); + udp_tunnel_get_rx_info(netdev); + rtnl_unlock(); } - err = mlx5e_create_rqts(priv); - if (err) { - mlx5_core_warn(mdev, "create rqts failed, %d\n", err); - goto err_close_drop_rq; + mlx5e_enable_async_events(priv); + queue_work(priv->wq, &priv->set_rx_mode_work); + + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); + rep.load = mlx5e_nic_rep_load; + rep.unload = mlx5e_nic_rep_unload; + rep.vport = 0; + rep.priv_data = priv; + mlx5_eswitch_register_vport_rep(esw, &rep); + } +} + +static void mlx5e_nic_disable(struct mlx5e_priv *priv) +{ + queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_disable_async_events(priv); +} + +static const struct mlx5e_profile mlx5e_nic_profile = { + .init = mlx5e_nic_init, + .cleanup = mlx5e_nic_cleanup, + .init_rx = mlx5e_init_nic_rx, + .cleanup_rx = mlx5e_cleanup_nic_rx, + .init_tx = mlx5e_init_nic_tx, + .cleanup_tx = mlx5e_cleanup_nic_tx, + .enable = mlx5e_nic_enable, + .disable = mlx5e_nic_disable, + .update_stats = mlx5e_update_stats, + .max_nch = mlx5e_get_max_num_channels, + .max_tc = MLX5E_MAX_NUM_TC, +}; + +void *mlx5e_create_netdev(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile, void *ppriv) +{ + struct net_device *netdev; + struct mlx5e_priv *priv; + int nch = profile->max_nch(mdev); + int err; 
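/* Aside: mlx5e_nic_profile above turns netdev bring-up into a small ops
 * table, which is what lets en_rep.c (added later in this patch) reuse
 * mlx5e_create_netdev() for VF representors. A hypothetical extra
 * profile only needs to supply its own callbacks (foo_* are
 * placeholders): */

static const struct mlx5e_profile foo_profile = {
	.init		= foo_init,		/* fill priv + netdev ops */
	.cleanup	= foo_cleanup,
	.init_rx	= foo_init_rx,		/* RQTs/TIRs for this flavor */
	.cleanup_rx	= foo_cleanup_rx,
	.init_tx	= foo_init_tx,		/* TISes for this flavor */
	.cleanup_tx	= foo_cleanup_tx,
	.max_nch	= mlx5e_get_max_num_channels,
	.max_tc		= 1,
	/* .enable, .disable and .update_stats may be left NULL;
	 * the core checks for NULL before calling them */
};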
+ + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), + nch * profile->max_tc, + nch); + if (!netdev) { + mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); + return NULL; } - err = mlx5e_create_tirs(priv); + profile->init(mdev, netdev, profile, ppriv); + + netif_carrier_off(netdev); + + priv = netdev_priv(netdev); + + priv->wq = create_singlethread_workqueue("mlx5e"); + if (!priv->wq) + goto err_free_netdev; + + err = mlx5e_create_umr_mkey(priv); if (err) { - mlx5_core_warn(mdev, "create tirs failed, %d\n", err); - goto err_destroy_rqts; + mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); + goto err_destroy_wq; } - err = mlx5e_create_flow_steering(priv); + err = profile->init_tx(priv); + if (err) + goto err_destroy_umr_mkey; + + err = mlx5e_open_drop_rq(priv); if (err) { - mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); - goto err_destroy_tirs; + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_cleanup_tx; } + err = profile->init_rx(priv); + if (err) + goto err_close_drop_rq; + mlx5e_create_q_counter(priv); mlx5e_init_l2_addr(priv); - mlx5e_vxlan_init(priv); - - err = mlx5e_tc_init(priv); - if (err) - goto err_dealloc_q_counters; - -#ifdef CONFIG_MLX5_CORE_EN_DCB - mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); -#endif + mlx5e_set_dev_port_mtu(netdev); err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_tc_cleanup; + goto err_dealloc_q_counters; } - if (mlx5e_vxlan_allowed(mdev)) { - rtnl_lock(); - vxlan_get_rx_port(netdev); - rtnl_unlock(); - } - - mlx5e_enable_async_events(priv); - queue_work(priv->wq, &priv->set_rx_mode_work); + if (profile->enable) + profile->enable(priv); return priv; -err_tc_cleanup: - mlx5e_tc_cleanup(priv); - err_dealloc_q_counters: mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_steering(priv); - -err_destroy_tirs: - mlx5e_destroy_tirs(priv); - -err_destroy_rqts: - mlx5e_destroy_rqts(priv); + profile->cleanup_rx(priv); err_close_drop_rq: mlx5e_close_drop_rq(priv); -err_destroy_tises: - mlx5e_destroy_tises(priv); +err_cleanup_tx: + profile->cleanup_tx(priv); err_destroy_umr_mkey: mlx5_core_destroy_mkey(mdev, &priv->umr_mkey); -err_destroy_mkey: - mlx5_core_destroy_mkey(mdev, &priv->mkey); - -err_dealloc_transport_domain: - mlx5_core_dealloc_transport_domain(mdev, priv->tdn); - -err_dealloc_pd: - mlx5_core_dealloc_pd(mdev, priv->pdn); - -err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &priv->cq_uar); - err_destroy_wq: destroy_workqueue(priv->wq); @@ -3271,15 +3484,63 @@ err_free_netdev: return NULL; } -static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) +static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev) { - struct mlx5e_priv *priv = vpriv; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + int vport; + u8 mac[ETH_ALEN]; + + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return; + + mlx5_query_nic_vport_mac_address(mdev, 0, mac); + + for (vport = 1; vport < total_vfs; vport++) { + struct mlx5_eswitch_rep rep; + + rep.load = mlx5e_vport_rep_load; + rep.unload = mlx5e_vport_rep_unload; + rep.vport = vport; + ether_addr_copy(rep.hw_id, mac); + mlx5_eswitch_register_vport_rep(esw, &rep); + } +} + +static void *mlx5e_add(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + void *ppriv = NULL; + void *ret; + + if (mlx5e_check_required_hca_cap(mdev)) + return NULL; + + if (mlx5e_create_mdev_resources(mdev)) + return NULL; + + 
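/* Aside: mlx5e_create_netdev() above keeps the usual kernel goto-unwind
 * discipline: each err_* label releases only what was acquired before
 * the failing step, in reverse order. Reduced to a skeleton (step_* and
 * undo_* are placeholders): */

static int foo_bringup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;	/* nothing to undo yet */

	err = step_b();
	if (err)
		goto err_undo_a;

	err = step_c();
	if (err)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return err;
}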
mlx5e_register_vport_rep(mdev); + + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + ppriv = &esw->offloads.vport_reps[0]; + + ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv); + if (!ret) { + mlx5e_destroy_mdev_resources(mdev); + return NULL; + } + return ret; +} + +void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) +{ + const struct mlx5e_profile *profile = priv->profile; struct net_device *netdev = priv->netdev; set_bit(MLX5E_STATE_DESTROYING, &priv->state); + if (profile->disable) + profile->disable(priv); - queue_work(priv->wq, &priv->set_rx_mode_work); - mlx5e_disable_async_events(priv); flush_workqueue(priv->wq); if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { netif_device_detach(netdev); @@ -3288,26 +3549,35 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) unregister_netdev(netdev); } - mlx5e_tc_cleanup(priv); - mlx5e_vxlan_cleanup(priv); mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_steering(priv); - mlx5e_destroy_tirs(priv); - mlx5e_destroy_rqts(priv); + profile->cleanup_rx(priv); mlx5e_close_drop_rq(priv); - mlx5e_destroy_tises(priv); + profile->cleanup_tx(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); - mlx5_core_destroy_mkey(priv->mdev, &priv->mkey); - mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); - mlx5_core_dealloc_pd(priv->mdev, priv->pdn); - mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); cancel_delayed_work_sync(&priv->update_stats_work); destroy_workqueue(priv->wq); + if (profile->cleanup) + profile->cleanup(priv); if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) free_netdev(netdev); } +static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + struct mlx5e_priv *priv = vpriv; + int vport; + + mlx5e_destroy_netdev(mdev, priv); + + for (vport = 1; vport < total_vfs; vport++) + mlx5_eswitch_unregister_vport_rep(esw, vport); + + mlx5e_destroy_mdev_resources(mdev); +} + static void *mlx5e_get_netdev(void *vpriv) { struct mlx5e_priv *priv = vpriv; @@ -3316,8 +3586,8 @@ static void *mlx5e_get_netdev(void *vpriv) } static struct mlx5_interface mlx5e_interface = { - .add = mlx5e_create_netdev, - .remove = mlx5e_destroy_netdev, + .add = mlx5e_add, + .remove = mlx5e_remove, .event = mlx5e_async_event, .protocol = MLX5_INTERFACE_PROTOCOL_ETH, .get_dev = mlx5e_get_netdev, @@ -3325,6 +3595,7 @@ static struct mlx5_interface mlx5e_interface = { void mlx5e_init(void) { + mlx5e_build_ptys2ethtool_map(); mlx5_register_interface(&mlx5e_interface); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c new file mode 100644 index 000000000..134de4a11 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -0,0 +1,431 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/mlx5/fs.h>
+#include <net/switchdev.h>
+#include <net/pkt_cls.h>
+
+#include "eswitch.h"
+#include "en.h"
+#include "en_tc.h"
+
+static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+
+static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+				  struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct counter_desc sw_rep_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+};
+
+#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
+
+static void mlx5e_rep_get_strings(struct net_device *dev,
+				  u32 stringset, uint8_t *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+			strcpy(data + (i * ETH_GSTRING_LEN),
+			       sw_rep_stats_desc[i].format);
+		break;
+	}
+}
+
+static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5e_sw_stats *s = &priv->stats.sw;
+	struct mlx5e_rq_stats *rq_stats;
+	struct mlx5e_sq_stats *sq_stats;
+	int i, j;
+
+	memset(s, 0, sizeof(*s));
+	for (i = 0; i < priv->params.num_channels; i++) {
+		rq_stats = &priv->channel[i]->rq.stats;
+
+		s->rx_packets += rq_stats->packets;
+		s->rx_bytes += rq_stats->bytes;
+
+		for (j = 0; j < priv->params.num_tc; j++) {
+			sq_stats = &priv->channel[i]->sq[j].stats;
+
+			s->tx_packets += sq_stats->packets;
+			s->tx_bytes += sq_stats->bytes;
+		}
+	}
+}
+
+static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
+					struct ethtool_stats *stats, u64 *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (!data)
+		return;
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_update_sw_rep_counters(priv);
+	mutex_unlock(&priv->state_lock);
+
+	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+					       sw_rep_stats_desc, i);
+}
+
+static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return NUM_VPORT_REP_COUNTERS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
+	.get_link	   = ethtool_op_get_link,
+	.get_strings	   = mlx5e_rep_get_strings,
+	.get_sset_count    = mlx5e_rep_get_sset_count,
+	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+};
+
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	if (esw->mode
== SRIOV_NONE) + return -EOPNOTSUPP; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = ETH_ALEN; + ether_addr_copy(attr->u.ppid.id, rep->hw_id); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) + +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_channel *c; + int n, tc, err, num_sqs = 0; + u16 *sqs; + + sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL); + if (!sqs) + return -ENOMEM; + + for (n = 0; n < priv->params.num_channels; n++) { + c = priv->channel[n]; + for (tc = 0; tc < c->num_tc; tc++) + sqs[num_sqs++] = c->sq[tc].sqn; + } + + err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs); + + kfree(sqs); + return err; +} + +int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = rep->priv_data; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + return mlx5e_add_sqs_fwd_rules(priv); + return 0; +} + +void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + + mlx5_eswitch_sqs2vport_stop(esw, rep); +} + +void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = rep->priv_data; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_remove_sqs_fwd_rules(priv); + + /* clean (and re-init) existing uplink offloaded TC rules */ + mlx5e_tc_cleanup(priv); + mlx5e_tc_init(priv); +} + +static int mlx5e_rep_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_eswitch_rep *rep = priv->ppriv; + int ret; + + ret = snprintf(buf, len, "%d", rep->vport - 1); + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + +static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *tc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + return -EOPNOTSUPP; + + switch (tc->type) { + case TC_SETUP_CLSFLOWER: + switch (tc->cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, proto, tc->cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, tc->cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, tc->cls_flower); + } + default: + return -EOPNOTSUPP; + } +} + +static const struct switchdev_ops mlx5e_rep_switchdev_ops = { + .switchdev_port_attr_get = mlx5e_attr_get, +}; + +static const struct net_device_ops mlx5e_netdev_ops_rep = { + .ndo_open = mlx5e_open, + .ndo_stop = mlx5e_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, + .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, + .ndo_get_stats64 = mlx5e_get_stats, +}; + +static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + + priv->params.log_sq_size = + MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; + priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + + priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, + BIT(priv->params.log_rq_size)); + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); + priv->params.num_tc = 1; + + priv->params.lro_wqe_sz = + MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + + priv->mdev = mdev; + priv->netdev = netdev; + priv->params.num_channels = profile->max_nch(mdev); + priv->profile = profile; + priv->ppriv = ppriv; + + mutex_init(&priv->state_lock); + + INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); +} + +static void mlx5e_build_rep_netdev(struct net_device *netdev) +{ + netdev->netdev_ops = &mlx5e_netdev_ops_rep; + + netdev->watchdog_timeo = 15 * HZ; + + netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; + +#ifdef CONFIG_NET_SWITCHDEV + netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; +#endif + + netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; + netdev->hw_features |= NETIF_F_HW_TC; + + eth_hw_addr_random(netdev); +} + +static void mlx5e_init_rep(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) +{ + mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv); + mlx5e_build_rep_netdev(netdev); +} + +static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_flow_rule *flow_rule; + int err; + int i; + + err = mlx5e_create_direct_rqts(priv); + if (err) { + mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err); + return err; + } + + err = mlx5e_create_direct_tirs(priv); + if (err) { + mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err); + goto err_destroy_direct_rqts; + } + + flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, + rep->vport, + priv->direct_tir[0].tirn); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + goto err_destroy_direct_tirs; + } + rep->vport_rx_rule = flow_rule; + + err = mlx5e_tc_init(priv); + if (err) + goto err_del_flow_rule; + + return 0; + +err_del_flow_rule: + mlx5_del_flow_rule(rep->vport_rx_rule); +err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv); +err_destroy_direct_rqts: + for (i = 0; i < priv->params.num_channels; i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + return err; +} + +static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch_rep *rep = priv->ppriv; + int i; + + mlx5e_tc_cleanup(priv); + mlx5_del_flow_rule(rep->vport_rx_rule); + mlx5e_destroy_direct_tirs(priv); + for (i = 0; i < priv->params.num_channels; i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); +} + +static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_create_tises(priv); + if (err) { + mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); + return err; + } + return 0; +} + +static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev) +{ +#define MLX5E_PORT_REPRESENTOR_NCH 1 + return MLX5E_PORT_REPRESENTOR_NCH; +} + +static struct mlx5e_profile mlx5e_rep_profile = { + .init = mlx5e_init_rep, + .init_rx = mlx5e_init_rep_rx, + .cleanup_rx = 
mlx5e_cleanup_rep_rx, + .init_tx = mlx5e_init_rep_tx, + .cleanup_tx = mlx5e_cleanup_nic_tx, + .update_stats = mlx5e_update_sw_rep_counters, + .max_nch = mlx5e_get_rep_max_num_channels, + .max_tc = 1, +}; + +int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep); + if (!rep->priv_data) { + pr_warn("Failed to create representor for vport %d\n", + rep->vport); + return -EINVAL; + } + return 0; +} + +void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = rep->priv_data; + + mlx5e_destroy_netdev(esw->dev, priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e41a06675..e7c969df3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev, } } -static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix) +static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) { - return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS + + return rq->mpwqe_mtt_offset + wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); } @@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5_wqe_data_seg *dseg = &wqe->data; struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); - u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix); + u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix); memset(wqe, 0, sizeof(*wqe)); cseg->opmod_idx_opcode = @@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; ucseg->klm_octowords = - cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE)); + cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); ucseg->bsf_octowords = - cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset)); + cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); dseg->lkey = sq->mkey_be; @@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq, { struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; int mtt_sz = mlx5e_get_wqe_mtt_sz(); - u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT; + u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT; int i; wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) * @@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq) struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); + + if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) { + mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]); + return; + } + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); rq->stats.mpwqe_frag++; @@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) wi->free_wqe(rq, wi); } -void mlx5e_free_rx_descs(struct mlx5e_rq *rq) -{ - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_ix_be; - u16 wqe_ix; - - while (!mlx5_wq_ll_is_empty(wq)) { - wqe_ix_be = *wq->tail_next; - wqe_ix = be16_to_cpu(wqe_ix_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); - rq->dealloc_wqe(rq, wqe_ix); - mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, - &wqe->next.next_wqe_index); - } -} - #define RQ_CANNOT_POST(rq) \ - (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ - 
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) + (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \ + test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { @@ -924,7 +913,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); int work_done = 0; - if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state))) + if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) return 0; if (cq->decmprs_left) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c new file mode 100644 index 000000000..1fffe48a9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "en.h" + +/* Adaptive moderation profiles */ +#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define MLX5E_RX_AM_DEF_PROFILE_CQE 1 +#define MLX5E_RX_AM_DEF_PROFILE_EQE 1 +#define MLX5E_PARAMS_AM_NUM_PROFILES 5 + +/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */ +#define MLX5_AM_EQE_PROFILES { \ + {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define MLX5_AM_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +static const struct mlx5e_cq_moder +profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = { + MLX5_AM_EQE_PROFILES, + MLX5_AM_CQE_PROFILES, +}; + +static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix) +{ + return profile[cq_period_mode][ix]; +} + +struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode) +{ + int default_profile_ix; + + if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE; + else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */ + default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE; + + return profile[rx_cq_period_mode][default_profile_ix]; +} + +/* Adaptive moderation logic */ +enum { + MLX5E_AM_START_MEASURE, + MLX5E_AM_MEASURE_IN_PROGRESS, + MLX5E_AM_APPLY_NEW_PROFILE, +}; + +enum { + MLX5E_AM_PARKING_ON_TOP, + MLX5E_AM_PARKING_TIRED, + MLX5E_AM_GOING_RIGHT, + MLX5E_AM_GOING_LEFT, +}; + +enum { + MLX5E_AM_STATS_WORSE, + MLX5E_AM_STATS_SAME, + MLX5E_AM_STATS_BETTER, +}; + +enum { + MLX5E_AM_STEPPED, + MLX5E_AM_TOO_TIRED, + MLX5E_AM_ON_EDGE, +}; + +static bool mlx5e_am_on_top(struct mlx5e_rx_am *am) +{ + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n"); + return true; + case MLX5E_AM_GOING_RIGHT: + return (am->steps_left > 1) && (am->steps_right == 1); + default: /* MLX5E_AM_GOING_LEFT */ + return (am->steps_right > 1) && (am->steps_left == 1); + } +} + +static void mlx5e_am_turn(struct mlx5e_rx_am *am) +{ + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_turn: PARKING\n"); + break; + case MLX5E_AM_GOING_RIGHT: + am->tune_state = MLX5E_AM_GOING_LEFT; + am->steps_left = 0; + break; + case MLX5E_AM_GOING_LEFT: + am->tune_state = MLX5E_AM_GOING_RIGHT; + am->steps_right = 0; + break; + } +} + +static int mlx5e_am_step(struct mlx5e_rx_am *am) +{ + if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2)) + return MLX5E_AM_TOO_TIRED; + + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_step: PARKING\n"); + break; + case MLX5E_AM_GOING_RIGHT: + if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1)) + return MLX5E_AM_ON_EDGE; + am->profile_ix++; + am->steps_right++; + break; + case MLX5E_AM_GOING_LEFT: + if (am->profile_ix == 0) + return MLX5E_AM_ON_EDGE; + am->profile_ix--; + am->steps_left++; + break; + } + + am->tired++; + return MLX5E_AM_STEPPED; +} + +static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am) +{ + am->steps_right = 0; + am->steps_left = 0; + am->tired = 0; + am->tune_state = MLX5E_AM_PARKING_ON_TOP; +} + +static void mlx5e_am_park_tired(struct mlx5e_rx_am *am) +{ + am->steps_right = 0; + am->steps_left = 0; + 
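For context on the tables defined just below this header: adaptive moderation keeps a fixed ladder of {usec, pkts} CQ-moderation profiles per period mode, and the tuner only ever moves one rung at a time, parking when it reaches an edge or has taken too many steps. A rough standalone model of the ladder walk; the EQE row's values are taken from MLX5_AM_EQE_PROFILES in this file, everything else is hypothetical:

/* Sketch only: step an index across a fixed profile ladder,
 * reporting when an edge is reached, as mlx5e_am_step() does. */
#include <stdio.h>

#define NUM_PROFILES 5

struct cq_moder { int usec; int pkts; };

static const struct cq_moder ladder[NUM_PROFILES] = {
	{1, 256}, {8, 256}, {64, 256}, {128, 256}, {256, 256}, /* EQE row */
};

enum { GOING_LEFT, GOING_RIGHT };

/* Returns 0 on a successful step, -1 when parked on an edge. */
static int step(int *ix, int dir)
{
	if (dir == GOING_RIGHT && *ix == NUM_PROFILES - 1)
		return -1;
	if (dir == GOING_LEFT && *ix == 0)
		return -1;
	*ix += (dir == GOING_RIGHT) ? 1 : -1;
	return 0;
}

int main(void)
{
	int ix = 1;

	while (!step(&ix, GOING_RIGHT))
		printf("now at {%d us, %d pkts}\n",
		       ladder[ix].usec, ladder[ix].pkts);
	return 0;
}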
am->tune_state = MLX5E_AM_PARKING_TIRED; +} + +static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) +{ + am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT : + MLX5E_AM_GOING_RIGHT; + mlx5e_am_step(am); +} + +static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, + struct mlx5e_rx_am_stats *prev) +{ + int diff; + + if (!prev->ppms) + return curr->ppms ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_SAME; + + diff = curr->ppms - prev->ppms; + if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ + return (diff > 0) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; + + if (!prev->epms) + return curr->epms ? MLX5E_AM_STATS_WORSE : + MLX5E_AM_STATS_SAME; + + diff = curr->epms - prev->epms; + if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ + return (diff < 0) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; + + return MLX5E_AM_STATS_SAME; +} + +static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats, + struct mlx5e_rx_am *am) +{ + int prev_state = am->tune_state; + int prev_ix = am->profile_ix; + int stats_res; + int step_res; + + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); + if (stats_res != MLX5E_AM_STATS_SAME) + mlx5e_am_exit_parking(am); + break; + + case MLX5E_AM_PARKING_TIRED: + am->tired--; + if (!am->tired) + mlx5e_am_exit_parking(am); + break; + + case MLX5E_AM_GOING_RIGHT: + case MLX5E_AM_GOING_LEFT: + stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); + if (stats_res != MLX5E_AM_STATS_BETTER) + mlx5e_am_turn(am); + + if (mlx5e_am_on_top(am)) { + mlx5e_am_park_on_top(am); + break; + } + + step_res = mlx5e_am_step(am); + switch (step_res) { + case MLX5E_AM_ON_EDGE: + mlx5e_am_park_on_top(am); + break; + case MLX5E_AM_TOO_TIRED: + mlx5e_am_park_tired(am); + break; + } + + break; + } + + if ((prev_state != MLX5E_AM_PARKING_ON_TOP) || + (am->tune_state != MLX5E_AM_PARKING_ON_TOP)) + am->prev_stats = *curr_stats; + + return am->profile_ix != prev_ix; +} + +static void mlx5e_am_sample(struct mlx5e_rq *rq, + struct mlx5e_rx_am_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = rq->stats.packets; + s->event_ctr = rq->cq.event_ctr; +} + +#define MLX5E_AM_NEVENTS 64 + +static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, + struct mlx5e_rx_am_sample *end, + struct mlx5e_rx_am_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + unsigned int npkts = end->pkt_ctr - start->pkt_ctr; + + if (!delta_us) { + WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n"); + return; + } + + curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; + curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; +} + +void mlx5e_rx_am_work(struct work_struct *work) +{ + struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am, + work); + struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am); + struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix]; + + mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq, + cur_profile.usec, cur_profile.pkts); + + am->state = MLX5E_AM_START_MEASURE; +} + +void mlx5e_rx_am(struct mlx5e_rq *rq) +{ + struct mlx5e_rx_am *am = &rq->am; + struct mlx5e_rx_am_sample end_sample; + struct mlx5e_rx_am_stats curr_stats; + u16 nevents; + + switch (am->state) { + case MLX5E_AM_MEASURE_IN_PROGRESS: + nevents = rq->cq.event_ctr - am->start_sample.event_ctr; + if (nevents < MLX5E_AM_NEVENTS) + break; + 
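mlx5e_am_stats_compare() above only declares a new profile better or worse when the measured rate moves by more than 10% of the previous sample, checking packets-per-millisecond first and events-per-millisecond as a tiebreaker. A self-contained sketch of that threshold test, using the same arithmetic in a hypothetical wrapper:

/* Sketch only: the 10% significance test from mlx5e_am_stats_compare(). */
#include <stdlib.h>
#include <stdio.h>

enum { STATS_WORSE = -1, STATS_SAME = 0, STATS_BETTER = 1 };

static int compare_rate(unsigned int curr, unsigned int prev)
{
	int diff = (int)curr - (int)prev;

	if (!prev)
		return curr ? STATS_BETTER : STATS_SAME;
	if ((100 * abs(diff)) / prev > 10)     /* more than 10% movement */
		return diff > 0 ? STATS_BETTER : STATS_WORSE;
	return STATS_SAME;
}

int main(void)
{
	printf("%d\n", compare_rate(105, 100)); /*  5% -> 0, same   */
	printf("%d\n", compare_rate(120, 100)); /* 20% -> 1, better */
	return 0;
}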
mlx5e_am_sample(rq, &end_sample); + mlx5e_am_calc_stats(&am->start_sample, &end_sample, + &curr_stats); + if (mlx5e_am_decision(&curr_stats, am)) { + am->state = MLX5E_AM_APPLY_NEW_PROFILE; + schedule_work(&am->work); + break; + } + /* fall through */ + case MLX5E_AM_START_MEASURE: + mlx5e_am_sample(rq, &am->start_sample); + am->state = MLX5E_AM_MEASURE_IN_PROGRESS; + break; + case MLX5E_AM_APPLY_NEW_PROFILE: + break; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index fcd490cc5..499487ce3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -70,6 +70,7 @@ struct mlx5e_sw_stats { u64 tx_queue_stopped; u64 tx_queue_wake; u64 tx_queue_dropped; + u64 tx_xmit_more; u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_mpwqe_frag; @@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) }, @@ -151,6 +153,22 @@ static const struct counter_desc vport_stats_desc[] = { VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) }, { "tx_vport_broadcast_bytes", VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) }, + { "rx_vport_rdma_unicast_packets", + VPORT_COUNTER_OFF(received_ib_unicast.packets) }, + { "rx_vport_rdma_unicast_bytes", + VPORT_COUNTER_OFF(received_ib_unicast.octets) }, + { "tx_vport_rdma_unicast_packets", + VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) }, + { "tx_vport_rdma_unicast_bytes", + VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) }, + { "rx_vport_rdma_multicast_packets", + VPORT_COUNTER_OFF(received_ib_multicast.packets) }, + { "rx_vport_rdma_multicast_bytes", + VPORT_COUNTER_OFF(received_ib_multicast.octets) }, + { "tx_vport_rdma_multicast_packets", + VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) }, + { "tx_vport_rdma_multicast_bytes", + VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) }, }; #define PPORT_802_3_OFF(c) \ @@ -238,11 +256,12 @@ static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { }; static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { - { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) }, - { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, - { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) }, - { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, - { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, + /* %s is "global" or "prio{i}" */ + { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) }, + { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, + { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) }, + { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, + { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; struct mlx5e_rq_stats { @@ -281,6 +300,7 @@ struct mlx5e_sq_stats { /* commonly accessed in data path */ u64 packets; u64 bytes; + u64 xmit_more; u64 tso_packets; u64 tso_bytes; u64 tso_inner_packets; @@ -307,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, { 
MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0db51cc39..22cfc4ac1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -37,8 +37,11 @@ #include #include #include +#include +#include #include "en.h" #include "en_tc.h" +#include "eswitch.h" struct mlx5e_tc_flow { struct rhash_head node; @@ -49,9 +52,9 @@ struct mlx5e_tc_flow { #define MLX5E_TC_TABLE_NUM_ENTRIES 1024 #define MLX5E_TC_TABLE_NUM_GROUPS 4 -static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, - u32 *match_c, u32 *match_v, - u32 action, u32 flow_tag) +static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + u32 action, u32 flow_tag) { struct mlx5_core_dev *dev = priv->mdev; struct mlx5_flow_destination dest = { 0 }; @@ -62,7 +65,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = priv->fs.vlan.ft.t; - } else { + } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(dev, true); if (IS_ERR(counter)) return ERR_CAST(counter); @@ -88,8 +91,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, table_created = true; } - rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS, - match_c, match_v, + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + rule = mlx5_add_flow_rule(priv->fs.tc.t, spec, action, flow_tag, &dest); @@ -109,6 +112,22 @@ err_create_ft: return rule; } +static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + u32 action, u32 dst_vport) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + u32 src_vport; + + if (rep->vport) /* set source vport for the flow */ + src_vport = rep->vport; + else + src_vport = FDB_UPLINK_VPORT; + + return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport); +} + static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5_flow_rule *rule) { @@ -120,18 +139,19 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, mlx5_fc_destroy(priv->mdev, counter); - if (!mlx5e_tc_num_filters(priv)) { + if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } } -static int parse_cls_flower(struct mlx5e_priv *priv, - u32 *match_c, u32 *match_v, +static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { - void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers); - void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); + void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers); u16 addr_type = 0; u8 ip_proto = 0; @@ -294,10 +314,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv, return 0; } -static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, - u32 *action, u32 *flow_tag) +static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 
+ u32 *action, u32 *flow_tag) { const struct tc_action *a; + LIST_HEAD(actions); if (tc_no_actions(exts)) return -EINVAL; @@ -305,7 +326,8 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; *action = 0; - tc_for_each_action(a, exts) { + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { /* Only support a single action per rule */ if (*action) return -EINVAL; @@ -338,17 +360,68 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } +static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, + u32 *action, u32 *dest_vport) +{ + const struct tc_action *a; + LIST_HEAD(actions); + + if (tc_no_actions(exts)) + return -EINVAL; + + *action = 0; + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + /* Only support a single action per rule */ + if (*action) + return -EINVAL; + + if (is_tcf_gact_shot(a)) { + *action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + continue; + } + + if (is_tcf_mirred_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + struct net_device *out_dev; + struct mlx5e_priv *out_priv; + struct mlx5_eswitch_rep *out_rep; + + out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); + + if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) { + pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", + priv->netdev->name, out_dev->name); + return -EINVAL; + } + + out_priv = netdev_priv(out_dev); + out_rep = out_priv->ppriv; + if (out_rep->vport == 0) + *dest_vport = FDB_UPLINK_VPORT; + else + *dest_vport = out_rep->vport; + *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + continue; + } + + return -EINVAL; + } + return 0; +} + int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, struct tc_cls_flower_offload *f) { struct mlx5e_tc_table *tc = &priv->fs.tc; - u32 *match_c; - u32 *match_v; int err = 0; - u32 flow_tag; - u32 action; + u32 flow_tag, action, dest_vport = 0; struct mlx5e_tc_flow *flow; + struct mlx5_flow_spec *spec; struct mlx5_flow_rule *old = NULL; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, tc->ht_params); @@ -357,49 +430,53 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, else flow = kzalloc(sizeof(*flow), GFP_KERNEL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - if (!match_c || !match_v || !flow) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec || !flow) { err = -ENOMEM; goto err_free; } flow->cookie = f->cookie; - err = parse_cls_flower(priv, match_c, match_v, f); + err = parse_cls_flower(priv, spec, f); if (err < 0) goto err_free; - err = parse_tc_actions(priv, f->exts, &action, &flow_tag); - if (err < 0) + if (esw && esw->mode == SRIOV_OFFLOADS) { + err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport); + if (err < 0) + goto err_free; + flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport); + } else { + err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag); + if (err < 0) + goto err_free; + flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag); + } + + if (IS_ERR(flow->rule)) { + err = PTR_ERR(flow->rule); goto err_free; + } err = rhashtable_insert_fast(&tc->ht, &flow->node, tc->ht_params); if (err) - goto err_free; - - flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action, - flow_tag); - if 
(IS_ERR(flow->rule)) { - err = PTR_ERR(flow->rule); - goto err_hash_del; - } + goto err_del_rule; if (old) mlx5e_tc_del_flow(priv, old); goto out; -err_hash_del: - rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); +err_del_rule: + mlx5_del_flow_rule(flow->rule); err_free: if (!old) kfree(flow); out: - kfree(match_c); - kfree(match_v); + kvfree(spec); return err; } @@ -430,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; struct tc_action *a; struct mlx5_fc *counter; + LIST_HEAD(actions); u64 bytes; u64 packets; u64 lastuse; @@ -445,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); - tc_for_each_action(a, f->exts) + tcf_exts_to_list(f->exts, &actions); + list_for_each_entry(a, &actions, list) tcf_action_stats_update(a, bytes, packets, lastuse); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 5740b465e..eb0e72537 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -128,6 +128,50 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, return priv->channeltc_to_txq_map[channel_ix][up]; } +static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) +{ +#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN) + + return max(skb_network_offset(skb), MLX5E_MIN_INLINE); +} + +static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb) +{ + struct flow_keys keys; + + if (skb_transport_header_was_set(skb)) + return skb_transport_offset(skb); + else if (skb_flow_dissect_flow_keys(skb, &keys, 0)) + return keys.control.thoff; + else + return mlx5e_skb_l2_header_offset(skb); +} + +static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, + struct sk_buff *skb) +{ + int hlen; + + switch (mode) { + case MLX5_INLINE_MODE_TCP_UDP: + hlen = eth_get_headlen(skb->data, skb_headlen(skb)); + if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) + hlen += VLAN_HLEN; + return hlen; + case MLX5_INLINE_MODE_IP: + /* When transport header is set to zero, it means no transport + * header. When transport header is set to 0xff's, it means + * transport header wasn't set. + */ + if (skb_transport_offset(skb)) + return mlx5e_skb_l3_header_offset(skb); + /* fall through */ + case MLX5_INLINE_MODE_L2: + default: + return mlx5e_skb_l2_header_offset(skb); + } +} + static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct sk_buff *skb, bool bf) { @@ -135,8 +179,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, * headers and occur before the data gather. 
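The mlx5e_calc_min_inline() helper added above chooses how many header bytes must be copied inline into the WQE: at least ETH_HLEN + VLAN_HLEN in L2 mode, through the end of the network header in IP mode, and through the transport header in TCP/UDP mode. A rough userspace model of that selection; the offsets are hypothetical caller-supplied values rather than ones dissected from a real skb:

/* Sketch only: pick the inline length per mode, clamping at the
 * Ethernet(+VLAN) minimum, loosely following mlx5e_calc_min_inline(). */
#include <stdio.h>

#define ETH_HLEN   14
#define VLAN_HLEN  4
#define MIN_INLINE (ETH_HLEN + VLAN_HLEN)

enum inline_mode { MODE_L2, MODE_IP, MODE_TCP_UDP };

static int min_inline(enum inline_mode mode, int net_end, int xport_end)
{
	switch (mode) {
	case MODE_TCP_UDP:
		return xport_end;        /* inline through the L4 header */
	case MODE_IP:
		if (net_end)
			return net_end;  /* inline through the L3 header */
		/* fall through: network header unknown */
	case MODE_L2:
	default:
		return MIN_INLINE;       /* Ethernet + VLAN header */
	}
}

int main(void)
{
	/* e.g. untagged IPv4/TCP frame: L3 ends at 34, L4 at 54 */
	printf("L2:%d IP:%d TCP:%d\n",
	       min_inline(MODE_L2, 34, 54),
	       min_inline(MODE_IP, 34, 54),
	       min_inline(MODE_TCP_UDP, 34, 54));
	return 0;
}

In the driver itself the TCP/UDP case derives the length with eth_get_headlen() and the IP case falls back to the L2 size when no transport header was set, as the hunk above shows.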
* Therefore these headers must be copied into the WQE */ -#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN) - if (bf) { u16 ihs = skb_headlen(skb); @@ -146,8 +188,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, if (ihs <= sq->max_inline) return skb_headlen(skb); } - - return max(skb_network_offset(skb), MLX5E_MIN_INLINE); + return mlx5e_calc_min_inline(sq->min_inline_mode, skb); } static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, @@ -315,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->stats.stopped++; } + sq->stats.xmit_more += skb->xmit_more; if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { int bf_sz = 0; @@ -353,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) return mlx5e_sq_xmit(sq, skb); } -void mlx5e_free_tx_descs(struct mlx5e_sq *sq) -{ - struct mlx5e_tx_wqe_info *wi; - struct sk_buff *skb; - u16 ci; - int i; - - while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; - skb = sq->skb[ci]; - wi = &sq->wqe_info[ci]; - - if (!skb) { /* nop */ - sq->cc++; - continue; - } - - for (i = 0; i < wi->num_dma; i++) { - struct mlx5e_sq_dma *dma = - mlx5e_dma_get(sq, sq->dma_fifo_cc++); - - mlx5e_tx_dma_unmap(sq->pdev, dma); - } - - dev_kfree_skb_any(skb); - sq->cc += wi->num_wqebbs; - } -} - bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_sq *sq; @@ -393,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) sq = container_of(cq, struct mlx5e_sq, cq); - if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state))) + if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) return false; npkts = 0; @@ -471,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) netdev_tx_completed_queue(sq->txq, npkts, nbytes); if (netif_tx_queue_stopped(sq->txq) && - mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) && - likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) { - netif_tx_wake_queue(sq->txq); - sq->stats.wake++; + mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) { + netif_tx_wake_queue(sq->txq); + sq->stats.wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); } + +void mlx5e_free_tx_descs(struct mlx5e_sq *sq) +{ + struct mlx5e_tx_wqe_info *wi; + struct sk_buff *skb; + u16 ci; + int i; + + while (sq->cc != sq->pc) { + ci = sq->cc & sq->wq.sz_m1; + skb = sq->skb[ci]; + wi = &sq->wqe_info[ci]; + + if (!skb) { /* nop */ + sq->cc++; + continue; + } + + for (i = 0; i < wi->num_dma; i++) { + struct mlx5e_sq_dma *dma = + mlx5e_dma_get(sq, sq->dma_fifo_cc++); + + mlx5e_tx_dma_unmap(sq->pdev, dma); + } + + dev_kfree_skb_any(skb); + sq->cc += wi->num_wqebbs; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index c38781fa5..9bf33bb69 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) { + struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq); struct mlx5_wq_cyc *wq; struct mlx5_cqe64 *cqe; - struct mlx5e_sq *sq; u16 sqcc; + if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) + return; + cqe = mlx5e_get_cqe(cq); if (likely(!cqe)) return; - sq = container_of(cq, struct mlx5e_sq, cq); wq = &sq->wq; /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), @@ -136,6 +138,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < 
c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); + + if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state)) + mlx5e_rx_am(&c->rq); + mlx5e_cq_arm(&c->rq.cq); mlx5e_cq_arm(&c->icosq.cq); @@ -146,6 +152,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + cq->event_ctr++; set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); napi_schedule(cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index aebbd6ccb..b247949df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -40,17 +40,6 @@ #define UPLINK_VPORT 0xFFFF -#define MLX5_DEBUG_ESWITCH_MASK BIT(3) - -#define esw_info(dev, format, ...) \ - pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) - -#define esw_warn(dev, format, ...) \ - pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) - -#define esw_debug(dev, format, ...) \ - mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) - enum { MLX5_ACTION_NONE = 0, MLX5_ACTION_ADD = 1, @@ -92,6 +81,9 @@ enum { MC_ADDR_CHANGE | \ PROMISC_CHANGE) +int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); +void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); + static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) { @@ -337,25 +329,23 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, MLX5_MATCH_OUTER_HEADERS); struct mlx5_flow_rule *flow_rule = NULL; struct mlx5_flow_destination dest; + struct mlx5_flow_spec *spec; void *mv_misc = NULL; void *mc_misc = NULL; u8 *dmac_v = NULL; u8 *dmac_c = NULL; - u32 *match_v; - u32 *match_c; if (rx_rule) match_header |= MLX5_MATCH_MISC_PARAMETERS; - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - if (!match_v || !match_c) { + + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { pr_warn("FDB: Failed to alloc match parameters\n"); - goto out; + return NULL; } - - dmac_v = MLX5_ADDR_OF(fte_match_param, match_v, + dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dmac_47_16); - dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, + dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.dmac_47_16); if (match_header & MLX5_MATCH_OUTER_HEADERS) { @@ -364,8 +354,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, } if (match_header & MLX5_MATCH_MISC_PARAMETERS) { - mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters); - mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); + mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT); MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); } @@ -376,11 +368,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, esw_debug(esw->dev, "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", dmac_v, dmac_c, vport); + spec->match_criteria_enable = match_header; flow_rule = - mlx5_add_flow_rule(esw->fdb_table.fdb, - match_header, - match_c, - match_v, + mlx5_add_flow_rule(esw->fdb_table.fdb, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 0, &dest); if (IS_ERR(flow_rule)) { @@ -389,9 +379,8 @@ __esw_fdb_set_vport_rule(struct 
mlx5_eswitch *esw, u32 vport, bool rx_rule, dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); flow_rule = NULL; } -out: - kfree(match_v); - kfree(match_c); + + kvfree(spec); return flow_rule; } @@ -428,7 +417,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); } -static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) +static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_core_dev *dev = esw->dev; @@ -479,7 +468,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create flow group err(%d)\n", err); goto out; } - esw->fdb_table.addr_grp = g; + esw->fdb_table.legacy.addr_grp = g; /* Allmulti group : One rule that forwards any mcast traffic */ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, @@ -494,7 +483,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); goto out; } - esw->fdb_table.allmulti_grp = g; + esw->fdb_table.legacy.allmulti_grp = g; /* Promiscuous group : * One rule that forward all unmatched traffic from previous groups @@ -511,17 +500,17 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); goto out; } - esw->fdb_table.promisc_grp = g; + esw->fdb_table.legacy.promisc_grp = g; out: if (err) { - if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) { - mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); - esw->fdb_table.allmulti_grp = NULL; + if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) { + mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); + esw->fdb_table.legacy.allmulti_grp = NULL; } - if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) { - mlx5_destroy_flow_group(esw->fdb_table.addr_grp); - esw->fdb_table.addr_grp = NULL; + if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) { + mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); + esw->fdb_table.legacy.addr_grp = NULL; } if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) { mlx5_destroy_flow_table(esw->fdb_table.fdb); @@ -533,20 +522,20 @@ out: return err; } -static void esw_destroy_fdb_table(struct mlx5_eswitch *esw) +static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) { if (!esw->fdb_table.fdb) return; esw_debug(esw->dev, "Destroy FDB Table\n"); - mlx5_destroy_flow_group(esw->fdb_table.promisc_grp); - mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); - mlx5_destroy_flow_group(esw->fdb_table.addr_grp); + mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); + mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); + mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); mlx5_destroy_flow_table(esw->fdb_table.fdb); esw->fdb_table.fdb = NULL; - esw->fdb_table.addr_grp = NULL; - esw->fdb_table.allmulti_grp = NULL; - esw->fdb_table.promisc_grp = NULL; + esw->fdb_table.legacy.addr_grp = NULL; + esw->fdb_table.legacy.allmulti_grp = NULL; + esw->fdb_table.legacy.promisc_grp = NULL; } /* E-Switch vport UC/MC lists management */ @@ -578,7 +567,8 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) if (err) goto abort; - if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */ + /* SRIOV is enabled: Forward UC MAC to vport */ + if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY) vaddr->flow_rule = 
esw_fdb_set_vport_rule(esw, mac, vport); esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n", @@ -1300,9 +1290,8 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, static int esw_vport_ingress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + struct mlx5_flow_spec *spec; u8 smac[ETH_ALEN]; - u32 *match_v; - u32 *match_c; int err = 0; u8 *smac_v; @@ -1336,9 +1325,8 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", vport->vport, vport->vlan, vport->qos); - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - if (!match_v || !match_c) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { err = -ENOMEM; esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n", vport->vport, err); @@ -1346,22 +1334,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, } if (vport->vlan || vport->qos) - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); if (vport->spoofchk) { - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16); - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0); smac_v = MLX5_ADDR_OF(fte_match_param, - match_v, + spec->match_value, outer_headers.smac_47_16); ether_addr_copy(smac_v, smac); } + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; vport->ingress.allow_rule = - mlx5_add_flow_rule(vport->ingress.acl, - MLX5_MATCH_OUTER_HEADERS, - match_c, - match_v, + mlx5_add_flow_rule(vport->ingress.acl, spec, MLX5_FLOW_CONTEXT_ACTION_ALLOW, 0, NULL); if (IS_ERR(vport->ingress.allow_rule)) { @@ -1372,13 +1358,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, goto out; } - memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); - memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + memset(spec, 0, sizeof(*spec)); vport->ingress.drop_rule = - mlx5_add_flow_rule(vport->ingress.acl, - 0, - match_c, - match_v, + mlx5_add_flow_rule(vport->ingress.acl, spec, MLX5_FLOW_CONTEXT_ACTION_DROP, 0, NULL); if (IS_ERR(vport->ingress.drop_rule)) { @@ -1392,17 +1374,14 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, out: if (err) esw_vport_cleanup_ingress_rules(esw, vport); - - kfree(match_v); - kfree(match_c); + kvfree(spec); return err; } static int esw_vport_egress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - u32 *match_v; - u32 *match_c; + struct mlx5_flow_spec *spec; int err = 0; esw_vport_cleanup_egress_rules(esw, vport); @@ -1418,9 +1397,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", vport->vport, vport->vlan, vport->qos); - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - if (!match_v || !match_c) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { err = -ENOMEM; esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n", vport->vport, err); @@ -1428,16 +1406,14 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, } /* Allowed vlan rule */ - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); - 
MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid); - MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; vport->egress.allowed_vlan = - mlx5_add_flow_rule(vport->egress.acl, - MLX5_MATCH_OUTER_HEADERS, - match_c, - match_v, + mlx5_add_flow_rule(vport->egress.acl, spec, MLX5_FLOW_CONTEXT_ACTION_ALLOW, 0, NULL); if (IS_ERR(vport->egress.allowed_vlan)) { @@ -1449,13 +1425,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, } /* Drop others rule (star rule) */ - memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); - memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + memset(spec, 0, sizeof(*spec)); vport->egress.drop_rule = - mlx5_add_flow_rule(vport->egress.acl, - 0, - match_c, - match_v, + mlx5_add_flow_rule(vport->egress.acl, spec, MLX5_FLOW_CONTEXT_ACTION_DROP, 0, NULL); if (IS_ERR(vport->egress.drop_rule)) { @@ -1465,8 +1437,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, vport->egress.drop_rule = NULL; } out: - kfree(match_v); - kfree(match_c); + kvfree(spec); return err; } @@ -1480,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); - if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ + /* Only VFs need ACLs for VST and spoofchk filtering */ + if (vport_num && esw->mode == SRIOV_LEGACY) { esw_vport_ingress_config(esw, vport); esw_vport_egress_config(esw, vport); } @@ -1531,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) */ esw_vport_change_handle_locked(vport); vport->enabled_events = 0; - if (vport_num) { + if (vport_num && esw->mode == SRIOV_LEGACY) { esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); } @@ -1540,10 +1512,10 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ -int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) +int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; - int i; + int i, enabled_events; if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) @@ -1561,16 +1533,20 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n"); - esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs); - + esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); + esw->mode = mode; esw_disable_vport(esw, 0); - err = esw_create_fdb_table(esw, nvfs + 1); + if (mode == SRIOV_LEGACY) + err = esw_create_legacy_fdb_table(esw, nvfs + 1); + else + err = esw_offloads_init(esw, nvfs + 1); if (err) goto abort; + enabled_events = (mode == SRIOV_LEGACY) ? 
SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE; for (i = 0; i <= nvfs; i++) - esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS); + esw_enable_vport(esw, i, enabled_events); esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n", esw->enabled_vports); @@ -1578,22 +1554,25 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) abort: esw_enable_vport(esw, 0, UC_ADDR_CHANGE); + esw->mode = SRIOV_NONE; return err; } void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) { struct esw_mc_addr *mc_promisc; + int nvports; int i; if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return; - esw_info(esw->dev, "disable SRIOV: active vports(%d)\n", - esw->enabled_vports); + esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", + esw->enabled_vports, esw->mode); mc_promisc = esw->mc_promisc; + nvports = esw->enabled_vports; for (i = 0; i < esw->total_vports; i++) esw_disable_vport(esw, i); @@ -1601,8 +1580,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) if (mc_promisc && mc_promisc->uplink_rule) mlx5_del_flow_rule(mc_promisc->uplink_rule); - esw_destroy_fdb_table(esw); + if (esw->mode == SRIOV_LEGACY) + esw_destroy_legacy_fdb_table(esw); + else if (esw->mode == SRIOV_OFFLOADS) + esw_offloads_cleanup(esw, nvports); + esw->mode = SRIOV_NONE; /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ esw_enable_vport(esw, 0, UC_ADDR_CHANGE); } @@ -1660,6 +1643,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } + esw->offloads.vport_reps = + kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep), + GFP_KERNEL); + if (!esw->offloads.vport_reps) { + err = -ENOMEM; + goto abort; + } + mutex_init(&esw->state_lock); for (vport_num = 0; vport_num < total_vports; vport_num++) { @@ -1673,6 +1664,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->total_vports = total_vports; esw->enabled_vports = 0; + esw->mode = SRIOV_NONE; dev->priv.eswitch = esw; esw_enable_vport(esw, 0, UC_ADDR_CHANGE); @@ -1683,6 +1675,7 @@ abort: destroy_workqueue(esw->work_queue); kfree(esw->l2_table.bitmap); kfree(esw->vports); + kfree(esw->offloads.vport_reps); kfree(esw); return err; } @@ -1700,6 +1693,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) destroy_workqueue(esw->work_queue); kfree(esw->l2_table.bitmap); kfree(esw->mc_promisc); + kfree(esw->offloads.vport_reps); kfree(esw->vports); kfree(esw); } @@ -1775,7 +1769,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, vport, err); mutex_lock(&esw->state_lock); - if (evport->enabled) + if (evport->enabled && esw->mode == SRIOV_LEGACY) err = esw_vport_ingress_config(esw, evport); mutex_unlock(&esw->state_lock); return err; @@ -1847,7 +1841,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, mutex_lock(&esw->state_lock); evport->vlan = vlan; evport->qos = qos; - if (evport->enabled) { + if (evport->enabled && esw->mode == SRIOV_LEGACY) { err = esw_vport_ingress_config(esw, evport); if (err) goto out; @@ -1876,10 +1870,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, mutex_lock(&esw->state_lock); pschk = evport->spoofchk; evport->spoofchk = spoofchk; - if (evport->enabled) + if (evport->enabled && esw->mode == SRIOV_LEGACY) { err = esw_vport_ingress_config(esw, evport); - if (err) - evport->spoofchk = pschk; + if (err) + evport->spoofchk = pschk; + } mutex_unlock(&esw->state_lock); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 
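With the changes above, mlx5_eswitch_enable_sriov() branches on the requested mode: SRIOV_LEGACY creates the legacy FDB table while SRIOV_OFFLOADS calls esw_offloads_init(), and vports are then armed with SRIOV_VPORT_EVENTS or only UC_ADDR_CHANGE respectively. A compact sketch of that dispatch, with hypothetical stubs standing in for the table-creation calls:

/* Sketch only: mode-dependent setup, as in mlx5_eswitch_enable_sriov(). */
#include <stdio.h>

enum sriov_mode { SRIOV_NONE, SRIOV_LEGACY, SRIOV_OFFLOADS };

static int create_legacy_fdb(int nvports)
{
	printf("legacy fdb, %d vports\n", nvports);
	return 0;
}

static int offloads_init(int nvports)
{
	printf("offloads fdb, %d vports\n", nvports);
	return 0;
}

static int enable_sriov(enum sriov_mode mode, int nvfs)
{
	int err;

	/* PF counts as one vport, hence nvfs + 1 as in the patch */
	err = (mode == SRIOV_LEGACY) ? create_legacy_fdb(nvfs + 1)
				     : offloads_init(nvfs + 1);
	if (err)
		return err;

	/* legacy mode watches all vport events; offloads only UC changes */
	printf("event mask: %s\n",
	       mode == SRIOV_LEGACY ? "SRIOV_VPORT_EVENTS" : "UC_ADDR_CHANGE");
	return 0;
}

int main(void)
{
	return enable_sriov(SRIOV_OFFLOADS, 4);
}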
index fd6800256..a96140971 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_link.h>
+#include <net/devlink.h>
 #include <linux/mlx5/device.h>
 
 #define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -46,6 +47,8 @@
 #define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
 #define MLX5_L2_ADDR_HASH(addr) (addr[5])
 
+#define FDB_UPLINK_VPORT 0xffff
+
 /* L2 -mac address based- hash helpers */
 struct l2addr_node {
 	struct hlist_node hlist;
@@ -134,9 +137,50 @@ struct mlx5_l2_table {
 
 struct mlx5_eswitch_fdb {
 	void *fdb;
-	struct mlx5_flow_group *addr_grp;
-	struct mlx5_flow_group *allmulti_grp;
-	struct mlx5_flow_group *promisc_grp;
+	union {
+		struct legacy_fdb {
+			struct mlx5_flow_group *addr_grp;
+			struct mlx5_flow_group *allmulti_grp;
+			struct mlx5_flow_group *promisc_grp;
+		} legacy;
+
+		struct offloads_fdb {
+			struct mlx5_flow_table *fdb;
+			struct mlx5_flow_group *send_to_vport_grp;
+			struct mlx5_flow_group *miss_grp;
+			struct mlx5_flow_rule *miss_rule;
+		} offloads;
+	};
+};
+
+enum {
+	SRIOV_NONE,
+	SRIOV_LEGACY,
+	SRIOV_OFFLOADS
+};
+
+struct mlx5_esw_sq {
+	struct mlx5_flow_rule *send_to_vport_rule;
+	struct list_head list;
+};
+
+struct mlx5_eswitch_rep {
+	int (*load)(struct mlx5_eswitch *esw,
+		    struct mlx5_eswitch_rep *rep);
+	void (*unload)(struct mlx5_eswitch *esw,
+		       struct mlx5_eswitch_rep *rep);
+	u16 vport;
+	struct mlx5_flow_rule *vport_rx_rule;
+	void *priv_data;
+	struct list_head vport_sqs_list;
+	bool valid;
+	u8 hw_id[ETH_ALEN];
+};
+
+struct mlx5_esw_offload {
+	struct mlx5_flow_table *ft_offloads;
+	struct mlx5_flow_group *vport_rx_group;
+	struct mlx5_eswitch_rep *vport_reps;
 };
 
 struct mlx5_eswitch {
@@ -153,13 +197,15 @@ struct mlx5_eswitch {
 	 */
 	struct mutex state_lock;
 	struct esw_mc_addr *mc_promisc;
+	struct mlx5_esw_offload offloads;
+	int mode;
 };
 
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
 void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs);
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       int vport, u8 mac[ETH_ALEN]);
@@ -177,4 +223,36 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport,
 				 struct ifla_vf_stats *vf_stats);
 
+struct mlx5_flow_spec;
+
+struct mlx5_flow_rule *
+mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_spec *spec,
+				u32 action, u32 src_vport, u32 dst_vport);
+struct mlx5_flow_rule *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep,
+				 u16 *sqns_array, int sqns_num);
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep);
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+				     struct mlx5_eswitch_rep *rep);
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+				       int vport);
+
+#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
+
+#define esw_info(dev, format, ...) \
+	pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_warn(dev, format, ...)
\ + pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) + +#define esw_debug(dev, format, ...) \ + mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c new file mode 100644 index 000000000..7de40e6b0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -0,0 +1,646 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+enum {
+	FDB_FAST_PATH = 0,
+	FDB_SLOW_PATH
+};
+
+struct mlx5_flow_rule *
+mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_spec *spec,
+				u32 action, u32 src_vport, u32 dst_vport)
+{
+	struct mlx5_flow_destination dest = { 0 };
+	struct mlx5_fc *counter = NULL;
+	struct mlx5_flow_rule *rule;
+	void *misc;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest.vport_num = dst_vport;
+		action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		counter = mlx5_fc_create(esw->dev, true);
+		if (IS_ERR(counter))
+			return ERR_CAST(counter);
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest.counter = counter;
+	}
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+				      MLX5_MATCH_MISC_PARAMETERS;
+
+	rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
+				  spec, action, 0, &dest);
+
+	if (IS_ERR(rule))
+		mlx5_fc_destroy(esw->dev, counter);
+
+	return rule;
+}
+
+static struct mlx5_flow_rule *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
+{
+	struct mlx5_flow_destination dest;
+	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_flow_spec *spec;
+	void *misc;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+		flow_rule = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
+	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest.vport_num = vport;
+
+	flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
+				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				       0, &dest);
+	if (IS_ERR(flow_rule))
+		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
+out:
+	kvfree(spec);
+	return flow_rule;
+}
+
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep)
+{
+	struct mlx5_esw_sq *esw_sq, *tmp;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return;
+
+	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
+		mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
+		list_del(&esw_sq->list);
+		kfree(esw_sq);
+	}
+}
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep,
+				 u16 *sqns_array, int sqns_num)
+{
+	struct mlx5_flow_rule *flow_rule;
+	struct mlx5_esw_sq *esw_sq;
+	int vport;
+	int err;
+	int i;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return 0;
+
+	vport = rep->vport == 0 ?
+ FDB_UPLINK_VPORT : rep->vport; + + for (i = 0; i < sqns_num; i++) { + esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL); + if (!esw_sq) { + err = -ENOMEM; + goto out_err; + } + + /* Add re-inject rule to the PF/representor sqs */ + flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, + vport, + sqns_array[i]); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + kfree(esw_sq); + goto out_err; + } + esw_sq->send_to_vport_rule = flow_rule; + list_add(&esw_sq->list, &rep->vport_sqs_list); + } + return 0; + +out_err: + mlx5_eswitch_sqs2vport_stop(esw, rep); + return err; +} + +static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_destination dest; + struct mlx5_flow_rule *flow_rule = NULL; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { + esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); + err = -ENOMEM; + goto out; + } + + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport_num = 0; + + flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + 0, &dest); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err); + goto out; + } + + esw->fdb_table.offloads.miss_rule = flow_rule; +out: + kvfree(spec); + return err; +} + +#define MAX_PF_SQ 256 +#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */ +#define ESW_OFFLOADS_NUM_GROUPS 4 + +static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb = NULL; + struct mlx5_flow_group *g; + u32 *flow_group_in; + void *match_criteria; + int table_size, ix, err = 0; + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return -ENOMEM; + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + goto ns_err; + } + + esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + + fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, + ESW_OFFLOADS_NUM_ENTRIES, + ESW_OFFLOADS_NUM_GROUPS, 0); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); + goto fast_fdb_err; + } + esw->fdb_table.fdb = fdb; + + table_size = nvports + MAX_PF_SQ + 1; + fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); + goto slow_fdb_err; + } + esw->fdb_table.offloads.fdb = fdb; + + /* create send-to-vport group */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + + MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); + + ix = nvports + MAX_PF_SQ; + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err); + goto 
send_vport_err; + } + esw->fdb_table.offloads.send_to_vport_grp = g; + + /* create miss group */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0); + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1); + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create miss flow group err(%d)\n", err); + goto miss_err; + } + esw->fdb_table.offloads.miss_grp = g; + + err = esw_add_fdb_miss_rule(esw); + if (err) + goto miss_rule_err; + + return 0; + +miss_rule_err: + mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); +miss_err: + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); +send_vport_err: + mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); +slow_fdb_err: + mlx5_destroy_flow_table(esw->fdb_table.fdb); +fast_fdb_err: +ns_err: + kvfree(flow_group_in); + return err; +} + +static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw) +{ + if (!esw->fdb_table.fdb) + return; + + esw_debug(esw->dev, "Destroy offloads FDB Table\n"); + mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule); + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); + mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); + + mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); + mlx5_destroy_flow_table(esw->fdb_table.fdb); +} + +static int esw_create_offloads_table(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft_offloads; + struct mlx5_core_dev *dev = esw->dev; + int err = 0; + + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); + if (!ns) { + esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); + return -ENOMEM; + } + + ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0); + if (IS_ERR(ft_offloads)) { + err = PTR_ERR(ft_offloads); + esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); + return err; + } + + esw->offloads.ft_offloads = ft_offloads; + return 0; +} + +static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) +{ + struct mlx5_esw_offload *offloads = &esw->offloads; + + mlx5_destroy_flow_table(offloads->ft_offloads); +} + +static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *g; + struct mlx5_priv *priv = &esw->dev->priv; + u32 *flow_group_in; + void *match_criteria, *misc; + int err = 0; + int nvports = priv->sriov.num_vfs + 2; + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return -ENOMEM; + + /* create vport rx group */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); + + g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); + + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); + goto out; + } + + esw->offloads.vport_rx_group = g; +out: + kfree(flow_group_in); + return err; +} + +static void 
esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) +{ + mlx5_destroy_flow_group(esw->offloads.vport_rx_group); +} + +struct mlx5_flow_rule * +mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) +{ + struct mlx5_flow_destination dest; + struct mlx5_flow_rule *flow_rule; + struct mlx5_flow_spec *spec; + void *misc; + + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { + esw_warn(esw->dev, "Failed to alloc match parameters\n"); + flow_rule = ERR_PTR(-ENOMEM); + goto out; + } + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, vport); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = tirn; + + flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + 0, &dest); + if (IS_ERR(flow_rule)) { + esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); + goto out; + } + +out: + kvfree(spec); + return flow_rule; +} + +static int esw_offloads_start(struct mlx5_eswitch *esw) +{ + int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; + + if (esw->mode != SRIOV_LEGACY) { + esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); + return -EINVAL; + } + + mlx5_eswitch_disable_sriov(esw); + err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); + if (err) { + esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); + err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); + if (err1) + esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); + } + return err; +} + +int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) +{ + struct mlx5_eswitch_rep *rep; + int vport; + int err; + + err = esw_create_offloads_fdb_table(esw, nvports); + if (err) + return err; + + err = esw_create_offloads_table(esw); + if (err) + goto create_ft_err; + + err = esw_create_vport_rx_group(esw); + if (err) + goto create_fg_err; + + for (vport = 0; vport < nvports; vport++) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->valid) + continue; + + err = rep->load(esw, rep); + if (err) + goto err_reps; + } + return 0; + +err_reps: + for (vport--; vport >= 0; vport--) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->valid) + continue; + rep->unload(esw, rep); + } + esw_destroy_vport_rx_group(esw); + +create_fg_err: + esw_destroy_offloads_table(esw); + +create_ft_err: + esw_destroy_offloads_fdb_table(esw); + return err; +} + +static int esw_offloads_stop(struct mlx5_eswitch *esw) +{ + int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; + + mlx5_eswitch_disable_sriov(esw); + err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); + if (err) { + esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err); + err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); + if (err1) + esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); + } + + return err; +} + +void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) +{ + struct mlx5_eswitch_rep *rep; + int vport; + + for (vport = 0; vport < nvports; vport++) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->valid) + continue; + rep->unload(esw, rep); + } + + esw_destroy_vport_rx_group(esw); + esw_destroy_offloads_table(esw); + 
esw_destroy_offloads_fdb_table(esw); +} + +static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) +{ + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + *mlx5_mode = SRIOV_LEGACY; + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + *mlx5_mode = SRIOV_OFFLOADS; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) +{ + switch (mlx5_mode) { + case SRIOV_LEGACY: + *mode = DEVLINK_ESWITCH_MODE_LEGACY; + break; + case SRIOV_OFFLOADS: + *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + break; + default: + return -EINVAL; + } + + return 0; +} + +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct mlx5_core_dev *dev; + u16 cur_mlx5_mode, mlx5_mode = 0; + + dev = devlink_priv(devlink); + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + cur_mlx5_mode = dev->priv.eswitch->mode; + + if (cur_mlx5_mode == SRIOV_NONE) + return -EOPNOTSUPP; + + if (esw_mode_from_devlink(mode, &mlx5_mode)) + return -EINVAL; + + if (cur_mlx5_mode == mlx5_mode) + return 0; + + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + return esw_offloads_start(dev->priv.eswitch); + else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) + return esw_offloads_stop(dev->priv.eswitch); + else + return -EINVAL; +} + +int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct mlx5_core_dev *dev; + + dev = devlink_priv(devlink); + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + if (dev->priv.eswitch->mode == SRIOV_NONE) + return -EOPNOTSUPP; + + return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); +} + +void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5_esw_offload *offloads = &esw->offloads; + + memcpy(&offloads->vport_reps[rep->vport], rep, + sizeof(struct mlx5_eswitch_rep)); + + INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list); + offloads->vport_reps[rep->vport].valid = true; +} + +void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, + int vport) +{ + struct mlx5_esw_offload *offloads = &esw->offloads; + struct mlx5_eswitch_rep *rep; + + rep = &offloads->vport_reps[vport]; + + if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled) + rep->unload(esw, rep); + + offloads->vport_reps[vport].valid = false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a5bb6b695..287ade151 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -413,3 +413,70 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, return 0; } + +struct mlx5_cmd_fc_bulk { + u16 id; + int num; + int outlen; + u32 out[0]; +}; + +struct mlx5_cmd_fc_bulk * +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) +{ + struct mlx5_cmd_fc_bulk *b; + int outlen = + MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter) * num; + + b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL); + if (!b) + return NULL; + + b->id = id; + b->num = num; + b->outlen = outlen; + + return b; +} + +void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b) +{ + kfree(b); +} + +int +mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) +{ + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_flow_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_FLOW_COUNTER); + MLX5_SET(query_flow_counter_in, in, op_mod, 0); + 
MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id); + MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + b->out, b->outlen); +} + +void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, + struct mlx5_cmd_fc_bulk *b, u16 id, + u64 *packets, u64 *bytes) +{ + int index = id - b->id; + void *stats; + + if (index < 0 || index >= b->num) { + mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n", + id, b->id, b->id + b->num - 1); + return; + } + + stats = MLX5_ADDR_OF(query_flow_counter_out, b->out, + flow_statistics[index]); + *packets = MLX5_GET64(traffic_counter, stats, packets); + *bytes = MLX5_GET64(traffic_counter, stats, octets); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index fc4f7b83f..158844cef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -76,4 +76,16 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, u64 *packets, u64 *bytes); + +struct mlx5_cmd_fc_bulk; + +struct mlx5_cmd_fc_bulk * +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num); +void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b); +int +mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b); +void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, + struct mlx5_cmd_fc_bulk *b, u16 id, + u64 *packets, u64 *bytes); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index e912a3d25..3d6c1f65e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -67,13 +67,21 @@ #define FS_REQUIRED_CAPS(...) 
{.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ .caps = (long[]) {__VA_ARGS__} } +#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \ + FS_CAP(flow_table_properties_nic_receive.modify_root), \ + FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \ + FS_CAP(flow_table_properties_nic_receive.flow_table_modify)) + #define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1 #define BY_PASS_PRIO_NUM_LEVELS 1 -#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ +#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) +#define ETHTOOL_PRIO_NUM_LEVELS 1 +#define ETHTOOL_NUM_PRIOS 11 +#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) /* Vlan, mac, ttc, aRFS */ #define KERNEL_NIC_PRIO_NUM_LEVELS 4 #define KERNEL_NIC_NUM_PRIOS 1 @@ -83,6 +91,11 @@ #define ANCHOR_NUM_LEVELS 1 #define ANCHOR_NUM_PRIOS 1 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) + +#define OFFLOADS_MAX_FT 1 +#define OFFLOADS_NUM_PRIOS 1 +#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1) + struct node_caps { size_t arr_sz; long *caps; @@ -98,24 +111,24 @@ static struct init_tree_node { int num_levels; } root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 4, + .ar_size = 6, .children = (struct init_tree_node[]) { ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, - FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), - FS_CAP(flow_table_properties_nic_receive.modify_root), - FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), - FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + FS_CHAINING_CAPS, ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, + ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))), + ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, + ETHTOOL_PRIO_NUM_LEVELS))), ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, ADD_NS(ADD_MULTIPLE_PRIO(1, 1), ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, KERNEL_NIC_PRIO_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, - FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), - FS_CAP(flow_table_properties_nic_receive.modify_root), - FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), - FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + FS_CHAINING_CAPS, ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), @@ -1152,9 +1165,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest, static struct mlx5_flow_rule * _mlx5_add_flow_rule(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria, - u32 *match_value, + struct mlx5_flow_spec *spec, u32 action, u32 flow_tag, struct mlx5_flow_destination *dest) @@ -1168,22 +1179,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); fs_for_each_fg(g, ft) if (compare_match_criteria(g->mask.match_criteria_enable, - match_criteria_enable, + spec->match_criteria_enable, g->mask.match_criteria, - match_criteria)) { - rule = add_rule_fg(g, match_value, + spec->match_criteria)) { + rule = add_rule_fg(g, spec->match_value, action, flow_tag, dest); if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) goto unlock; } - g = create_autogroup(ft, match_criteria_enable, match_criteria); + g = create_autogroup(ft, 
spec->match_criteria_enable, + spec->match_criteria); if (IS_ERR(g)) { rule = (void *)g; goto unlock; } - rule = add_rule_fg(g, match_value, + rule = add_rule_fg(g, spec->match_value, action, flow_tag, dest); if (IS_ERR(rule)) { /* Remove assumes refcount > 0 and autogroup creates a group @@ -1207,9 +1219,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft) struct mlx5_flow_rule * mlx5_add_flow_rule(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria, - u32 *match_value, + struct mlx5_flow_spec *spec, u32 action, u32 flow_tag, struct mlx5_flow_destination *dest) @@ -1240,8 +1250,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft, } } - rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, - match_value, action, flow_tag, dest); + rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest); if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (!IS_ERR_OR_NULL(rule) && @@ -1359,40 +1368,47 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type) { - struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; + struct mlx5_flow_steering *steering = dev->priv.steering; + struct mlx5_flow_root_namespace *root_ns; int prio; struct fs_prio *fs_prio; struct mlx5_flow_namespace *ns; - if (!root_ns) + if (!steering) return NULL; switch (type) { case MLX5_FLOW_NAMESPACE_BYPASS: + case MLX5_FLOW_NAMESPACE_OFFLOADS: + case MLX5_FLOW_NAMESPACE_ETHTOOL: case MLX5_FLOW_NAMESPACE_KERNEL: case MLX5_FLOW_NAMESPACE_LEFTOVERS: case MLX5_FLOW_NAMESPACE_ANCHOR: prio = type; break; case MLX5_FLOW_NAMESPACE_FDB: - if (dev->priv.fdb_root_ns) - return &dev->priv.fdb_root_ns->ns; + if (steering->fdb_root_ns) + return &steering->fdb_root_ns->ns; else return NULL; case MLX5_FLOW_NAMESPACE_ESW_EGRESS: - if (dev->priv.esw_egress_root_ns) - return &dev->priv.esw_egress_root_ns->ns; + if (steering->esw_egress_root_ns) + return &steering->esw_egress_root_ns->ns; else return NULL; case MLX5_FLOW_NAMESPACE_ESW_INGRESS: - if (dev->priv.esw_ingress_root_ns) - return &dev->priv.esw_ingress_root_ns->ns; + if (steering->esw_ingress_root_ns) + return &steering->esw_ingress_root_ns->ns; else return NULL; default: return NULL; } + root_ns = steering->root_ns; + if (!root_ns) + return NULL; + fs_prio = find_prio(&root_ns->ns, prio); if (!fs_prio) return NULL; @@ -1478,13 +1494,13 @@ static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) return true; } -static int init_root_tree_recursive(struct mlx5_core_dev *dev, +static int init_root_tree_recursive(struct mlx5_flow_steering *steering, struct init_tree_node *init_node, struct fs_node *fs_parent_node, struct init_tree_node *init_parent_node, int prio) { - int max_ft_level = MLX5_CAP_FLOWTABLE(dev, + int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev, flow_table_properties_nic_receive. 
max_ft_level); struct mlx5_flow_namespace *fs_ns; @@ -1495,7 +1511,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, if (init_node->type == FS_TYPE_PRIO) { if ((init_node->min_ft_level > max_ft_level) || - !has_required_caps(dev, &init_node->caps)) + !has_required_caps(steering->dev, &init_node->caps)) return 0; fs_get_obj(fs_ns, fs_parent_node); @@ -1516,7 +1532,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, } prio = 0; for (i = 0; i < init_node->ar_size; i++) { - err = init_root_tree_recursive(dev, &init_node->children[i], + err = init_root_tree_recursive(steering, &init_node->children[i], base, init_node, prio); if (err) return err; @@ -1529,7 +1545,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, return 0; } -static int init_root_tree(struct mlx5_core_dev *dev, +static int init_root_tree(struct mlx5_flow_steering *steering, struct init_tree_node *init_node, struct fs_node *fs_parent_node) { @@ -1539,7 +1555,7 @@ static int init_root_tree(struct mlx5_core_dev *dev, fs_get_obj(fs_ns, fs_parent_node); for (i = 0; i < init_node->ar_size; i++) { - err = init_root_tree_recursive(dev, &init_node->children[i], + err = init_root_tree_recursive(steering, &init_node->children[i], &fs_ns->node, init_node, i); if (err) @@ -1548,7 +1564,7 @@ static int init_root_tree(struct mlx5_core_dev *dev, return 0; } -static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev, +static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering, enum fs_flow_table_type table_type) { @@ -1560,7 +1576,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev if (!root_ns) return NULL; - root_ns->dev = dev; + root_ns->dev = steering->dev; root_ns->table_type = table_type; ns = &root_ns->ns; @@ -1615,220 +1631,135 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) #define ANCHOR_PRIO 0 #define ANCHOR_SIZE 1 #define ANCHOR_LEVEL 0 -static int create_anchor_flow_table(struct mlx5_core_dev - *dev) +static int create_anchor_flow_table(struct mlx5_flow_steering *steering) { struct mlx5_flow_namespace *ns = NULL; struct mlx5_flow_table *ft; - ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR); + ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); if (!ns) return -EINVAL; ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); if (IS_ERR(ft)) { - mlx5_core_err(dev, "Failed to create last anchor flow table"); + mlx5_core_err(steering->dev, "Failed to create last anchor flow table"); return PTR_ERR(ft); } return 0; } -static int init_root_ns(struct mlx5_core_dev *dev) +static int init_root_ns(struct mlx5_flow_steering *steering) { - dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX); - if (IS_ERR_OR_NULL(dev->priv.root_ns)) + steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); + if (IS_ERR_OR_NULL(steering->root_ns)) goto cleanup; - if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node)) + if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) goto cleanup; - set_prio_attrs(dev->priv.root_ns); + set_prio_attrs(steering->root_ns); - if (create_anchor_flow_table(dev)) + if (create_anchor_flow_table(steering)) goto cleanup; return 0; cleanup: - mlx5_cleanup_fs(dev); + mlx5_cleanup_fs(steering->dev); return -ENOMEM; } -static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev, - struct mlx5_flow_root_namespace *root_ns) +static void clean_tree(struct fs_node *node) { - struct fs_node *prio; 
- - if (!root_ns) - return; + if (node) { + struct fs_node *iter; + struct fs_node *temp; - if (!list_empty(&root_ns->ns.node.children)) { - prio = list_first_entry(&root_ns->ns.node.children, - struct fs_node, - list); - if (tree_remove_node(prio)) - mlx5_core_warn(dev, - "Flow steering priority wasn't destroyed, refcount > 1\n"); + list_for_each_entry_safe(iter, temp, &node->children, list) + clean_tree(iter); + tree_remove_node(node); } - if (tree_remove_node(&root_ns->ns.node)) - mlx5_core_warn(dev, - "Flow steering namespace wasn't destroyed, refcount > 1\n"); - root_ns = NULL; } -static void destroy_flow_tables(struct fs_prio *prio) +static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns) { - struct mlx5_flow_table *iter; - struct mlx5_flow_table *tmp; - - fs_for_each_ft_safe(iter, tmp, prio) - mlx5_destroy_flow_table(iter); -} - -static void cleanup_root_ns(struct mlx5_core_dev *dev) -{ - struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; - struct fs_prio *iter_prio; - - if (!MLX5_CAP_GEN(dev, nic_flow_table)) - return; - if (!root_ns) return; - /* stage 1 */ - fs_for_each_prio(iter_prio, &root_ns->ns) { - struct fs_node *node; - struct mlx5_flow_namespace *iter_ns; - - fs_for_each_ns_or_ft(node, iter_prio) { - if (node->type == FS_TYPE_FLOW_TABLE) - continue; - fs_get_obj(iter_ns, node); - while (!list_empty(&iter_ns->node.children)) { - struct fs_prio *obj_iter_prio2; - struct fs_node *iter_prio2 = - list_first_entry(&iter_ns->node.children, - struct fs_node, - list); - - fs_get_obj(obj_iter_prio2, iter_prio2); - destroy_flow_tables(obj_iter_prio2); - if (tree_remove_node(iter_prio2)) { - mlx5_core_warn(dev, - "Priority %d wasn't destroyed, refcount > 1\n", - obj_iter_prio2->prio); - return; - } - } - } - } - - /* stage 2 */ - fs_for_each_prio(iter_prio, &root_ns->ns) { - while (!list_empty(&iter_prio->node.children)) { - struct fs_node *iter_ns = - list_first_entry(&iter_prio->node.children, - struct fs_node, - list); - if (tree_remove_node(iter_ns)) { - mlx5_core_warn(dev, - "Namespace wasn't destroyed, refcount > 1\n"); - return; - } - } - } - - /* stage 3 */ - while (!list_empty(&root_ns->ns.node.children)) { - struct fs_prio *obj_prio_node; - struct fs_node *prio_node = - list_first_entry(&root_ns->ns.node.children, - struct fs_node, - list); - - fs_get_obj(obj_prio_node, prio_node); - if (tree_remove_node(prio_node)) { - mlx5_core_warn(dev, - "Priority %d wasn't destroyed, refcount > 1\n", - obj_prio_node->prio); - return; - } - } - - if (tree_remove_node(&root_ns->ns.node)) { - mlx5_core_warn(dev, - "root namespace wasn't destroyed, refcount > 1\n"); - return; - } - - dev->priv.root_ns = NULL; + clean_tree(&root_ns->ns.node); } void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { + struct mlx5_flow_steering *steering = dev->priv.steering; + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return; - cleanup_root_ns(dev); - cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); - cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); - cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns); + cleanup_root_ns(steering->root_ns); + cleanup_root_ns(steering->esw_egress_root_ns); + cleanup_root_ns(steering->esw_ingress_root_ns); + cleanup_root_ns(steering->fdb_root_ns); mlx5_cleanup_fc_stats(dev); + kfree(steering); } -static int init_fdb_root_ns(struct mlx5_core_dev *dev) +static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *prio; - dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB); - if 
(!dev->priv.fdb_root_ns) + steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB); + if (!steering->fdb_root_ns) return -ENOMEM; - /* Create single prio */ - prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1); - if (IS_ERR(prio)) { - cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); - return PTR_ERR(prio); - } else { - return 0; - } + prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1); + if (IS_ERR(prio)) + goto out_err; + + prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1); + if (IS_ERR(prio)) + goto out_err; + + set_prio_attrs(steering->fdb_root_ns); + return 0; + +out_err: + cleanup_root_ns(steering->fdb_root_ns); + steering->fdb_root_ns = NULL; + return PTR_ERR(prio); } -static int init_egress_acl_root_ns(struct mlx5_core_dev *dev) +static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *prio; - dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL); - if (!dev->priv.esw_egress_root_ns) + steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL); + if (!steering->esw_egress_root_ns) return -ENOMEM; /* create 1 prio*/ - prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); - if (IS_ERR(prio)) - return PTR_ERR(prio); - else - return 0; + prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0, + MLX5_TOTAL_VPORTS(steering->dev)); + return PTR_ERR_OR_ZERO(prio); } -static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev) +static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *prio; - dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL); - if (!dev->priv.esw_ingress_root_ns) + steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL); + if (!steering->esw_ingress_root_ns) return -ENOMEM; /* create 1 prio*/ - prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); - if (IS_ERR(prio)) - return PTR_ERR(prio); - else - return 0; + prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0, + MLX5_TOTAL_VPORTS(steering->dev)); + return PTR_ERR_OR_ZERO(prio); } int mlx5_init_fs(struct mlx5_core_dev *dev) { + struct mlx5_flow_steering *steering; int err = 0; if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) @@ -1838,26 +1769,32 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) if (err) return err; + steering = kzalloc(sizeof(*steering), GFP_KERNEL); + if (!steering) + return -ENOMEM; + steering->dev = dev; + dev->priv.steering = steering; + if (MLX5_CAP_GEN(dev, nic_flow_table) && MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { - err = init_root_ns(dev); + err = init_root_ns(steering); if (err) goto err; } if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { - err = init_fdb_root_ns(dev); + err = init_fdb_root_ns(steering); if (err) goto err; } if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { - err = init_egress_acl_root_ns(dev); + err = init_egress_acl_root_ns(steering); if (err) goto err; } if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { - err = init_ingress_acl_root_ns(dev); + err = init_ingress_acl_root_ns(steering); if (err) goto err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index aa41a7314..9cffb6aeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -55,6 +55,14 @@ enum fs_fte_status { FS_FTE_STATUS_EXISTING = 1UL << 0, }; +struct mlx5_flow_steering { 
+	struct mlx5_core_dev *dev;
+	struct mlx5_flow_root_namespace *root_ns;
+	struct mlx5_flow_root_namespace *fdb_root_ns;
+	struct mlx5_flow_root_namespace *esw_egress_root_ns;
+	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+};
+
 struct fs_node {
 	struct list_head list;
 	struct list_head children;
@@ -103,6 +111,7 @@ struct mlx5_fc_cache {
 };
 
 struct mlx5_fc {
+	struct rb_node node;
 	struct list_head list;
 
 	/* last{packets,bytes} members are used when calculating the delta since
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 164dc37fd..3a9195b41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -32,6 +32,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/fs.h>
+#include <linux/rbtree.h>
 #include "mlx5_core.h"
 #include "fs_core.h"
 #include "fs_cmd.h"
@@ -68,32 +69,117 @@
  * elapsed, the thread will actually query the hardware.
  */
 
+static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+{
+	struct rb_node **new = &root->rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
+		int result = counter->id - this->id;
+
+		parent = *new;
+		if (result < 0)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&counter->node, parent, new);
+	rb_insert_color(&counter->node, root);
+}
+
+static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
+					   struct mlx5_fc *first,
+					   u16 last_id)
+{
+	struct mlx5_cmd_fc_bulk *b;
+	struct rb_node *node = NULL;
+	u16 afirst_id;
+	int num;
+	int err;
+	int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);
+
+	/* first id must be aligned to 4 when using bulk query */
+	afirst_id = first->id & ~0x3;
+
+	/* number of counters to query inc.
the last counter */ + num = ALIGN(last_id - afirst_id + 1, 4); + if (num > max_bulk) { + num = max_bulk; + last_id = afirst_id + num - 1; + } + + b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num); + if (!b) { + mlx5_core_err(dev, "Error allocating resources for bulk query\n"); + return NULL; + } + + err = mlx5_cmd_fc_bulk_query(dev, b); + if (err) { + mlx5_core_err(dev, "Error doing bulk query: %d\n", err); + goto out; + } + + for (node = &first->node; node; node = rb_next(node)) { + struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node); + struct mlx5_fc_cache *c = &counter->cache; + u64 packets; + u64 bytes; + + if (counter->id > last_id) + break; + + mlx5_cmd_fc_bulk_get(dev, b, + counter->id, &packets, &bytes); + + if (c->packets == packets) + continue; + + c->packets = packets; + c->bytes = bytes; + c->lastuse = jiffies; + } + +out: + mlx5_cmd_fc_bulk_free(b); + + return node; +} + static void mlx5_fc_stats_work(struct work_struct *work) { struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, priv.fc_stats.work.work); struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; unsigned long now = jiffies; - struct mlx5_fc *counter; - struct mlx5_fc *tmp; - int err = 0; + struct mlx5_fc *counter = NULL; + struct mlx5_fc *last = NULL; + struct rb_node *node; + LIST_HEAD(tmplist); spin_lock(&fc_stats->addlist_lock); - list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); + list_splice_tail_init(&fc_stats->addlist, &tmplist); - if (!list_empty(&fc_stats->list)) + if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters)) queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD); spin_unlock(&fc_stats->addlist_lock); - list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) { - struct mlx5_fc_cache *c = &counter->cache; - u64 packets; - u64 bytes; + list_for_each_entry(counter, &tmplist, list) + mlx5_fc_stats_insert(&fc_stats->counters, counter); + + node = rb_first(&fc_stats->counters); + while (node) { + counter = rb_entry(node, struct mlx5_fc, node); + + node = rb_next(node); if (counter->deleted) { - list_del(&counter->list); + rb_erase(&counter->node, &fc_stats->counters); mlx5_cmd_fc_free(dev, counter->id); @@ -101,26 +187,20 @@ static void mlx5_fc_stats_work(struct work_struct *work) continue; } - if (time_before(now, fc_stats->next_query)) - continue; + last = counter; + } - err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes); - if (err) { - pr_err("Error querying stats for counter id %d\n", - counter->id); - continue; - } + if (time_before(now, fc_stats->next_query) || !last) + return; - if (packets == c->packets) - continue; + node = rb_first(&fc_stats->counters); + while (node) { + counter = rb_entry(node, struct mlx5_fc, node); - c->lastuse = jiffies; - c->packets = packets; - c->bytes = bytes; + node = mlx5_fc_stats_query(dev, counter, last->id); } - if (time_after_eq(now, fc_stats->next_query)) - fc_stats->next_query = now + MLX5_FC_STATS_PERIOD; + fc_stats->next_query = now + MLX5_FC_STATS_PERIOD; } struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) @@ -176,7 +256,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - INIT_LIST_HEAD(&fc_stats->list); + fc_stats->counters = RB_ROOT; INIT_LIST_HEAD(&fc_stats->addlist); spin_lock_init(&fc_stats->addlist_lock); @@ -194,20 +274,32 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; struct mlx5_fc *counter; struct mlx5_fc *tmp; + struct 
rb_node *node;
 
 	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
 	destroy_workqueue(dev->priv.fc_stats.wq);
 	dev->priv.fc_stats.wq = NULL;
 
-	list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
-
-	list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
 		list_del(&counter->list);
 		mlx5_cmd_fc_free(dev, counter->id);
 		kfree(counter);
 	}
+
+	node = rb_first(&fc_stats->counters);
+	while (node) {
+		counter = rb_entry(node, struct mlx5_fc, node);
+
+		node = rb_next(node);
+
+		rb_erase(&counter->node, &fc_stats->counters);
+
+		mlx5_cmd_fc_free(dev, counter->id);
+
+		kfree(counter);
+	}
 }
 
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 75c7ae6a5..77fc1aa26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -151,6 +151,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 			return err;
 	}
 
+	if (MLX5_CAP_GEN(dev, qos)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 96a59463a..1a05fb965 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -272,7 +272,7 @@ static void poll_health(unsigned long data)
 	if (in_fatal(dev) && !health->sick) {
 		health->sick = true;
 		print_health_info(dev);
-		queue_work(health->wq, &health->work);
+		schedule_work(&health->work);
 	}
 }
 
@@ -301,7 +301,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
 
-	destroy_workqueue(health->wq);
+	flush_work(&health->work);
 }
 
 int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -316,10 +316,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
 	strcpy(name, "mlx5_health");
 	strcat(name, dev_name(&dev->pdev->dev));
-	health->wq = create_singlethread_workqueue(name);
 	kfree(name);
-	if (!health->wq)
-		return -ENOMEM;
 
 	INIT_WORK(&health->work, health_care);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e782d0fde..2385bae92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -51,6 +51,7 @@
 #ifdef CONFIG_RFS_ACCEL
 #include <linux/cpu_rmap.h>
 #endif
+#include <net/devlink.h>
 #include "mlx5_core.h"
 #include "fs_core.h"
 #ifdef CONFIG_MLX5_CORE_EN
@@ -1144,6 +1145,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 		dev_err(&pdev->dev, "Failed to init flow steering\n");
 		goto err_fs;
 	}
+
+	err = mlx5_init_rl_table(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to init rate limiting\n");
+		goto err_rl;
+	}
+
 #ifdef CONFIG_MLX5_CORE_EN
 	err = mlx5_eswitch_init(dev);
 	if (err) {
@@ -1183,6 +1191,8 @@ err_sriov:
 	mlx5_eswitch_cleanup(dev->priv.eswitch);
 #endif
 err_reg_dev:
+	mlx5_cleanup_rl_table(dev);
+err_rl:
 	mlx5_cleanup_fs(dev);
 err_fs:
 	mlx5_cleanup_mkey_table(dev);
@@ -1253,6 +1263,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	mlx5_eswitch_cleanup(dev->priv.eswitch);
 #endif
 
+	mlx5_cleanup_rl_table(dev);
 	mlx5_cleanup_fs(dev);
 	mlx5_cleanup_mkey_table(dev);
 	mlx5_cleanup_srq_table(dev);
@@ -1305,19 +1316,28 @@ struct mlx5_core_event_handler {
 		      void *data);
 };
 
+static const struct devlink_ops mlx5_devlink_ops = {
+#ifdef CONFIG_MLX5_CORE_EN
+	.eswitch_mode_set =
mlx5_devlink_eswitch_mode_set, + .eswitch_mode_get = mlx5_devlink_eswitch_mode_get, +#endif +}; static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx5_core_dev *dev; + struct devlink *devlink; struct mlx5_priv *priv; int err; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) { + devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev)); + if (!devlink) { dev_err(&pdev->dev, "kzalloc failed\n"); return -ENOMEM; } + + dev = devlink_priv(devlink); priv = &dev->priv; priv->pci_dev_data = id->driver_data; @@ -1354,15 +1374,21 @@ static int init_one(struct pci_dev *pdev, goto clean_health; } + err = devlink_register(devlink, &pdev->dev); + if (err) + goto clean_load; + return 0; +clean_load: + mlx5_unload_one(dev, priv); clean_health: mlx5_health_cleanup(dev); close_pci: mlx5_pci_close(dev, priv); clean_dev: pci_set_drvdata(pdev, NULL); - kfree(dev); + devlink_free(devlink); return err; } @@ -1370,8 +1396,10 @@ clean_dev: static void remove_one(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct devlink *devlink = priv_to_devlink(dev); struct mlx5_priv *priv = &dev->priv; + devlink_unregister(devlink); if (mlx5_unload_one(dev, priv)) { dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n"); mlx5_health_cleanup(dev); @@ -1380,7 +1408,7 @@ static void remove_one(struct pci_dev *pdev) mlx5_health_cleanup(dev); mlx5_pci_close(dev, priv); pci_set_drvdata(pdev, NULL); - kfree(dev); + devlink_free(devlink); } static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 3e35611b1..752c08127 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -202,15 +202,24 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask) +int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, int proto_mask) { - u32 in[MLX5_ST_SZ_DW(ptys_reg)]; u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u8 an_disable_admin; + u8 an_disable_cap; + u8 an_status; + + mlx5_query_port_autoneg(dev, proto_mask, &an_status, + &an_disable_cap, &an_disable_admin); + if (!an_disable_cap && an_disable) + return -EPERM; memset(in, 0, sizeof(in)); MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, an_disable_admin, an_disable); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); if (proto_mask == MLX5_PTYS_EN) MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); @@ -220,7 +229,19 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PTYS, 0, 1); } -EXPORT_SYMBOL_GPL(mlx5_set_port_proto); +EXPORT_SYMBOL_GPL(mlx5_set_port_ptys); + +/* This function should be used after setting a port register only */ +void mlx5_toggle_port_link(struct mlx5_core_dev *dev) +{ + enum mlx5_port_status ps; + + mlx5_query_port_admin_status(dev, &ps); + mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(dev, MLX5_PORT_UP); +} +EXPORT_SYMBOL_GPL(mlx5_toggle_port_link); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status) @@ -518,6 +539,25 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) } 
EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
 
+void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+			     u8 *an_status,
+			     u8 *an_disable_cap, u8 *an_disable_admin)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+
+	*an_status = 0;
+	*an_disable_cap = 0;
+	*an_disable_admin = 0;
+
+	if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
+		return;
+
+	*an_status = MLX5_GET(ptys_reg, out, an_status);
+	*an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
+	*an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
+
 int mlx5_max_tc(struct mlx5_core_dev *mdev)
 {
 	u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
new file mode 100644
index 000000000..c07c28bd3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cmd.h>
+#include "mlx5_core.h"
+
+/* Finds an entry where we can register the given rate
+ * If the rate already exists, return the entry where it is registered,
+ * otherwise return the first available entry.
+ * If the table is full, return NULL + */ +static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, + u32 rate) +{ + struct mlx5_rl_entry *ret_entry = NULL; + bool empty_found = false; + int i; + + for (i = 0; i < table->max_size; i++) { + if (table->rl_entry[i].rate == rate) + return &table->rl_entry[i]; + if (!empty_found && !table->rl_entry[i].rate) { + empty_found = true; + ret_entry = &table->rl_entry[i]; + } + } + + return ret_entry; +} + +static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, + u32 rate, u16 index) +{ + u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]; + u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(set_rate_limit_in, in, opcode, + MLX5_CMD_OP_SET_RATE_LIMIT); + MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); + MLX5_SET(set_rate_limit_in, in, rate_limit, rate); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, sizeof(out)); +} + +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + + return (rate <= table->max_rate && rate >= table->min_rate); +} +EXPORT_SYMBOL(mlx5_rl_is_in_range); + +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + struct mlx5_rl_entry *entry; + int err = 0; + + mutex_lock(&table->rl_lock); + + if (!rate || !mlx5_rl_is_in_range(dev, rate)) { + mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n", + rate, table->min_rate, table->max_rate); + err = -EINVAL; + goto out; + } + + entry = find_rl_entry(table, rate); + if (!entry) { + mlx5_core_err(dev, "Max number of %u rates reached\n", + table->max_size); + err = -ENOSPC; + goto out; + } + if (entry->refcount) { + /* rate already configured */ + entry->refcount++; + } else { + /* new rate limit */ + err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); + if (err) { + mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", + rate, err); + goto out; + } + entry->rate = rate; + entry->refcount = 1; + } + *index = entry->index; + +out: + mutex_unlock(&table->rl_lock); + return err; +} +EXPORT_SYMBOL(mlx5_rl_add_rate); + +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + struct mlx5_rl_entry *entry = NULL; + + /* 0 is a reserved value for unlimited rate */ + if (rate == 0) + return; + + mutex_lock(&table->rl_lock); + entry = find_rl_entry(table, rate); + if (!entry || !entry->refcount) { + mlx5_core_warn(dev, "Rate %u is not configured\n", rate); + goto out; + } + + entry->refcount--; + if (!entry->refcount) { + /* need to remove rate */ + mlx5_set_rate_limit_cmd(dev, 0, entry->index); + entry->rate = 0; + } + +out: + mutex_unlock(&table->rl_lock); +} +EXPORT_SYMBOL(mlx5_rl_remove_rate); + +int mlx5_init_rl_table(struct mlx5_core_dev *dev) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + int i; + + mutex_init(&table->rl_lock); + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) { + table->max_size = 0; + return 0; + } + + /* First entry is reserved for unlimited rate */ + table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1; + table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate); + table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate); + + table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry), + GFP_KERNEL); + if (!table->rl_entry) + return -ENOMEM; + + /* The index represents the index in HW rate 
limit table + * Index 0 is reserved for unlimited rate + */ + for (i = 0; i < table->max_size; i++) + table->rl_entry[i].index = i + 1; + + /* Index 0 is reserved */ + mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n", + table->max_size, + table->min_rate >> 10, + table->max_rate >> 10); + + return 0; +} + +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + int i; + + /* Clear all configured rates */ + for (i = 0; i < table->max_size; i++) + if (table->rl_entry[i].rate) + mlx5_set_rate_limit_cmd(dev, 0, + table->rl_entry[i].index); + + kfree(dev->priv.rl_table.rl_entry); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index d6a3f412b..b380a6bc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -167,7 +167,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) mlx5_core_init_vfs(dev, num_vfs); #ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs); + mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); #endif return num_vfs; @@ -209,7 +209,8 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev) mlx5_core_init_vfs(dev, cur_vfs); #ifdef CONFIG_MLX5_CORE_EN if (cur_vfs) - mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs); + mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs, + SRIOV_LEGACY); #endif enable_vfs(dev, cur_vfs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index 04bc52260..c07f4d01b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -63,12 +63,12 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) complete(&srq->free); } -static int get_pas_size(void *srqc) +static int get_pas_size(struct mlx5_srq_attr *in) { - u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12; - u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size); - u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride); - u32 page_offset = MLX5_GET(srqc, srqc, page_offset); + u32 log_page_size = in->log_page_size + 12; + u32 log_srq_size = in->log_size; + u32 log_rq_stride = in->wqe_shift; + u32 page_offset = in->page_offset; u32 po_quanta = 1 << (log_page_size - 6); u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); u32 page_size = 1 << log_page_size; @@ -78,57 +78,58 @@ static int get_pas_size(void *srqc) return rq_num_pas * sizeof(u64); } -static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc) +static void set_wq(void *wq, struct mlx5_srq_attr *in) { - void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq); - - if (srqc_to_rmpc) { - switch (MLX5_GET(srqc, srqc, state)) { - case MLX5_SRQC_STATE_GOOD: - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); - break; - case MLX5_SRQC_STATE_ERROR: - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR); - break; - default: - pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__, - __LINE__, MLX5_GET(srqc, srqc, state)); - MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state)); - } - - MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature)); - MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size)); - MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4); - MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size)); - MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset)); - 
MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm)); - MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd)); - MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr)); - } else { - switch (MLX5_GET(rmpc, rmpc, state)) { - case MLX5_RMPC_STATE_RDY: - MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD); - break; - case MLX5_RMPC_STATE_ERR: - MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR); - break; - default: - pr_warn("%s: %d: Unknown rmp state = 0x%x\n", - __func__, __LINE__, - MLX5_GET(rmpc, rmpc, state)); - MLX5_SET(srqc, srqc, state, - MLX5_GET(rmpc, rmpc, state)); - } - - MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature)); - MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz)); - MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4); - MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz)); - MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset)); - MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm)); - MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd)); - MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr)); - } + MLX5_SET(wq, wq, wq_signature, !!(in->flags + & MLX5_SRQ_FLAG_WQ_SIG)); + MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size); + MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4); + MLX5_SET(wq, wq, log_wq_sz, in->log_size); + MLX5_SET(wq, wq, page_offset, in->page_offset); + MLX5_SET(wq, wq, lwm, in->lwm); + MLX5_SET(wq, wq, pd, in->pd); + MLX5_SET64(wq, wq, dbr_addr, in->db_record); +} + +static void set_srqc(void *srqc, struct mlx5_srq_attr *in) +{ + MLX5_SET(srqc, srqc, wq_signature, !!(in->flags + & MLX5_SRQ_FLAG_WQ_SIG)); + MLX5_SET(srqc, srqc, log_page_size, in->log_page_size); + MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift); + MLX5_SET(srqc, srqc, log_srq_size, in->log_size); + MLX5_SET(srqc, srqc, page_offset, in->page_offset); + MLX5_SET(srqc, srqc, lwm, in->lwm); + MLX5_SET(srqc, srqc, pd, in->pd); + MLX5_SET64(srqc, srqc, dbr_addr, in->db_record); + MLX5_SET(srqc, srqc, xrcd, in->xrcd); + MLX5_SET(srqc, srqc, cqn, in->cqn); +} + +static void get_wq(void *wq, struct mlx5_srq_attr *in) +{ + if (MLX5_GET(wq, wq, wq_signature)) + in->flags |= MLX5_SRQ_FLAG_WQ_SIG; + in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz); + in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4; + in->log_size = MLX5_GET(wq, wq, log_wq_sz); + in->page_offset = MLX5_GET(wq, wq, page_offset); + in->lwm = MLX5_GET(wq, wq, lwm); + in->pd = MLX5_GET(wq, wq, pd); + in->db_record = MLX5_GET64(wq, wq, dbr_addr); +} + +static void get_srqc(void *srqc, struct mlx5_srq_attr *in) +{ + if (MLX5_GET(srqc, srqc, wq_signature)) + in->flags |= MLX5_SRQ_FLAG_WQ_SIG; + in->log_page_size = MLX5_GET(srqc, srqc, log_page_size); + in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride); + in->log_size = MLX5_GET(srqc, srqc, log_srq_size); + in->page_offset = MLX5_GET(srqc, srqc, page_offset); + in->lwm = MLX5_GET(srqc, srqc, lwm); + in->pd = MLX5_GET(srqc, srqc, pd); + in->db_record = MLX5_GET64(srqc, srqc, dbr_addr); } struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) @@ -149,19 +150,36 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) EXPORT_SYMBOL(mlx5_core_get_srq); static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen) + struct mlx5_srq_attr *in) { - struct mlx5_create_srq_mbox_out out; + u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0}; + void *create_in; + void *srqc; + void *pas; + int pas_size; + int
inlen; int err; - memset(&out, 0, sizeof(out)); + pas_size = get_pas_size(in); + inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size; + create_in = mlx5_vzalloc(inlen); + if (!create_in) + return -ENOMEM; + + srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry); + pas = MLX5_ADDR_OF(create_srq_in, create_in, pas); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); + set_srqc(srqc, in); + memcpy(pas, in->pas, pas_size); - err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), - sizeof(out)); + MLX5_SET(create_srq_in, create_in, opcode, + MLX5_CMD_OP_CREATE_SRQ); - srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, + sizeof(create_out)); + kvfree(create_in); + if (!err) + srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); return err; } @@ -169,67 +187,75 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, static int destroy_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) { - struct mlx5_destroy_srq_mbox_in in; - struct mlx5_destroy_srq_mbox_out out; + u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0}; + u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0}; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); + MLX5_SET(destroy_srq_in, srq_in, opcode, + MLX5_CMD_OP_DESTROY_SRQ); + MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn); - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), - (u32 *)(&out), sizeof(out)); + return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), + srq_out, sizeof(srq_out)); } static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq) { - struct mlx5_arm_srq_mbox_in in; - struct mlx5_arm_srq_mbox_out out; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); + /* arm_srq structs missing using identical xrc ones */ + u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; + u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); - in.hdr.opmod = cpu_to_be16(!!is_srq); - in.srqn = cpu_to_be32(srq->srqn); - in.lwm = cpu_to_be16(lwm); + MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn); + MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm); - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), - sizeof(in), (u32 *)(&out), - sizeof(out)); + return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), + srq_out, sizeof(srq_out)); } static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out) + struct mlx5_srq_attr *out) { - struct mlx5_query_srq_mbox_in in; + u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0}; + u32 *srq_out; + void *srqc; + int err; - memset(&in, 0, sizeof(in)); + srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out)); + if (!srq_out) + return -ENOMEM; - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); + MLX5_SET(query_srq_in, srq_in, opcode, + MLX5_CMD_OP_QUERY_SRQ); + MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn); + err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), + srq_out, + MLX5_ST_SZ_BYTES(query_srq_out)); + if (err) + goto out; - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), - (u32 *)out, sizeof(*out)); + srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry); + get_srqc(srqc, out); + if (MLX5_GET(srqc, 
srqc, state) != MLX5_SRQC_STATE_GOOD) + out->flags |= MLX5_SRQ_FLAG_ERR; +out: + kvfree(srq_out); + return err; } static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, - int srq_inlen) + struct mlx5_srq_attr *in) { u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; void *create_in; - void *srqc; void *xrc_srqc; void *pas; int pas_size; int inlen; int err; - srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); - pas_size = get_pas_size(srqc); + pas_size = get_pas_size(in); inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; create_in = mlx5_vzalloc(inlen); if (!create_in) @@ -239,7 +265,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, xrc_srq_context_entry); pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); - memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc)); + set_srqc(xrc_srqc, in); + MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index); memcpy(pas, in->pas, pas_size); MLX5_SET(create_xrc_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); @@ -293,11 +320,10 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out) + struct mlx5_srq_attr *out) { u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; u32 *xrcsrq_out; - void *srqc; void *xrc_srqc; int err; @@ -317,8 +343,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out, xrc_srq_context_entry); - srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); - memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); + get_srqc(xrc_srqc, out); + if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD) + out->flags |= MLX5_SRQ_FLAG_ERR; out: kvfree(xrcsrq_out); @@ -326,26 +353,27 @@ out: } static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int srq_inlen) + struct mlx5_srq_attr *in) { void *create_in; void *rmpc; - void *srqc; + void *wq; int pas_size; int inlen; int err; - srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); - pas_size = get_pas_size(srqc); + pas_size = get_pas_size(in); inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size; create_in = mlx5_vzalloc(inlen); if (!create_in) return -ENOMEM; rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx); + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + set_wq(wq, in); memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); - rmpc_srqc_reformat(srqc, rmpc, true); err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); @@ -390,11 +418,10 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev, } static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out) + struct mlx5_srq_attr *out) { u32 *rmp_out; void *rmpc; - void *srqc; int err; rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out)); @@ -405,9 +432,10 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, if (err) goto out; - srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context); - rmpc_srqc_reformat(srqc, rmpc, false); + get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out); + if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY) + out->flags |= MLX5_SRQ_FLAG_ERR; out: kvfree(rmp_out); @@ -416,15 +444,14 @@ out: static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct 
mlx5_create_srq_mbox_in *in, - int inlen, int is_xrc) + struct mlx5_srq_attr *in) { if (!dev->issi) - return create_srq_cmd(dev, srq, in, inlen); + return create_srq_cmd(dev, srq, in); else if (srq->common.res == MLX5_RES_XSRQ) - return create_xrc_srq_cmd(dev, srq, in, inlen); + return create_xrc_srq_cmd(dev, srq, in); else - return create_rmp_cmd(dev, srq, in, inlen); + return create_rmp_cmd(dev, srq, in); } static int destroy_srq_split(struct mlx5_core_dev *dev, @@ -439,15 +466,17 @@ static int destroy_srq_split(struct mlx5_core_dev *dev, } int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen, - int is_xrc) + struct mlx5_srq_attr *in) { int err; struct mlx5_srq_table *table = &dev->priv.srq_table; - srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ; + if (in->type == IB_SRQT_XRC) + srq->common.res = MLX5_RES_XSRQ; + else + srq->common.res = MLX5_RES_SRQ; - err = create_srq_split(dev, srq, in, inlen, is_xrc); + err = create_srq_split(dev, srq, in); if (err) return err; @@ -502,7 +531,7 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) EXPORT_SYMBOL(mlx5_core_destroy_srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out) + struct mlx5_srq_attr *out) { if (!dev->issi) return query_srq_cmd(dev, srq, out); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 03a5093ff..28274a6fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -85,6 +85,7 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) return err; } +EXPORT_SYMBOL(mlx5_core_create_rq); int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) { @@ -110,6 +111,7 @@ void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_destroy_rq); int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out) { @@ -430,6 +432,7 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, return err; } +EXPORT_SYMBOL(mlx5_core_create_rqt); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen) @@ -455,3 +458,4 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_destroy_rqt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 91846dfcb..21365d069 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -135,6 +135,18 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in, return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); } +void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) +{ + u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0}; + + mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out)); + + *min_inline_mode = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.min_wqe_inline_mode); +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); + int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile 
 b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 9b5ebf84c..d20ae1838 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ - spectrum_switchdev.o + spectrum_switchdev.o spectrum_router.o \ + spectrum_kvdl.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index cd63b8263..28271bedd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -105,6 +105,7 @@ enum mlxsw_cmd_opcode { MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013, MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014, MLXSW_CMD_OPCODE_QUERY_EQ = 0x015, + MLXSW_CMD_OPCODE_QUERY_RESOURCES = 0x101, }; static inline const char *mlxsw_cmd_opcode_str(u16 opcode) @@ -144,6 +145,8 @@ static inline const char *mlxsw_cmd_opcode_str(u16 opcode) return "HW2SW_EQ"; case MLXSW_CMD_OPCODE_QUERY_EQ: return "QUERY_EQ"; + case MLXSW_CMD_OPCODE_QUERY_RESOURCES: + return "QUERY_RESOURCES"; default: return "*UNKNOWN*"; } @@ -500,6 +503,35 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core) return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0); } +/* QUERY_RESOURCES - Query chip resources + * -------------------------------------- + * OpMod == 0 (N/A), INmod is index + * ---------------------------------- + * The QUERY_RESOURCES command retrieves information related to chip resources + * by resource ID. Every command returns 32 entries. INmod is used as the base: + * for example, index 1 will return entries 32-63. When the table ends and + * there are no more resources in it, resource id 0xFFFF is returned to + * indicate this. + */ +static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core, + char *out_mbox, int index) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES, + 0, index, false, out_mbox, + MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_query_resource_id + * The resource id. 0xFFFF indicates table's end. + */ +MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false); + +/* cmd_mbox_query_resource_data + * The resource value. + */ +MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data, + 0x00, 0, 40, 0x8, 0, false); + /* CONFIG_PROFILE (Set) - Configure Switch Profile * ------------------------------ * OpMod == 1 (Set), INmod == 0 (N/A) @@ -607,6 +639,24 @@ MLXSW_ITEM32(cmd_mbox, config_profile, */ MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1); +/* cmd_mbox_config_set_kvd_linear_size + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1); + +/* cmd_mbox_config_set_kvd_hash_single_size + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1); + +/* cmd_mbox_config_set_kvd_hash_double_size + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents.
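+ *
+ * (All three capability bits are driven from one flag in the driver
+ * profile; a hypothetical profile snippet, with made-up sizes:
+ *
+ *	.used_kvd_sizes		= 1,
+ *	.kvd_linear_size	= 64 * 1024,
+ *	.kvd_hash_single_size	= 128 * 1024,
+ *	.kvd_hash_double_size	= 32 * 1024,
+ *
+ * mlxsw_pci_config_profile() copies these into the mailbox fields
+ * defined below.)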
+ */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1); + /* cmd_mbox_config_profile_max_vepa_channels * Maximum number of VEPA channels per port (0 through 16) * 0 - multi-channel VEPA is disabled @@ -733,6 +783,31 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16); */ MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1); +/* cmd_mbox_config_kvd_linear_size + * KVD Linear Size + * Valid for Spectrum only + * Allowed values are 128*N where N=0 or higher + */ +MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24); + +/* cmd_mbox_config_kvd_hash_single_size + * KVD Hash single-entries size + * Valid for Spectrum only + * Allowed values are 128*N where N=0 or higher + * Must be greater or equal to cap_min_kvd_hash_single_size + * Must be smaller or equal to cap_kvd_size - kvd_linear_size + */ +MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24); + +/* cmd_mbox_config_kvd_hash_double_size + * KVD Hash double-entries size (units of single-size entries) + * Valid for Spectrum only + * Allowed values are 128*N where N=0 or higher + * Must be either 0 or greater or equal to cap_min_kvd_hash_double_size + * Must be smaller or equal to cap_kvd_size - kvd_linear_size + */ +MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_double_size, 0x5C, 0, 24); + /* cmd_mbox_config_profile_swid_config_mask * Modify Switch Partition Configuration mask. When set, the configu- * ration value for the Switch Partition is taken from the mailbox. diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index b0a0b01bb..068ee65a9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -58,6 +58,7 @@ #include <linux/workqueue.h> #include <asm/byteorder.h> #include <net/devlink.h> +#include <trace/events/devlink.h> #include "core.h" #include "item.h" @@ -110,6 +111,7 @@ struct mlxsw_core { struct { u8 *mapping; /* lag_id+port_index to local_port mapping */ } lag; + struct mlxsw_resources resources; struct mlxsw_hwmon *hwmon; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ @@ -447,6 +449,10 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, if (!skb) return -ENOMEM; + trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, + skb->data + mlxsw_core->driver->txhdr_len, + skb->len - mlxsw_core->driver->txhdr_len); + atomic_set(&trans->active, 1); err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info); if (err) { @@ -529,6 +535,9 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, struct mlxsw_core *mlxsw_core = priv; struct mlxsw_reg_trans *trans; + trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0, + skb->data, skb->len); + if (!mlxsw_emad_is_resp(skb)) goto free_skb; @@ -1102,7 +1111,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, } } - err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, + &mlxsw_core->resources); if (err) goto err_bus_init; @@ -1110,14 +1120,14 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_emad_init; - err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); - if (err) - goto err_hwmon_init; - err = devlink_register(devlink, mlxsw_bus_info->dev); if (err) goto err_devlink_register; + err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); + if (err) + goto err_hwmon_init; + err =
mlxsw_driver->init(mlxsw_core, mlxsw_bus_info); if (err) goto err_driver_init; @@ -1131,9 +1141,9 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_debugfs_init: mlxsw_core->driver->fini(mlxsw_core); err_driver_init: +err_hwmon_init: devlink_unregister(devlink); err_devlink_register: -err_hwmon_init: mlxsw_emad_fini(mlxsw_core); err_emad_init: mlxsw_bus->fini(bus_priv); @@ -1644,6 +1654,12 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); +struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core) +{ + return &mlxsw_core->resources; +} +EXPORT_SYMBOL(mlxsw_core_resources_get); + int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, struct mlxsw_core_port *mlxsw_core_port, u8 local_port, struct net_device *dev, bool split, u32 split_group) @@ -1736,7 +1752,7 @@ static int __init mlxsw_core_module_init(void) { int err; - mlxsw_wq = create_workqueue(mlxsw_core_driver_name); + mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); if (!mlxsw_wq) return -ENOMEM; mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 436bc49df..d3476ead9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -190,7 +190,8 @@ struct mlxsw_config_profile { used_max_ib_mc:1, used_max_pkey:1, used_ar_sec:1, - used_adaptive_routing_group_cap:1; + used_adaptive_routing_group_cap:1, + used_kvd_sizes:1; u8 max_vepa_channels; u16 max_lag; u16 max_port_per_lag; @@ -211,6 +212,10 @@ struct mlxsw_config_profile { u8 ar_sec; u16 adaptive_routing_group_cap; u8 arn; + u32 kvd_linear_size; + u32 kvd_hash_single_size; + u32 kvd_hash_double_size; + u8 resource_query_enable; struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; }; @@ -262,10 +267,18 @@ struct mlxsw_driver { const struct mlxsw_config_profile *profile; }; +struct mlxsw_resources { + u8 max_span_valid:1; + u8 max_span; +}; + +struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core); + struct mlxsw_bus { const char *kind; int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core, - const struct mlxsw_config_profile *profile); + const struct mlxsw_config_profile *profile, + struct mlxsw_resources *resources); void (*fini)(void *bus_priv); bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 7f4173c8e..1d1360c17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1154,6 +1154,61 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); } +#define MLXSW_RESOURCES_TABLE_END_ID 0xffff +#define MLXSW_MAX_SPAN_ID 0x2420 +#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100 +#define MLXSW_RESOURCES_PER_QUERY 32 + +static void mlxsw_pci_resources_query_parse(int id, u64 val, + struct mlxsw_resources *resources) +{ + switch (id) { + case MLXSW_MAX_SPAN_ID: + resources->max_span = val; + resources->max_span_valid = 1; + break; + default: + break; + } +} + +static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_resources *resources, + u8 query_enabled) +{ + int index, i; + u64 data; + u16 id; + int err; + + /* Not all the versions 
support resources query */ + if (!query_enabled) + return 0; + + mlxsw_cmd_mbox_zero(mbox); + + for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) { + err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index); + if (err) + return err; + + for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) { + id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); + data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i); + + if (id == MLXSW_RESOURCES_TABLE_END_ID) + return 0; + + mlxsw_pci_resources_query_parse(id, data, resources); + } + } + + /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get + * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. + */ + return -EIO; +} + static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, const struct mlxsw_config_profile *profile) { @@ -1255,6 +1310,20 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set( mbox, profile->adaptive_routing_group_cap); } + if (profile->used_kvd_sizes) { + mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_kvd_linear_size_set( + mbox, profile->kvd_linear_size); + mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set( + mbox, profile->kvd_hash_single_size); + mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set( + mbox, profile->kvd_hash_double_size); + } for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++) mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, @@ -1390,7 +1459,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, } static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, - const struct mlxsw_config_profile *profile) + const struct mlxsw_config_profile *profile, + struct mlxsw_resources *resources) { struct mlxsw_pci *mlxsw_pci = bus_priv; struct pci_dev *pdev = mlxsw_pci->pdev; @@ -1449,6 +1519,11 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_boardinfo; + err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources, + profile->resource_query_enable); + if (err) + goto err_query_resources; + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile); if (err) goto err_config_profile; @@ -1471,6 +1546,7 @@ err_request_eq_irq: mlxsw_pci_aqs_fini(mlxsw_pci); err_aqs_init: err_config_profile: +err_query_resources: err_boardinfo: mlxsw_pci_fw_area_fini(mlxsw_pci); err_fw_area_init: diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index f33b997f2..af371a82c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -56,6 +56,7 @@ #define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) #define MLXSW_PORT_CPU_PORT 0x0 +#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2) #define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 57d48da70..1721098ee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1,9 +1,10 @@ /* * drivers/net/ethernet/mellanox/mlxsw/reg.h * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 
- * Copyright (c) 2015 Ido Schimmel + * Copyright (c) 2015-2016 Ido Schimmel * Copyright (c) 2015 Elad Raz - * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015-2016 Jiri Pirko + * Copyright (c) 2016 Yotam Gigi * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -386,7 +387,9 @@ enum mlxsw_reg_sfd_rec_action { /* forward and trap, trap_id is FDB_TRAP */ MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1, /* trap and do not forward, trap_id is FDB_TRAP */ - MLXSW_REG_SFD_REC_ACTION_TRAP = 3, + MLXSW_REG_SFD_REC_ACTION_TRAP = 2, + /* forward to IP router */ + MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3, MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15, }; @@ -2500,6 +2503,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, + MLXSW_REG_PPCNT_TC_CNT = 0x11, }; /* reg_ppcnt_grp @@ -2700,6 +2704,23 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64); */ MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64); +/* Ethernet Per Traffic Group Counters */ + +/* reg_ppcnt_tc_transmit_queue + * Contains the transmit queue depth in cells of traffic class + * selected by prio_tc and the port selected by local_port. + * The field cannot be cleared. + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64); + +/* reg_ppcnt_tc_no_buffer_discard_uc + * The number of unicast packets dropped due to lack of shared + * buffer resources. + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64); + static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) @@ -3201,6 +3222,1194 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); } +/* RGCR - Router General Configuration Register + * -------------------------------------------- + * The register is used for setting up the router configuration. + */ +#define MLXSW_REG_RGCR_ID 0x8001 +#define MLXSW_REG_RGCR_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_rgcr = { + .id = MLXSW_REG_RGCR_ID, + .len = MLXSW_REG_RGCR_LEN, +}; + +/* reg_rgcr_ipv4_en + * IPv4 router enable. + * Access: RW + */ +MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1); + +/* reg_rgcr_ipv6_en + * IPv6 router enable. + * Access: RW + */ +MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1); + +/* reg_rgcr_max_router_interfaces + * Defines the maximum number of active router interfaces for all virtual + * routers. + * Access: RW + */ +MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16); + +/* reg_rgcr_usp + * Update switch priority and packet color. + * 0 - Preserve the value of Switch Priority and packet color. + * 1 - Recalculate the value of Switch Priority and packet color. + * Access: RW + * + * Note: Not supported by SwitchX and SwitchX-2. + */ +MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1); + +/* reg_rgcr_pcp_rw + * Indicates how to handle the pcp_rewrite_en value: + * 0 - Preserve the value of pcp_rewrite_en. + * 2 - Disable PCP rewrite. + * 3 - Enable PCP rewrite. + * Access: RW + * + * Note: Not supported by SwitchX and SwitchX-2. + */ +MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2); + +/* reg_rgcr_activity_dis + * Activity disable: + * 0 - Activity will be set when an entry is hit (default). + * 1 - Activity will not be set when an entry is hit. 
+ * + * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry + * (RALUE). + * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host + * Entry (RAUHT). + * Bits 2:7 are reserved. + * Access: RW + * + * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB. + */ +MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8); + +static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en) +{ + MLXSW_REG_ZERO(rgcr, payload); + mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en); +} + +/* RITR - Router Interface Table Register + * -------------------------------------- + * The register is used to configure the router interface table. + */ +#define MLXSW_REG_RITR_ID 0x8002 +#define MLXSW_REG_RITR_LEN 0x40 + +static const struct mlxsw_reg_info mlxsw_reg_ritr = { + .id = MLXSW_REG_RITR_ID, + .len = MLXSW_REG_RITR_LEN, +}; + +/* reg_ritr_enable + * Enables routing on the router interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1); + +/* reg_ritr_ipv4 + * IPv4 routing enable. Enables routing of IPv4 traffic on the router + * interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1); + +/* reg_ritr_ipv6 + * IPv6 routing enable. Enables routing of IPv6 traffic on the router + * interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1); + +enum mlxsw_reg_ritr_if_type { + MLXSW_REG_RITR_VLAN_IF, + MLXSW_REG_RITR_FID_IF, + MLXSW_REG_RITR_SP_IF, +}; + +/* reg_ritr_type + * Router interface type. + * 0 - VLAN interface. + * 1 - FID interface. + * 2 - Sub-port interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3); + +enum { + MLXSW_REG_RITR_RIF_CREATE, + MLXSW_REG_RITR_RIF_DEL, +}; + +/* reg_ritr_op + * Opcode: + * 0 - Create or edit RIF. + * 1 - Delete RIF. + * Reserved for SwitchX-2. For Spectrum, editing of interface properties + * is not supported. An interface must be deleted and re-created in order + * to update properties. + * Access: WO + */ +MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2); + +/* reg_ritr_rif + * Router interface index. A pointer to the Router Interface Table. + * Access: Index + */ +MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16); + +/* reg_ritr_ipv4_fe + * IPv4 Forwarding Enable. + * Enables routing of IPv4 traffic on the router interface. When disabled, + * forwarding is blocked but local traffic (traps and IP2ME) will be enabled. + * Not supported in SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); + +/* reg_ritr_ipv6_fe + * IPv6 Forwarding Enable. + * Enables routing of IPv6 traffic on the router interface. When disabled, + * forwarding is blocked but local traffic (traps and IP2ME) will be enabled. + * Not supported in SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); + +/* reg_ritr_lb_en + * Loop-back filter enable for unicast packets. + * If the flag is set then loop-back filter for unicast packets is + * implemented on the RIF. Multicast packets are always subject to + * loop-back filtering. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1); + +/* reg_ritr_virtual_router + * Virtual router ID associated with the router interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16); + +/* reg_ritr_mtu + * Router interface MTU. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16); + +/* reg_ritr_if_swid + * Switch partition ID. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8); + +/* reg_ritr_if_mac + * Router interface MAC address. 
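+ * Written together with the other interface parameters by
+ * mlxsw_reg_ritr_pack() below; a hypothetical RIF creation sketch,
+ * where rif_index, mtu and mac are the caller's, using the usual
+ * mlxsw_reg_write() helper:
+ *
+ *	char ritr_pl[MLXSW_REG_RITR_LEN];
+ *
+ *	mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_VLAN_IF,
+ *			    rif_index, mtu, mac);
+ *	mlxsw_reg_write(mlxsw_core, MLXSW_REG(ritr), ritr_pl);
+ *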
+ * In Spectrum, all MAC addresses must have the same 38 MSBits. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6); + +/* VLAN Interface */ + +/* reg_ritr_vlan_if_vid + * VLAN ID. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12); + +/* FID Interface */ + +/* reg_ritr_fid_if_fid + * Filtering ID. Used to connect a bridge to the router. Only FIDs from + * the vFID range are supported. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16); + +static inline void mlxsw_reg_ritr_fid_set(char *payload, + enum mlxsw_reg_ritr_if_type rif_type, + u16 fid) +{ + if (rif_type == MLXSW_REG_RITR_FID_IF) + mlxsw_reg_ritr_fid_if_fid_set(payload, fid); + else + mlxsw_reg_ritr_vlan_if_vid_set(payload, fid); +} + +/* Sub-port Interface */ + +/* reg_ritr_sp_if_lag + * LAG indication. When this bit is set the system_port field holds the + * LAG identifier. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1); + +/* reg_ritr_sp_system_port + * Port unique identifier. When lag bit is set, this field holds the + * lag_id in bits 0:9. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16); + +/* reg_ritr_sp_if_vid + * VLAN ID. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12); + +static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif) +{ + MLXSW_REG_ZERO(ritr, payload); + mlxsw_reg_ritr_rif_set(payload, rif); +} + +static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag, + u16 system_port, u16 vid) +{ + mlxsw_reg_ritr_sp_if_lag_set(payload, lag); + mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port); + mlxsw_reg_ritr_sp_if_vid_set(payload, vid); +} + +static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, + enum mlxsw_reg_ritr_if_type type, + u16 rif, u16 mtu, const char *mac) +{ + bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL; + + MLXSW_REG_ZERO(ritr, payload); + mlxsw_reg_ritr_enable_set(payload, enable); + mlxsw_reg_ritr_ipv4_set(payload, 1); + mlxsw_reg_ritr_type_set(payload, type); + mlxsw_reg_ritr_op_set(payload, op); + mlxsw_reg_ritr_rif_set(payload, rif); + mlxsw_reg_ritr_ipv4_fe_set(payload, 1); + mlxsw_reg_ritr_lb_en_set(payload, 1); + mlxsw_reg_ritr_mtu_set(payload, mtu); + mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); +} + +/* RATR - Router Adjacency Table Register + * -------------------------------------- + * The RATR register is used to configure the Router Adjacency (next-hop) + * Table. + */ +#define MLXSW_REG_RATR_ID 0x8008 +#define MLXSW_REG_RATR_LEN 0x2C + +static const struct mlxsw_reg_info mlxsw_reg_ratr = { + .id = MLXSW_REG_RATR_ID, + .len = MLXSW_REG_RATR_LEN, +}; + +enum mlxsw_reg_ratr_op { + /* Read */ + MLXSW_REG_RATR_OP_QUERY_READ = 0, + /* Read and clear activity */ + MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2, + /* Write Adjacency entry */ + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1, + /* Write Adjacency entry only if the activity is cleared. + * The write may not succeed if the activity is set. There is no + * direct feedback if the write has succeeded or not, however + * the get will reveal the actual entry (SW can compare the get + * response to the set command). + */ + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3, +}; + +/* reg_ratr_op + * Note that Write operation may also be used for updating + * counter_set_type and counter_index. In this case all other + * fields must not be updated. + * Access: OP + */ +MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4); + +/* reg_ratr_v + * Valid bit.
Indicates if the adjacency entry is valid. + * Note: the device may need some time before reusing an invalidated + * entry. During this time the entry can not be reused. It is + * recommended to use another entry before reusing an invalidated + * entry (e.g. software can put it at the end of the list for + * reusing). Trying to access an invalidated entry not yet cleared + * by the device results with failure indicating "Try Again" status. + * When valid is '0' then egress_router_interface,trap_action, + * adjacency_parameters and counters are reserved + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1); + +/* reg_ratr_a + * Activity. Set for new entries. Set if a packet lookup has hit on + * the specific entry. To clear the a bit, use "clear activity". + * Access: RO + */ +MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1); + +/* reg_ratr_adjacency_index_low + * Bits 15:0 of index into the adjacency table. + * For SwitchX and SwitchX-2, the adjacency table is linear and + * used for adjacency entries only. + * For Spectrum, the index is to the KVD linear. + * Access: Index + */ +MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16); + +/* reg_ratr_egress_router_interface + * Range is 0 .. cap_max_router_interfaces - 1 + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16); + +enum mlxsw_reg_ratr_trap_action { + MLXSW_REG_RATR_TRAP_ACTION_NOP, + MLXSW_REG_RATR_TRAP_ACTION_TRAP, + MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RATR_TRAP_ACTION_MIRROR, + MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS, +}; + +/* reg_ratr_trap_action + * see mlxsw_reg_ratr_trap_action + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4); + +enum mlxsw_reg_ratr_trap_id { + MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0, + MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1, +}; + +/* reg_ratr_adjacency_index_high + * Bits 23:16 of the adjacency_index. + * Access: Index + */ +MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8); + +/* reg_ratr_trap_id + * Trap ID to be reported to CPU. + * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8); + +/* reg_ratr_eth_destination_mac + * MAC address of the destination next-hop. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6); + +static inline void +mlxsw_reg_ratr_pack(char *payload, + enum mlxsw_reg_ratr_op op, bool valid, + u32 adjacency_index, u16 egress_rif) +{ + MLXSW_REG_ZERO(ratr, payload); + mlxsw_reg_ratr_op_set(payload, op); + mlxsw_reg_ratr_v_set(payload, valid); + mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index); + mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16); + mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif); +} + +static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload, + const char *dest_mac) +{ + mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac); +} + +/* RALTA - Router Algorithmic LPM Tree Allocation Register + * ------------------------------------------------------- + * RALTA is used to allocate the LPM trees of the SHSPM method. 
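+ *
+ * A tree must be allocated before routes can be bound to it; a minimal
+ * allocation sketch (tree_id is chosen by the driver, and the usual
+ * mlxsw_reg_write() helper is assumed):
+ *
+ *	char ralta_pl[MLXSW_REG_RALTA_LEN];
+ *
+ *	mlxsw_reg_ralta_pack(ralta_pl, true,
+ *			     MLXSW_REG_RALXX_PROTOCOL_IPV4, tree_id);
+ *	mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralta), ralta_pl);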
+ */ +#define MLXSW_REG_RALTA_ID 0x8010 +#define MLXSW_REG_RALTA_LEN 0x04 + +static const struct mlxsw_reg_info mlxsw_reg_ralta = { + .id = MLXSW_REG_RALTA_ID, + .len = MLXSW_REG_RALTA_LEN, +}; + +/* reg_ralta_op + * opcode (valid for Write, must be 0 on Read) + * 0 - allocate a tree + * 1 - deallocate a tree + * Access: OP + */ +MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2); + +enum mlxsw_reg_ralxx_protocol { + MLXSW_REG_RALXX_PROTOCOL_IPV4, + MLXSW_REG_RALXX_PROTOCOL_IPV6, +}; + +/* reg_ralta_protocol + * Protocol. + * Deallocation opcode: Reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4); + +/* reg_ralta_tree_id + * An identifier (numbered from 1..cap_shspm_max_trees-1) representing + * the tree identifier (managed by software). + * Note that tree_id 0 is allocated for a default-route tree. + * Access: Index + */ +MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8); + +static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc, + enum mlxsw_reg_ralxx_protocol protocol, + u8 tree_id) +{ + MLXSW_REG_ZERO(ralta, payload); + mlxsw_reg_ralta_op_set(payload, !alloc); + mlxsw_reg_ralta_protocol_set(payload, protocol); + mlxsw_reg_ralta_tree_id_set(payload, tree_id); +} + +/* RALST - Router Algorithmic LPM Structure Tree Register + * ------------------------------------------------------ + * RALST is used to set and query the structure of an LPM tree. + * The structure of the tree must be a sorted binary tree, while + * each node is a bin that is tagged as the length of the prefixes the lookup + * will refer to. Therefore, bin X refers to a set of entries with prefixes + * of X bits to match with the destination address. The bin 0 indicates + * the default action, when there is no match of any prefix. + */ +#define MLXSW_REG_RALST_ID 0x8011 +#define MLXSW_REG_RALST_LEN 0x104 + +static const struct mlxsw_reg_info mlxsw_reg_ralst = { + .id = MLXSW_REG_RALST_ID, + .len = MLXSW_REG_RALST_LEN, +}; + +/* reg_ralst_root_bin + * The bin number of the root bin. + * 0 < root_bin <= (length of the IP address) + * For a default-route tree, configure 0xff. + * Access: Index + */ + +/* reg_ralue_prefix_len + * Number of bits in the prefix of the LPM route. + * Note that for IPv6 prefixes, if prefix_len > 64 the entry consumes + * two entries in the physical HW table. + * Access: Index + */ +MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8); + +/* reg_ralue_dip* + * The prefix of the route or of the marker that the object of the LPM + * is compared with. The most significant bits of the dip are the prefix. + * The least significant bits must be '0' if the prefix_len is smaller + * than 128 for IPv6 or smaller than 32 for IPv4. + * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved. + * Access: Index + */ +MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32); + +enum mlxsw_reg_ralue_entry_type { + MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1, + MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2, + MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3, +}; + +/* reg_ralue_entry_type + * Entry type. + * Note - for Marker entries, the action_type and action fields are reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2); + +/* reg_ralue_bmp_len + * The best match prefix length in the case that there is no match for + * longer prefixes. + * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len + * Note for any update operation with entry_type modification this + * field must be set.
+ * Access: RW + */ +MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8); + +enum mlxsw_reg_ralue_action_type { + MLXSW_REG_RALUE_ACTION_TYPE_REMOTE, + MLXSW_REG_RALUE_ACTION_TYPE_LOCAL, + MLXSW_REG_RALUE_ACTION_TYPE_IP2ME, +}; + +/* reg_ralue_action_type + * Action Type + * Indicates how the IP address is connected. + * It can be connected to a local subnet through local_erif or can be + * on a remote subnet connected through a next-hop router, + * or transmitted to the CPU. + * Reserved when entry_type = MARKER_ENTRY + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2); + +enum mlxsw_reg_ralue_trap_action { + MLXSW_REG_RALUE_TRAP_ACTION_NOP, + MLXSW_REG_RALUE_TRAP_ACTION_TRAP, + MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RALUE_TRAP_ACTION_MIRROR, + MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR, +}; + +/* reg_ralue_trap_action + * Trap action. + * For IP2ME action, only NOP and MIRROR are possible. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4); + +/* reg_ralue_trap_id + * Trap ID to be reported to CPU. + * Trap ID is RTR_INGRESS0 or RTR_INGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9); + +/* reg_ralue_adjacency_index + * Points to the first entry of the group-based ECMP. + * Only relevant in case of REMOTE action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24); + +/* reg_ralue_ecmp_size + * Amount of sequential entries starting + * from the adjacency_index (the number of ECMPs). + * The valid range is 1-64, 512, 1024, 2048 and 4096. + * Reserved when trap_action is TRAP or DISCARD_ERROR. + * Only relevant in case of REMOTE action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13); + +/* reg_ralue_local_erif + * Egress Router Interface. + * Only relevant in case of LOCAL action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); + +/* reg_ralue_v + * Valid bit for the tunnel_ptr field. + * If valid = 0 then trap to CPU as IP2ME trap ID. + * If valid = 1 and the packet format allows NVE or IPinIP tunnel + * decapsulation then tunnel decapsulation is done. + * If valid = 1 and packet format does not allow NVE or IPinIP tunnel + * decapsulation then trap as IP2ME trap ID. + * Only relevant in case of IP2ME action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1); + +/* reg_ralue_tunnel_ptr + * Tunnel Pointer for NVE or IPinIP tunnel decapsulation. + * For Spectrum, pointer to KVD Linear. + * Only relevant in case of IP2ME action. 
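+ *
+ * (A complete RALUE write packs the key first and then exactly one of
+ * the three action types; a hypothetical remote-route write, where op
+ * is a RALUE write opcode and vr_id, prefix_len, dip, adj_index and
+ * ecmp_size are the caller's:
+ *
+ *	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ *			      op, vr_id, prefix_len, dip);
+ *	mlxsw_reg_ralue_act_remote_pack(ralue_pl,
+ *					MLXSW_REG_RALUE_TRAP_ACTION_NOP,
+ *					0, adj_index, ecmp_size);
+ *	mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralue), ralue_pl);)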
+ * Access: RW + */ +MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24); + +static inline void mlxsw_reg_ralue_pack(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len) +{ + MLXSW_REG_ZERO(ralue, payload); + mlxsw_reg_ralue_protocol_set(payload, protocol); + mlxsw_reg_ralue_op_set(payload, op); + mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); + mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); + mlxsw_reg_ralue_entry_type_set(payload, + MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY); + mlxsw_reg_ralue_bmp_len_set(payload, prefix_len); +} + +static inline void mlxsw_reg_ralue_pack4(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len, + u32 dip) +{ + mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len); + mlxsw_reg_ralue_dip4_set(payload, dip); +} + +static inline void +mlxsw_reg_ralue_act_remote_pack(char *payload, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u32 adjacency_index, u16 ecmp_size) +{ + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_REMOTE); + mlxsw_reg_ralue_trap_action_set(payload, trap_action); + mlxsw_reg_ralue_trap_id_set(payload, trap_id); + mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index); + mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size); +} + +static inline void +mlxsw_reg_ralue_act_local_pack(char *payload, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u16 local_erif) +{ + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_LOCAL); + mlxsw_reg_ralue_trap_action_set(payload, trap_action); + mlxsw_reg_ralue_trap_id_set(payload, trap_id); + mlxsw_reg_ralue_local_erif_set(payload, local_erif); +} + +static inline void +mlxsw_reg_ralue_act_ip2me_pack(char *payload) +{ + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_IP2ME); +} + +/* RAUHT - Router Algorithmic LPM Unicast Host Table Register + * ---------------------------------------------------------- + * The RAUHT register is used to configure and query the Unicast Host table in + * devices that implement the Algorithmic LPM. + */ +#define MLXSW_REG_RAUHT_ID 0x8014 +#define MLXSW_REG_RAUHT_LEN 0x74 + +static const struct mlxsw_reg_info mlxsw_reg_rauht = { + .id = MLXSW_REG_RAUHT_ID, + .len = MLXSW_REG_RAUHT_LEN, +}; + +enum mlxsw_reg_rauht_type { + MLXSW_REG_RAUHT_TYPE_IPV4, + MLXSW_REG_RAUHT_TYPE_IPV6, +}; + +/* reg_rauht_type + * Access: Index + */ +MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2); + +enum mlxsw_reg_rauht_op { + MLXSW_REG_RAUHT_OP_QUERY_READ = 0, + /* Read operation */ + MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1, + /* Clear on read operation. Used to read entry and clear + * activity bit. + */ + MLXSW_REG_RAUHT_OP_WRITE_ADD = 0, + /* Add. Used to write a new entry to the table. All R/W fields are + * relevant for new entry. Activity bit is set for new entries. + */ + MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1, + /* Update action. Used to update an existing route entry and + * only update the following fields: + * trap_action, trap_id, mac, counter_set_type, counter_index + */ + MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2, + /* Clear activity. A bit is cleared for the entry. */ + MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3, + /* Delete entry */ + MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4, + /* Delete all host entries on a RIF. In this command, dip + * field is reserved. 
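+ *
+ * (Typical neighbour programming uses the _pack4() helper below; a
+ * hypothetical add, where rif_index, neigh_mac and dip are the
+ * caller's:
+ *
+ *	char rauht_pl[MLXSW_REG_RAUHT_LEN];
+ *
+ *	mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
+ *			      rif_index, neigh_mac, dip);
+ *	mlxsw_reg_write(mlxsw_core, MLXSW_REG(rauht), rauht_pl);)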
+ */ +}; + +/* reg_rauht_op + * Access: OP + */ +MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3); + +/* reg_rauht_a + * Activity. Set for new entries. Set if a packet lookup has hit on + * the specific entry. + * To clear the a bit, use "clear activity" op. + * Enabled by activity_dis in RGCR + * Access: RO + */ +MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1); + +/* reg_rauht_rif + * Router Interface + * Access: Index + */ +MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16); + +/* reg_rauht_dip* + * Destination address. + * Access: Index + */ +MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32); + +enum mlxsw_reg_rauht_trap_action { + MLXSW_REG_RAUHT_TRAP_ACTION_NOP, + MLXSW_REG_RAUHT_TRAP_ACTION_TRAP, + MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR, + MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS, +}; + +/* reg_rauht_trap_action + * Access: RW + */ +MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4); + +enum mlxsw_reg_rauht_trap_id { + MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0, + MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1, +}; + +/* reg_rauht_trap_id + * Trap ID to be reported to CPU. + * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR, + * trap_id is reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9); + +/* reg_rauht_counter_set_type + * Counter set type for flow counters + * Access: RW + */ +MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8); + +/* reg_rauht_counter_index + * Counter index for flow counters + * Access: RW + */ +MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24); + +/* reg_rauht_mac + * MAC address. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6); + +static inline void mlxsw_reg_rauht_pack(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac) +{ + MLXSW_REG_ZERO(rauht, payload); + mlxsw_reg_rauht_op_set(payload, op); + mlxsw_reg_rauht_rif_set(payload, rif); + mlxsw_reg_rauht_mac_memcpy_to(payload, mac); +} + +static inline void mlxsw_reg_rauht_pack4(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac, u32 dip) +{ + mlxsw_reg_rauht_pack(payload, op, rif, mac); + mlxsw_reg_rauht_dip4_set(payload, dip); +} + +/* RALEU - Router Algorithmic LPM ECMP Update Register + * --------------------------------------------------- + * The register enables updating the ECMP section in the action for multiple + * LPM Unicast entries in a single operation. The update is executed to + * all entries of a {virtual router, protocol} tuple using the same ECMP group. + */ +#define MLXSW_REG_RALEU_ID 0x8015 +#define MLXSW_REG_RALEU_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_raleu = { + .id = MLXSW_REG_RALEU_ID, + .len = MLXSW_REG_RALEU_LEN, +}; + +/* reg_raleu_protocol + * Protocol. + * Access: Index + */ +MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4); + +/* reg_raleu_virtual_router + * Virtual Router ID + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ +MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16); + +/* reg_raleu_adjacency_index + * Adjacency Index used for matching on the existing entries. + * Access: Index + */ +MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24); + +/* reg_raleu_ecmp_size + * ECMP Size used for matching on the existing entries. + * Access: Index + */ +MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13); + +/* reg_raleu_new_adjacency_index + * New Adjacency Index. + * Access: WO + */ +MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24); + +/* reg_raleu_new_ecmp_size + * New ECMP Size. 
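+ *
+ * Usage sketch (editorial addition, not from the original patch; "vr_id"
+ * and the index/size variables are placeholders): re-pointing every route
+ * that currently uses adjacency group {adj_index, ecmp_size} to a resized
+ * group, via the pack helper defined below:
+ *
+ *	mlxsw_reg_raleu_pack(raleu_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ *			     vr_id, adj_index, ecmp_size,
+ *			     new_adj_index, new_ecmp_size);
+ *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
+ *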
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13);
+
+static inline void mlxsw_reg_raleu_pack(char *payload,
+					enum mlxsw_reg_ralxx_protocol protocol,
+					u16 virtual_router,
+					u32 adjacency_index, u16 ecmp_size,
+					u32 new_adjacency_index,
+					u16 new_ecmp_size)
+{
+	MLXSW_REG_ZERO(raleu, payload);
+	mlxsw_reg_raleu_protocol_set(payload, protocol);
+	mlxsw_reg_raleu_virtual_router_set(payload, virtual_router);
+	mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index);
+	mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size);
+	mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index);
+	mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size);
+}
+
+/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register
+ * ----------------------------------------------------------------
+ * The RAUHTD register allows dumping entries from the Router Unicast Host
+ * Table. For a given session an entry is dumped no more than once. The
+ * first RAUHTD access after reset is a new session. A session ends when the
+ * num_rec response is smaller than the num_rec request, or for IPv4 when
+ * num_entries is smaller than 4. The clear activity operation affects the
+ * current session, or the last session if a new session has not started.
+ */
+#define MLXSW_REG_RAUHTD_ID 0x8018
+#define MLXSW_REG_RAUHTD_BASE_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32
+#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \
+		MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
+#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
+
+static const struct mlxsw_reg_info mlxsw_reg_rauhtd = {
+	.id = MLXSW_REG_RAUHTD_ID,
+	.len = MLXSW_REG_RAUHTD_LEN,
+};
+
+#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
+#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
+
+/* reg_rauhtd_filter_fields
+ * If a bit is '0' then the relevant field is ignored and the dump is done
+ * regardless of the field value
+ * Bit0 - filter by activity: entry_a
+ * Bit3 - filter by entry rif: entry_rif
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8);
+
+enum mlxsw_reg_rauhtd_op {
+	MLXSW_REG_RAUHTD_OP_DUMP,
+	MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR,
+};
+
+/* reg_rauhtd_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2);
+
+/* reg_rauhtd_num_rec
+ * At request: number of records requested
+ * At response: number of records dumped
+ * For IPv4, each record has 4 entries at request and up to 4 entries
+ * at response
+ * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8);
+
+/* reg_rauhtd_entry_a
+ * Dump only if activity has value of entry_a
+ * Reserved if filter_fields bit0 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1);
+
+enum mlxsw_reg_rauhtd_type {
+	MLXSW_REG_RAUHTD_TYPE_IPV4,
+	MLXSW_REG_RAUHTD_TYPE_IPV6,
+};
+
+/* reg_rauhtd_type
+ * Dump only if record type is:
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4);
+
+/* reg_rauhtd_entry_rif
+ * Dump only if RIF has value of entry_rif
+ * Reserved if filter_fields bit3 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_rauhtd_pack(char *payload,
+					 enum mlxsw_reg_rauhtd_type type)
+{
+	MLXSW_REG_ZERO(rauhtd, payload);
+	mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A);
+	mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR);
+	mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM);
+	mlxsw_reg_rauhtd_entry_a_set(payload, 1);
+	mlxsw_reg_rauhtd_type_set(payload, type);
+}
+
+/* reg_rauhtd_ipv4_rec_num_entries
+ * Number of valid entries in this record:
+ * 0 - 1 valid entry
+ * 1 - 2 valid entries
+ * 2 - 3 valid entries
+ * 3 - 4 valid entries
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries,
+		     MLXSW_REG_RAUHTD_BASE_LEN, 28, 2,
+		     MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+/* reg_rauhtd_rec_type
+ * Record type.
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2,
+		     MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8
+
+/* reg_rauhtd_ipv4_ent_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
+		     MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_rif
+ * Router interface.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+		     16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_dip
+ * Destination IPv4 address.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+		     32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false);
+
+static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
+						    int ent_index, u16 *p_rif,
+						    u32 *p_dip)
+{
+	*p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index);
+	*p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index);
+}
+
 /* MFCR - Management Fan Control Register
  * --------------------------------------
  * This register controls the settings of the Fan Speed PWM mechanism.
@@ -3435,6 +4644,123 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
 	mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
 }
 
+/* MPAT - Monitoring Port Analyzer Table
+ * -------------------------------------
+ * The MPAT register is used to query and configure the Switch Port Analyzer
+ * Table. For an enabled analyzer, no field other than e (enable) can be
+ * modified.
+ */
+#define MLXSW_REG_MPAT_ID 0x901A
+#define MLXSW_REG_MPAT_LEN 0x78
+
+static const struct mlxsw_reg_info mlxsw_reg_mpat = {
+	.id = MLXSW_REG_MPAT_ID,
+	.len = MLXSW_REG_MPAT_LEN,
+};
+
+/* reg_mpat_pa_id
+ * Port Analyzer ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
+
+/* reg_mpat_system_port
+ * A unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16);
+
+/* reg_mpat_e
+ * Enable. Indicates that the Port Analyzer is enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
+
+/* reg_mpat_qos
+ * Quality Of Service Mode.
+ * 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation
+ * PCP, DEI, DSCP or VL) are configured.
+ * 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the
+ * same as in the original packet that has triggered the mirroring. For
+ * SPAN also the pcp, dei are maintained.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
+
+/* reg_mpat_be
+ * Best effort mode. Indicates that mirroring traffic should not cause
+ * packet drop or back pressure, but may discard the mirrored packets
+ * instead. Mirrored packets are forwarded in a best-effort manner.
+ * 0: Do not discard mirrored packets + * 1: Discard mirrored packets if causing congestion + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1); + +static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id, + u16 system_port, bool e) +{ + MLXSW_REG_ZERO(mpat, payload); + mlxsw_reg_mpat_pa_id_set(payload, pa_id); + mlxsw_reg_mpat_system_port_set(payload, system_port); + mlxsw_reg_mpat_e_set(payload, e); + mlxsw_reg_mpat_qos_set(payload, 1); + mlxsw_reg_mpat_be_set(payload, 1); +} + +/* MPAR - Monitoring Port Analyzer Register + * ---------------------------------------- + * MPAR register is used to query and configure the port analyzer port mirroring + * properties. + */ +#define MLXSW_REG_MPAR_ID 0x901B +#define MLXSW_REG_MPAR_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mpar = { + .id = MLXSW_REG_MPAR_ID, + .len = MLXSW_REG_MPAR_LEN, +}; + +/* reg_mpar_local_port + * The local port to mirror the packets from. + * Access: Index + */ +MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8); + +enum mlxsw_reg_mpar_i_e { + MLXSW_REG_MPAR_TYPE_EGRESS, + MLXSW_REG_MPAR_TYPE_INGRESS, +}; + +/* reg_mpar_i_e + * Ingress/Egress + * Access: Index + */ +MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4); + +/* reg_mpar_enable + * Enable mirroring + * By default, port mirroring is disabled for all ports. + * Access: RW + */ +MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1); + +/* reg_mpar_pa_id + * Port Analyzer ID. + * Access: RW + */ +MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4); + +static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, + enum mlxsw_reg_mpar_i_e i_e, + bool enable, u8 pa_id) +{ + MLXSW_REG_ZERO(mpar, payload); + mlxsw_reg_mpar_local_port_set(payload, local_port); + mlxsw_reg_mpar_enable_set(payload, enable); + mlxsw_reg_mpar_i_e_set(payload, i_e); + mlxsw_reg_mpar_pa_id_set(payload, pa_id); +} + /* MLCR - Management LED Control Register * -------------------------------------- * Controls the system LEDs. @@ -3864,6 +5190,45 @@ static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index, mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index); } +/* SBIB - Shared Buffer Internal Buffer Register + * --------------------------------------------- + * The SBIB register configures per port buffers for internal use. The internal + * buffers consume memory on the port buffers (note that the port buffers are + * used also by PBMC). + * + * For Spectrum this is used for egress mirroring. 
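+ *
+ * Usage sketch (editorial addition, not part of the original patch;
+ * "buff_size_cells" is a placeholder computed from the port MTU): the
+ * Spectrum driver reserves the buffer when egress mirroring is enabled
+ * and releases it again by writing size 0, using the helper below:
+ *
+ *	char sbib_pl[MLXSW_REG_SBIB_LEN];
+ *
+ *	mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buff_size_cells);
+ *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);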
+ */ +#define MLXSW_REG_SBIB_ID 0xB006 +#define MLXSW_REG_SBIB_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_sbib = { + .id = MLXSW_REG_SBIB_ID, + .len = MLXSW_REG_SBIB_LEN, +}; + +/* reg_sbib_local_port + * Local port number + * Not supported for CPU port and router port + * Access: Index + */ +MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8); + +/* reg_sbib_buff_size + * Units represented in cells + * Allowed range is 0 to (cap_max_headroom_size - 1) + * Default is 0 + * Access: RW + */ +MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24); + +static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port, + u32 buff_size) +{ + MLXSW_REG_ZERO(sbib, payload); + mlxsw_reg_sbib_local_port_set(payload, local_port); + mlxsw_reg_sbib_buff_size_set(payload, buff_size); +} + static inline const char *mlxsw_reg_id_str(u16 reg_id) { switch (reg_id) { @@ -3939,6 +5304,26 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "HTGT"; case MLXSW_REG_HPKT_ID: return "HPKT"; + case MLXSW_REG_RGCR_ID: + return "RGCR"; + case MLXSW_REG_RITR_ID: + return "RITR"; + case MLXSW_REG_RATR_ID: + return "RATR"; + case MLXSW_REG_RALTA_ID: + return "RALTA"; + case MLXSW_REG_RALST_ID: + return "RALST"; + case MLXSW_REG_RALTB_ID: + return "RALTB"; + case MLXSW_REG_RALUE_ID: + return "RALUE"; + case MLXSW_REG_RAUHT_ID: + return "RAUHT"; + case MLXSW_REG_RALEU_ID: + return "RALEU"; + case MLXSW_REG_RAUHTD_ID: + return "RAUHTD"; case MLXSW_REG_MFCR_ID: return "MFCR"; case MLXSW_REG_MFSC_ID: @@ -3947,6 +5332,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "MFSM"; case MLXSW_REG_MTCAP_ID: return "MTCAP"; + case MLXSW_REG_MPAT_ID: + return "MPAT"; + case MLXSW_REG_MPAR_ID: + return "MPAR"; case MLXSW_REG_MTMP_ID: return "MTMP"; case MLXSW_REG_MLCR_ID: @@ -3961,6 +5350,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SBMM"; case MLXSW_REG_SBSR_ID: return "SBSR"; + case MLXSW_REG_SBIB_ID: + return "SBIB"; default: return "*UNKNOWN*"; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 374080027..d48873bcb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -49,9 +49,14 @@ #include #include #include +#include #include +#include #include #include +#include +#include +#include #include "spectrum.h" #include "core.h" @@ -131,6 +136,8 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); */ MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); +static bool mlxsw_sp_port_dev_check(const struct net_device *dev); + static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -159,6 +166,303 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_resources *resources; + int i; + + resources = mlxsw_core_resources_get(mlxsw_sp->core); + if (!resources->max_span_valid) + return -EIO; + + mlxsw_sp->span.entries_count = resources->max_span; + mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, + sizeof(struct mlxsw_sp_span_entry), + GFP_KERNEL); + if (!mlxsw_sp->span.entries) + return -ENOMEM; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) + INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list); + + return 0; +} + +static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + 
WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
+	}
+	kfree(mlxsw_sp->span.entries);
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
+{
+	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+	struct mlxsw_sp_span_entry *span_entry;
+	char mpat_pl[MLXSW_REG_MPAT_LEN];
+	u8 local_port = port->local_port;
+	int index;
+	int i;
+	int err;
+
+	/* find a free entry to use */
+	index = -1;
+	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+		if (!mlxsw_sp->span.entries[i].used) {
+			index = i;
+			span_entry = &mlxsw_sp->span.entries[i];
+			break;
+		}
+	}
+	if (index < 0)
+		return NULL;
+
+	/* create a new port analyzer entry for local_port */
+	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+	if (err)
+		return NULL;
+
+	span_entry->used = true;
+	span_entry->id = index;
+	span_entry->ref_count = 0;
+	span_entry->local_port = local_port;
+	return span_entry;
+}
+
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_span_entry *span_entry)
+{
+	u8 local_port = span_entry->local_port;
+	char mpat_pl[MLXSW_REG_MPAT_LEN];
+	int pa_id = span_entry->id;
+
+	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+	span_entry->used = false;
+}
+
+struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+{
+	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+	int i;
+
+	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+		if (curr->used && curr->local_port == port->local_port)
+			return curr;
+	}
+	return NULL;
+}
+
+struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+{
+	struct mlxsw_sp_span_entry *span_entry;
+
+	span_entry = mlxsw_sp_span_entry_find(port);
+	if (span_entry) {
+		span_entry->ref_count++;
+		return span_entry;
+	}
+
+	return mlxsw_sp_span_entry_create(port);
+}
+
+static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
+				   struct mlxsw_sp_span_entry *span_entry)
+{
+	if (--span_entry->ref_count == 0)
+		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
+	return 0;
+}
+
+static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
+{
+	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+	struct mlxsw_sp_span_inspected_port *p;
+	int i;
+
+	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+		list_for_each_entry(p, &curr->bound_ports_list, list)
+			if (p->local_port == port->local_port &&
+			    p->type == MLXSW_SP_SPAN_EGRESS)
+				return true;
+	}
+
+	return false;
+}
+
+static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
+{
+	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
+}
+
+static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
+{
+	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+	char sbib_pl[MLXSW_REG_SBIB_LEN];
+	int err;
+
+	/* If the port is egress mirrored, the shared buffer size should be
+	 * updated according to the MTU value
+	 */
+	if (mlxsw_sp_span_is_egress_mirror(port)) {
+		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
+				    mlxsw_sp_span_mtu_to_buffsize(mtu));
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+		if (err) {
+			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static struct mlxsw_sp_span_inspected_port *
+mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_entry *span_entry) +{ + struct mlxsw_sp_span_inspected_port *p; + + list_for_each_entry(p, &span_entry->bound_ports_list, list) + if (port->local_port == p->local_port) + return p; + return NULL; +} + +static int +mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char mpar_pl[MLXSW_REG_MPAR_LEN]; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int pa_id = span_entry->id; + int err; + + /* if it is an egress SPAN, bind a shared buffer to it */ + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, + mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu)); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); + return err; + } + } + + /* bind the port to the SPAN entry */ + mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); + if (err) + goto err_mpar_reg_write; + + inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL); + if (!inspected_port) { + err = -ENOMEM; + goto err_inspected_port_alloc; + } + inspected_port->local_port = port->local_port; + inspected_port->type = type; + list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); + + return 0; + +err_mpar_reg_write: +err_inspected_port_alloc: + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + return err; +} + +static void +mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char mpar_pl[MLXSW_REG_MPAR_LEN]; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int pa_id = span_entry->id; + + inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); + if (!inspected_port) + return; + + /* remove the inspected port */ + mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); + + /* remove the SBIB buffer if it was egress SPAN */ + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + + list_del(&inspected_port->list); + kfree(inspected_port); +} + +static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + struct mlxsw_sp_port *to, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; + struct mlxsw_sp_span_entry *span_entry; + int err; + + span_entry = mlxsw_sp_span_entry_get(to); + if (!span_entry) + return -ENOENT; + + netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", + span_entry->id); + + err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type); + if (err) + goto err_port_bind; + + return 0; + +err_port_bind: + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + return err; +} + +static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, + struct mlxsw_sp_port *to, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp_span_entry *span_entry; + + span_entry = mlxsw_sp_span_entry_find(to); + if (!span_entry) { + netdev_err(from->dev, "no span 
entry found\n"); + return; + } + + netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", + span_entry->id); + mlxsw_sp_span_inspected_port_unbind(from, span_entry, type); +} + static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_up) { @@ -192,23 +496,6 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); } -static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid, enum mlxsw_reg_spms_state state) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *spms_pl; - int err; - - spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); - if (!spms_pl) - return -ENOMEM; - mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); - mlxsw_reg_spms_vid_pack(spms_pl, vid, state); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); - kfree(spms_pl); - return err; -} - static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -508,6 +795,9 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); if (err) return err; + err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); + if (err) + goto err_span_port_mtu_update; err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); if (err) goto err_port_mtu_set; @@ -515,6 +805,8 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) return 0; err_port_mtu_set: + mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); +err_span_port_mtu_update: mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); return err; } @@ -619,94 +911,8 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -static struct mlxsw_sp_vfid * -mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) -{ - struct mlxsw_sp_vfid *vfid; - - list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { - if (vfid->vid == vid) - return vfid; - } - - return NULL; -} - -static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) -{ - return find_first_zero_bit(mlxsw_sp->port_vfids.mapped, - MLXSW_SP_VFID_PORT_MAX); -} - -static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - -static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - -static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, - u16 vid) -{ - struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; - int err; - - n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); - if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { - dev_err(dev, "No available vFIDs\n"); - return ERR_PTR(-ERANGE); - } - - err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); - if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); - return ERR_PTR(err); - } - - vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) - goto err_allocate_vfid; - - vfid->vfid = n_vfid; - vfid->vid = vid; - - list_add(&vfid->list, &mlxsw_sp->port_vfids.list); - set_bit(n_vfid, 
mlxsw_sp->port_vfids.mapped); - - return vfid; - -err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); - return ERR_PTR(-ENOMEM); -} - -static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) -{ - clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); - list_del(&vfid->list); - - __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); - - kfree(vfid); -} - static struct mlxsw_sp_port * -mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_vfid *vfid) +mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_vport; @@ -724,8 +930,7 @@ mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; - mlxsw_sp_vport->vport.vfid = vfid; - mlxsw_sp_vport->vport.vid = vfid->vid; + mlxsw_sp_vport->vport.vid = vid; list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list); @@ -738,13 +943,12 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) kfree(mlxsw_sp_vport); } -int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, - u16 vid) +static int mlxsw_sp_port_add_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; + bool untagged = vid == 1; int err; /* VLAN 0 is added to HW filter when device goes up, but it is @@ -753,37 +957,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, if (!vid) return 0; - if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { - netdev_warn(dev, "VID=%d already configured\n", vid); + if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) return 0; - } - - vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!vfid) { - vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(vfid); - } - } - - mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); - if (!mlxsw_sp_vport) { - netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); - err = -ENOMEM; - goto err_port_vport_create; - } - if (!vfid->nr_vports) { - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, - true, false); - if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_vport_flood_set; - } - } + mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) + return -ENOMEM; /* When adding the first VLAN interface on a bridged port we need to * transition all the active 802.1Q bridge VLANs to use explicit @@ -791,77 +970,36 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, */ if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to set to Virtual mode\n"); + if (err) goto err_port_vp_mode_trans; - } - } - - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", - vid, vfid->vfid); - goto err_port_vid_to_fid_set; } err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) { - netdev_err(dev, "Failed to disable 
learning for VID=%d\n", vid); + if (err) goto err_port_vid_learning_set; - } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); - if (err) { - netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", - vid); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); + if (err) goto err_port_add_vid; - } - - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_FORWARDING); - if (err) { - netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); - goto err_port_stp_state_set; - } - - vfid->nr_vports++; return 0; -err_port_stp_state_set: - mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); err_port_add_vid: mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); err_port_vid_learning_set: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(vfid->vfid), vid); -err_port_vid_to_fid_set: if (list_is_singular(&mlxsw_sp_port->vports_list)) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); err_port_vp_mode_trans: - if (!vfid->nr_vports) - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); -err_vport_flood_set: mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); -err_port_vport_create: - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); return err; } -int mlxsw_sp_port_kill_vid(struct net_device *dev, - __be16 __always_unused proto, u16 vid) +static int mlxsw_sp_port_kill_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; - int err; + struct mlxsw_sp_fid *f; /* VLAN 0 is removed from HW filter when device goes down, but * it is reserved in our case, so simply return. @@ -870,63 +1008,29 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, return 0; mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - netdev_warn(dev, "VID=%d does not exist\n", vid); + if (WARN_ON(!mlxsw_sp_vport)) return 0; - } - - vfid = mlxsw_sp_vport->vport.vfid; - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_DISCARDING); - if (err) { - netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); - return err; - } - - err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); - if (err) { - netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", - vid); - return err; - } + mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); - if (err) { - netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); - return err; - } + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", - vid, vfid->vfid); - return err; - } + /* Drop FID reference. If this was the last reference the + * resources will be freed. + */ + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f && !WARN_ON(!f->leave)) + f->leave(mlxsw_sp_vport); /* When removing the last VLAN interface on a bridged port we need to * transition all active 802.1Q bridge VLANs to use VID to FID * mappings and set port's mode to VLAN mode. 
*/ - if (list_is_singular(&mlxsw_sp_port->vports_list)) { - err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to set to VLAN mode\n"); - return err; - } - } + if (list_is_singular(&mlxsw_sp_port->vports_list)) + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); - vfid->nr_vports--; mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); - /* Destroy the vFID if no vPorts are assigned to it anymore. */ - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); - return 0; } @@ -951,16 +1055,164 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, return 0; } -static const struct net_device_ops mlxsw_sp_port_netdev_ops = { - .ndo_open = mlxsw_sp_port_open, - .ndo_stop = mlxsw_sp_port_stop, - .ndo_start_xmit = mlxsw_sp_port_xmit, - .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, - .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, - .ndo_change_mtu = mlxsw_sp_port_change_mtu, +static struct mlxsw_sp_port_mall_tc_entry * +mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, + unsigned long cookie) { + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + + list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) + if (mall_tc_entry->cookie == cookie) + return mall_tc_entry; + + return NULL; +} + +static int +mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls, + const struct tc_action *a, + bool ingress) +{ + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + struct net *net = dev_net(mlxsw_sp_port->dev); + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; + struct net_device *to_dev; + int ifindex; + int err; + + ifindex = tcf_mirred_ifindex(a); + to_dev = __dev_get_by_index(net, ifindex); + if (!to_dev) { + netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); + return -EINVAL; + } + + if (!mlxsw_sp_port_dev_check(to_dev)) { + netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port"); + return -ENOTSUPP; + } + to_port = netdev_priv(to_dev); + + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + + mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; + mall_tc_entry->mirror.to_local_port = to_port->local_port; + mall_tc_entry->mirror.ingress = ingress; + list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); + + span_type = ingress ? 
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); + if (err) + goto err_mirror_add; + return 0; + +err_mirror_add: + list_del(&mall_tc_entry->list); + kfree(mall_tc_entry); + return err; +} + +static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + __be16 protocol, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + const struct tc_action *a; + LIST_HEAD(actions); + int err; + + if (!tc_single_action(cls->exts)) { + netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); + return -ENOTSUPP; + } + + tcf_exts_to_list(cls->exts, &actions); + list_for_each_entry(a, &actions, list) { + if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) + return -ENOTSUPP; + + err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls, + a, ingress); + if (err) + return err; + } + + return 0; +} + +static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; + + mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port, + cls->cookie); + if (!mall_tc_entry) { + netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); + return; + } + + switch (mall_tc_entry->type) { + case MLXSW_SP_PORT_MALL_MIRROR: + to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port]; + span_type = mall_tc_entry->mirror.ingress ? + MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + break; + default: + WARN_ON(1); + } + + list_del(&mall_tc_entry->list); + kfree(mall_tc_entry); +} + +static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *tc) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + + if (tc->type == TC_SETUP_MATCHALL) { + switch (tc->cls_mall->command) { + case TC_CLSMATCHALL_REPLACE: + return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, + proto, + tc->cls_mall, + ingress); + case TC_CLSMATCHALL_DESTROY: + mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, + tc->cls_mall); + return 0; + default: + return -EINVAL; + } + } + + return -ENOTSUPP; +} + +static const struct net_device_ops mlxsw_sp_port_netdev_ops = { + .ndo_open = mlxsw_sp_port_open, + .ndo_stop = mlxsw_sp_port_stop, + .ndo_start_xmit = mlxsw_sp_port_xmit, + .ndo_setup_tc = mlxsw_sp_setup_tc, + .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, + .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, + .ndo_change_mtu = mlxsw_sp_port_change_mtu, .ndo_get_stats64 = mlxsw_sp_port_get_stats64, .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, + .ndo_neigh_construct = mlxsw_sp_router_neigh_construct, + .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy, .ndo_fdb_add = switchdev_port_fdb_add, .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, @@ -1055,7 +1307,7 @@ struct mlxsw_sp_port_hw_stats { u64 (*getter)(char *payload); }; -static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { { .str = "a_frames_transmitted_ok", .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, @@ -1136,6 +1388,90 @@ static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { #define 
MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { + { + .str = "rx_octets_prio", + .getter = mlxsw_reg_ppcnt_rx_octets_get, + }, + { + .str = "rx_frames_prio", + .getter = mlxsw_reg_ppcnt_rx_frames_get, + }, + { + .str = "tx_octets_prio", + .getter = mlxsw_reg_ppcnt_tx_octets_get, + }, + { + .str = "tx_frames_prio", + .getter = mlxsw_reg_ppcnt_tx_frames_get, + }, + { + .str = "rx_pause_prio", + .getter = mlxsw_reg_ppcnt_rx_pause_get, + }, + { + .str = "rx_pause_duration_prio", + .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, + }, + { + .str = "tx_pause_prio", + .getter = mlxsw_reg_ppcnt_tx_pause_get, + }, + { + .str = "tx_pause_duration_prio", + .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, + }, +}; + +#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) + +static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl) +{ + u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); + + return MLXSW_SP_CELLS_TO_BYTES(transmit_queue); +} + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { + { + .str = "tc_transmit_queue_tc", + .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get, + }, + { + .str = "tc_no_buffer_discard_uc_tc", + .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, + }, +}; + +#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) + +#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ + (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ + MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ + IEEE_8021QAZ_MAX_TCS) + +static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) +{ + int i; + + for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + mlxsw_sp_port_hw_prio_stats[i].str, prio); + *p += ETH_GSTRING_LEN; + } +} + +static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) +{ + int i; + + for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + mlxsw_sp_port_hw_tc_stats[i].str, tc); + *p += ETH_GSTRING_LEN; + } +} + static void mlxsw_sp_port_get_strings(struct net_device *dev, u32 stringset, u8 *data) { @@ -1149,6 +1485,13 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port_get_prio_strings(&p, i); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port_get_tc_strings(&p, i); + break; } } @@ -1176,27 +1519,80 @@ static int mlxsw_sp_port_set_phys_id(struct net_device *dev, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); } -static void mlxsw_sp_port_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) +static int +mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, + int *p_len, enum mlxsw_reg_ppcnt_grp grp) +{ + switch (grp) { + case MLXSW_REG_PPCNT_IEEE_8023_CNT: + *p_hw_stats = mlxsw_sp_port_hw_stats; + *p_len = MLXSW_SP_PORT_HW_STATS_LEN; + break; + case MLXSW_REG_PPCNT_PRIO_CNT: + *p_hw_stats = mlxsw_sp_port_hw_prio_stats; + *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; + break; + case MLXSW_REG_PPCNT_TC_CNT: + *p_hw_stats = mlxsw_sp_port_hw_tc_stats; + *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; + break; + default: + WARN_ON(1); + return -ENOTSUPP; + } + return 0; +} + +static void __mlxsw_sp_port_get_stats(struct net_device *dev, + enum mlxsw_reg_ppcnt_grp grp, int prio, + u64 *data, int data_index) { struct mlxsw_sp_port *mlxsw_sp_port = 
netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_hw_stats *hw_stats; char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; - int i; + int i, len; int err; - mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, - MLXSW_REG_PPCNT_IEEE_8023_CNT, 0); + err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); + if (err) + return; + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); - for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) - data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; + for (i = 0; i < len; i++) + data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0; +} + +static void mlxsw_sp_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + int i, data_index = 0; + + /* IEEE 802.3 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, + data, data_index); + data_index = MLXSW_SP_PORT_HW_STATS_LEN; + + /* Per-Priority Counters */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, + data, data_index); + data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; + } + + /* Per-TC Counters */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, + data, data_index); + data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; + } } static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: - return MLXSW_SP_PORT_HW_STATS_LEN; + return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; default: return -EOPNOTSUPP; } @@ -1655,6 +2051,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } +static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->pvid = 1; + + return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); +} + +static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); +} + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool split, u8 module, u8 width, u8 lane) { @@ -1686,6 +2094,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_untagged_vlans_alloc; } INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); + INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); mlxsw_sp_port->pcpu_stats = netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); @@ -1697,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, dev->netdev_ops = &mlxsw_sp_port_netdev_ops; dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; + err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", + mlxsw_sp_port->local_port); + goto err_port_swid_set; + } + err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", @@ -1707,7 +2123,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, netif_carrier_off(dev); dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; + dev->hw_features |= NETIF_F_HW_TC; /* Each packet needs to have a Tx header (metadata) on top all other * headers. 
@@ -1721,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_system_port_mapping_set; } - err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", - mlxsw_sp_port->local_port); - goto err_port_swid_set; - } - err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", @@ -1768,7 +2178,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_dcb_init; } + err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", + mlxsw_sp_port->local_port); + goto err_port_pvid_vport_create; + } + mlxsw_sp_port_switchdev_init(mlxsw_sp_port); + mlxsw_sp->ports[local_port] = mlxsw_sp_port; err = register_netdev(dev); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", @@ -1785,27 +2203,26 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_core_port_init; } - err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); - if (err) - goto err_port_vlan_init; - - mlxsw_sp->ports[local_port] = mlxsw_sp_port; return 0; -err_port_vlan_init: - mlxsw_core_port_fini(&mlxsw_sp_port->core_port); err_core_port_init: unregister_netdev(dev); err_register_netdev: + mlxsw_sp->ports[local_port] = NULL; + mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); +err_port_pvid_vport_create: + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); err_port_dcb_init: err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: err_port_speed_by_width_set: -err_port_swid_set: err_port_system_port_mapping_set: err_dev_addr_init: + mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); +err_port_swid_set: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: kfree(mlxsw_sp_port->untagged_vlans); @@ -1816,40 +2233,24 @@ err_port_active_vlans_alloc: return err; } -static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) -{ - struct net_device *dev = mlxsw_sp_port->dev; - struct mlxsw_sp_port *mlxsw_sp_vport, *tmp; - - list_for_each_entry_safe(mlxsw_sp_vport, tmp, - &mlxsw_sp_port->vports_list, vport.list) { - u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - - /* vPorts created for VLAN devices should already be gone - * by now, since we unregistered the port netdev. 
- */ - WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev)); - mlxsw_sp_port_kill_vid(dev, 0, vid); - } -} - static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) return; - mlxsw_sp->ports[local_port] = NULL; mlxsw_core_port_fini(&mlxsw_sp_port->core_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ - mlxsw_sp_port_dcb_fini(mlxsw_sp_port); - mlxsw_sp_port_vports_fini(mlxsw_sp_port); + mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); + WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); free_netdev(mlxsw_sp_port->dev); } @@ -2086,11 +2487,8 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, local_port = mlxsw_reg_pude_local_port_get(pude_pl); mlxsw_sp_port = mlxsw_sp->ports[local_port]; - if (!mlxsw_sp_port) { - dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n", - local_port); + if (!mlxsw_sp_port) return; - } status = mlxsw_reg_pude_oper_status_get(pude_pl); if (status == MLXSW_PORT_OPER_STATUS_UP) { @@ -2245,6 +2643,51 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { .local_port = MLXSW_PORT_DONT_CARE, .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_ARPBC, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_ARPUC, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MTUERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_TTLERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LBERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_OSPF, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IP2ME, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4, + }, }; static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) @@ -2285,7 +2728,7 @@ err_rx_trap_set: mlxsw_sp); err_rx_listener_register: for (i--; i >= 0; i--) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, mlxsw_sp_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); @@ -2302,7 +2745,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) int i; for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, mlxsw_sp_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); @@ -2381,8 +2824,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 
mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; - INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); - INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->fids); + INIT_LIST_HEAD(&mlxsw_sp->vfids.list); INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); err = mlxsw_sp_base_mac_get(mlxsw_sp); @@ -2391,16 +2834,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return err; } - err = mlxsw_sp_ports_create(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); - return err; - } - err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); - goto err_event_register; + return err; } err = mlxsw_sp_traps_init(mlxsw_sp); @@ -2433,8 +2870,32 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_switchdev_init; } + err = mlxsw_sp_router_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); + goto err_router_init; + } + + err = mlxsw_sp_span_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); + goto err_span_init; + } + + err = mlxsw_sp_ports_create(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); + goto err_ports_create; + } + return 0; +err_ports_create: + mlxsw_sp_span_fini(mlxsw_sp); +err_span_init: + mlxsw_sp_router_fini(mlxsw_sp); +err_router_init: + mlxsw_sp_switchdev_fini(mlxsw_sp); err_switchdev_init: err_lag_init: mlxsw_sp_buffers_fini(mlxsw_sp); @@ -2443,20 +2904,25 @@ err_flood_init: mlxsw_sp_traps_fini(mlxsw_sp); err_rx_listener_register: mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); -err_event_register: - mlxsw_sp_ports_remove(mlxsw_sp); return err; } static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + int i; + mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_span_fini(mlxsw_sp); + mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); - mlxsw_sp_ports_remove(mlxsw_sp); + WARN_ON(!list_empty(&mlxsw_sp->vfids.list)); + WARN_ON(!list_empty(&mlxsw_sp->fids)); + for (i = 0; i < MLXSW_SP_RIF_MAX; i++) + WARN_ON_ONCE(mlxsw_sp->rifs[i]); } static struct mlxsw_config_profile mlxsw_sp_config_profile = { @@ -2487,12 +2953,17 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .max_ib_mc = 0, .used_max_pkey = 1, .max_pkey = 0, + .used_kvd_sizes = 1, + .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, + .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE, + .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE, .swid_config = { { .used_type = 1, .type = MLXSW_PORT_SWID_TYPE_ETH, } }, + .resource_query_enable = 1, }; static struct mlxsw_driver mlxsw_sp_driver = { @@ -2518,41 +2989,647 @@ static struct mlxsw_driver mlxsw_sp_driver = { .profile = &mlxsw_sp_config_profile, }; -static int -mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) +static bool mlxsw_sp_port_dev_check(const struct net_device *dev) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; - - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); - mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; } -static int 
-mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid) +static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) +{ + struct net_device *lower_dev; + struct list_head *iter; + + if (mlxsw_sp_port_dev_check(dev)) + return netdev_priv(dev); + + netdev_for_each_all_lower_dev(dev, lower_dev, iter) { + if (mlxsw_sp_port_dev_check(lower_dev)) + return netdev_priv(lower_dev); + } + return NULL; +} + +static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); + return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; +} + +static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) +{ + struct net_device *lower_dev; + struct list_head *iter; + + if (mlxsw_sp_port_dev_check(dev)) + return netdev_priv(dev); + + netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) { + if (mlxsw_sp_port_dev_check(lower_dev)) + return netdev_priv(lower_dev); + } + return NULL; +} + +struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + + rcu_read_lock(); + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); + if (mlxsw_sp_port) + dev_hold(mlxsw_sp_port->dev); + rcu_read_unlock(); + return mlxsw_sp_port; +} + +void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) +{ + dev_put(mlxsw_sp_port->dev); +} + +static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, + unsigned long event) +{ + switch (event) { + case NETDEV_UP: + if (!r) + return true; + r->ref_count++; + return false; + case NETDEV_DOWN: + if (r && --r->ref_count == 0) + return true; + /* It is possible we already removed the RIF ourselves + * if it was assigned to a netdev that is now a bridge + * or LAG slave. + */ + return false; + } + + return false; +} + +static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 0; i < MLXSW_SP_RIF_MAX; i++) + if (!mlxsw_sp->rifs[i]) + return i; + + return MLXSW_SP_RIF_MAX; +} + +static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, + bool *p_lagged, u16 *p_system_port) +{ + u8 local_port = mlxsw_sp_vport->local_port; + + *p_lagged = mlxsw_sp_vport->lagged; + *p_system_port = *p_lagged ? 
mlxsw_sp_vport->lag_id : local_port; +} + +static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev, u16 rif, + bool create) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + bool lagged = mlxsw_sp_vport->lagged; + char ritr_pl[MLXSW_REG_RITR_LEN]; + u16 system_port; + + mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, + l3_dev->mtu, l3_dev->dev_addr); + + mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); + mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, + mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + +static struct mlxsw_sp_fid * +mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) +{ + struct mlxsw_sp_fid *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + f->leave = mlxsw_sp_vport_rif_sp_leave; + f->ref_count = 0; + f->dev = l3_dev; + f->fid = fid; + + return f; +} + +static struct mlxsw_sp_rif * +mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) +{ + struct mlxsw_sp_rif *r; + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return NULL; + + ether_addr_copy(r->addr, l3_dev->dev_addr); + r->mtu = l3_dev->mtu; + r->ref_count = 1; + r->dev = l3_dev; + r->rif = rif; + r->f = f; + + return r; +} + +static struct mlxsw_sp_rif * +mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_fid *f; + struct mlxsw_sp_rif *r; + u16 fid, rif; + int err; + + rif = mlxsw_sp_avail_rif_get(mlxsw_sp); + if (rif == MLXSW_SP_RIF_MAX) + return ERR_PTR(-ERANGE); + + err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true); + if (err) + return ERR_PTR(err); + + fid = mlxsw_sp_rif_sp_to_fid(rif); + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); + if (err) + goto err_rif_fdb_op; + + f = mlxsw_sp_rfid_alloc(fid, l3_dev); + if (!f) { + err = -ENOMEM; + goto err_rfid_alloc; + } + + r = mlxsw_sp_rif_alloc(rif, l3_dev, f); + if (!r) { + err = -ENOMEM; + goto err_rif_alloc; + } + + f->r = r; + mlxsw_sp->rifs[rif] = r; + + return r; + +err_rif_alloc: + kfree(f); +err_rfid_alloc: + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); +err_rif_fdb_op: + mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); + return ERR_PTR(err); +} + +static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, + struct mlxsw_sp_rif *r) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct net_device *l3_dev = r->dev; + struct mlxsw_sp_fid *f = r->f; + u16 fid = f->fid; + u16 rif = r->rif; + + mlxsw_sp->rifs[rif] = NULL; + f->r = NULL; + + kfree(r); + + kfree(f); + + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); + + mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); +} + +static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_rif *r; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); + if (!r) { + r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); + if (IS_ERR(r)) + return PTR_ERR(r); + } + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f); + r->f->ref_count++; + + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid); + + return 0; +} + +static void 
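
mlxsw_sp_vport_rif_sp_create() above follows the kernel's ERR_PTR convention: each failing step jumps to a label that rolls back exactly the steps already completed, newest first, and the errno travels back encoded in the pointer. A user-space approximation; ERR_PTR/IS_ERR/PTR_ERR are re-implemented here only for the sketch and assume a pointer is at least as wide as a long:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct rif { int index; };

static int reserve_index(void)    { return 5; }	/* pretend slot 5 is free */
static int program_hw(int idx)    { (void)idx; return 0; }
static void unprogram_hw(int idx) { (void)idx; }

static struct rif *rif_create(void)
{
	struct rif *r;
	int idx, err;

	idx = reserve_index();
	err = program_hw(idx);
	if (err)
		return ERR_PTR(err);

	r = calloc(1, sizeof(*r));
	if (!r) {
		err = -ENOMEM;
		goto err_alloc;
	}
	r->index = idx;
	return r;

err_alloc:
	unprogram_hw(idx);	/* undo completed steps, newest first */
	return ERR_PTR(err);
}

int main(void)
{
	struct rif *r = rif_create();

	if (IS_ERR(r)) {
		fprintf(stderr, "create failed: %ld\n", PTR_ERR(r));
		return 1;
	}
	printf("RIF created at index %d\n", r->index);
	free(r);
	return 0;
}
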
mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r); +} + +static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, + struct net_device *port_dev, + unsigned long event, u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); + case NETDEV_DOWN: + mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); + break; + } + + return 0; +} + +static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, + unsigned long event) +{ + if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev)) + return 0; + + return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); +} + +static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, + struct net_device *lag_dev, + unsigned long event, u16 vid) +{ + struct net_device *port_dev; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(lag_dev, port_dev, iter) { + if (mlxsw_sp_port_dev_check(port_dev)) { + err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, + event, vid); + if (err) + return err; + } + } + + return 0; +} + +static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, + unsigned long event) +{ + if (netif_is_bridge_port(lag_dev)) + return 0; + + return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); +} + +static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev) +{ + u16 fid; + + if (is_vlan_dev(l3_dev)) + fid = vlan_dev_vlan_id(l3_dev); + else if (mlxsw_sp->master_bridge.dev == l3_dev) + fid = 1; + else + return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); + + return mlxsw_sp_fid_find(mlxsw_sp, fid); +} + +static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid) +{ + return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID : + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; +} + +static u16 mlxsw_sp_flood_table_index_get(u16 fid) +{ + return mlxsw_sp_fid_is_vfid(fid) ? 
mlxsw_sp_fid_to_vfid(fid) : fid; +} + +static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, + bool set) +{ + enum mlxsw_flood_table_type table_type; + char *sftr_pl; + u16 index; + int err; + + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); + if (!sftr_pl) + return -ENOMEM; + + table_type = mlxsw_sp_flood_table_type_get(fid); + index = mlxsw_sp_flood_table_index_get(fid); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type, + 1, MLXSW_PORT_ROUTER_PORT, set); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + + kfree(sftr_pl); + return err; +} + +static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) +{ + if (mlxsw_sp_fid_is_vfid(fid)) + return MLXSW_REG_RITR_FID_IF; + else + return MLXSW_REG_RITR_VLAN_IF; +} + +static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + u16 fid, u16 rif, + bool create) +{ + enum mlxsw_reg_ritr_if_type rif_type; + char ritr_pl[MLXSW_REG_RITR_LEN]; + + rif_type = mlxsw_sp_rif_type_get(fid); + mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu, + l3_dev->dev_addr); + mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + struct mlxsw_sp_fid *f) +{ + struct mlxsw_sp_rif *r; + u16 rif; + int err; + + rif = mlxsw_sp_avail_rif_get(mlxsw_sp); + if (rif == MLXSW_SP_RIF_MAX) + return -ERANGE; + + err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true); + if (err) + return err; + + err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); + if (err) + goto err_rif_bridge_op; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); + if (err) + goto err_rif_fdb_op; + + r = mlxsw_sp_rif_alloc(rif, l3_dev, f); + if (!r) { + err = -ENOMEM; + goto err_rif_alloc; + } + + f->r = r; + mlxsw_sp->rifs[rif] = r; + + netdev_dbg(l3_dev, "RIF=%d created\n", rif); + + return 0; + +err_rif_alloc: + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); +err_rif_fdb_op: + mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); +err_rif_bridge_op: + mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); + return err; +} + +void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) +{ + struct net_device *l3_dev = r->dev; + struct mlxsw_sp_fid *f = r->f; + u16 rif = r->rif; + + mlxsw_sp->rifs[rif] = NULL; + f->r = NULL; + + kfree(r); + + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); + + mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); + + mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); + + netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); +} + +static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, + struct net_device *br_dev, + unsigned long event) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); + struct mlxsw_sp_fid *f; + + /* FID can either be an actual FID if the L3 device is the + * VLAN-aware bridge or a VLAN device on top. Otherwise, the + * L3 device is a VLAN-unaware bridge and we get a vFID. 
+ */ + f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev); + if (WARN_ON(!f)) + return -EINVAL; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f); + case NETDEV_DOWN: + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + break; + } + + return 0; +} + +static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, + unsigned long event) +{ + struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); + u16 vid = vlan_dev_vlan_id(vlan_dev); + + if (mlxsw_sp_port_dev_check(real_dev)) + return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, + vid); + else if (netif_is_lag_master(real_dev)) + return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, + vid); + else if (netif_is_bridge_master(real_dev) && + mlxsw_sp->master_bridge.dev == real_dev) + return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev, + event); + + return 0; +} + +static int mlxsw_sp_inetaddr_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *r; + int err = 0; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(r, event)) + goto out; + + if (mlxsw_sp_port_dev_check(dev)) + err = mlxsw_sp_inetaddr_port_event(dev, event); + else if (netif_is_lag_master(dev)) + err = mlxsw_sp_inetaddr_lag_event(dev, event); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event); + else if (is_vlan_dev(dev)) + err = mlxsw_sp_inetaddr_vlan_event(dev, event); + +out: + return notifier_from_errno(err); +} + +static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif, + const char *mac, int mtu) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + int err; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); + mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); + mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) +{ + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *r; + int err; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + return 0; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!r) + return 0; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false); + if (err) + return err; + + err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu); + if (err) + goto err_rif_edit; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true); + if (err) + goto err_rif_fdb_op; + + ether_addr_copy(r->addr, dev->dev_addr); + r->mtu = dev->mtu; + + netdev_dbg(dev, "Updated RIF=%d\n", r->rif); + + return 0; + +err_rif_fdb_op: + mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu); +err_rif_edit: + mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true); + return err; +} + +static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, + u16 fid) +{ + if (mlxsw_sp_fid_is_vfid(fid)) + return mlxsw_sp_port_vport_find_by_fid(lag_port, fid); + else + return test_bit(fid, lag_port->active_vlans); +} + +static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) { struct mlxsw_sp *mlxsw_sp = 
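
mlxsw_sp_inetaddr_event() above funnels every per-device-type branch into a single errno and lets notifier_from_errno() translate it for the notifier chain, so any failing handler vetoes the address change. A simplified model of that translation with stub handlers; the real kernel helper additionally encodes the errno value so notifier_to_errno() can recover it:

#include <stdio.h>

#define NOTIFY_DONE 0x0000	/* don't care */
#define NOTIFY_OK   0x0001	/* suits me */
#define NOTIFY_BAD  0x8002	/* veto (NOTIFY_STOP_MASK | 2) */

/* Simplified notifier_from_errno(): errno becomes a veto. */
static int notifier_from_errno_sketch(int err)
{
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

/* Stand-ins for the per-device-type handlers in the patch. */
static int port_event(void)   { return 0; }
static int bridge_event(void) { return -22; /* -EINVAL */ }

int main(void)
{
	printf("port:   %#x\n", notifier_from_errno_sketch(port_event()));
	printf("bridge: %#x\n", notifier_from_errno_sketch(bridge_event()));
	return 0;
}
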
mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; + u8 local_port = mlxsw_sp_port->local_port; + u16 lag_id = mlxsw_sp_port->lag_id; + int i, count = 0; - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); - mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); - mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, - mlxsw_sp_port->local_port); + if (!mlxsw_sp_port->lagged) + return true; - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + struct mlxsw_sp_port *lag_port; + + lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); + if (!lag_port || lag_port->local_port == local_port) + continue; + if (mlxsw_sp_lag_port_fid_member(lag_port, fid)) + count++; + } + + return !count; } static int -mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) +mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char sfdf_pl[MLXSW_REG_SFDF_LEN]; - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); - mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, + mlxsw_sp_port->local_port); + + netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n", + mlxsw_sp_port->local_port, fid); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); } @@ -2568,71 +3645,64 @@ mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n", + mlxsw_sp_port->lag_id, fid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); } -static int -__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) +int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { - int err, last_err = 0; - u16 vid; - - for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; - } + if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid)) + return 0; - return last_err; + if (mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, + fid); + else + return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); } -static int -__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) +static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp) { - int err, last_err = 0; - u16 vid; - - for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; - } + struct mlxsw_sp_fid *f, *tmp; - return last_err; + list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list) + if (--f->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp, f); + else + WARN_ON_ONCE(1); } -static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) +static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { - if (!list_empty(&mlxsw_sp_port->vports_list)) - if (mlxsw_sp_port->lagged) - return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); - else - return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); - else - if (mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); - else - return 
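
The FDB on a LAG is keyed by LAG ID rather than by member port, so mlxsw_sp_port_fdb_should_flush() above only permits a flush when no other LAG member still uses the FID; otherwise live entries belonging to the remaining members would be lost. The decision in miniature, with a fixed toy membership table replacing the driver's port lookup:

#include <assert.h>
#include <stdbool.h>

#define LAG_SLOTS 4

/* true = this slot is a LAG member that still uses the FID */
static bool member_in_fid[LAG_SLOTS] = { true, false, true, false };

static bool fdb_should_flush(int leaving_slot)
{
	int i, others = 0;

	for (i = 0; i < LAG_SLOTS; i++) {
		if (i == leaving_slot || !member_in_fid[i])
			continue;
		others++;
	}
	/* flush only when nobody else still uses the FID on this LAG */
	return others == 0;
}

int main(void)
{
	assert(!fdb_should_flush(0));	/* slot 2 still uses the FID */
	member_in_fid[2] = false;
	assert(fdb_should_flush(0));	/* last user gone: safe to flush */
	return 0;
}
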
mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); + return !mlxsw_sp->master_bridge.dev || + mlxsw_sp->master_bridge.dev == br_dev; } -static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) +static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - - if (mlxsw_sp_vport->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, - fid); - else - return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); + mlxsw_sp->master_bridge.dev = br_dev; + mlxsw_sp->master_bridge.ref_count++; } -static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) { - return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; + if (--mlxsw_sp->master_bridge.ref_count == 0) { + mlxsw_sp->master_bridge.dev = NULL; + /* It's possible upper VLAN devices are still holding + * references to underlying FIDs. Drop the reference + * and release the resources if it was the last one. + * If it wasn't, then something bad happened. + */ + mlxsw_sp_master_bridge_gone_sync(mlxsw_sp); + } } -static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) +static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *br_dev) { struct net_device *dev = mlxsw_sp_port->dev; int err; @@ -2646,6 +3716,8 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) if (err) return err; + mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev); + mlxsw_sp_port->learning = 1; mlxsw_sp_port->learning_sync = 1; mlxsw_sp_port->uc_flood = 1; @@ -2654,16 +3726,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, - bool flush_fdb) +static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev; - if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); + mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@ -2672,28 +3742,7 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, /* Add implicit VLAN interface in the device, so that untagged * packets will be classified to the default vFID. 
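
Since the ASIC offloads exactly one VLAN-aware bridge, the driver above just ref-counts enslaved ports against a single master pointer; when the count hits zero it also sweeps FIDs that VLAN uppers may still pin (mlxsw_sp_master_bridge_gone_sync()). The bookkeeping skeleton, minus that FID sweep; the types and harness are illustrative:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct bridge { int id; };	/* stand-in for struct net_device */

static struct bridge *master;	/* the single offloaded VLAN-aware bridge */
static unsigned int master_refs;

/* A port may join only the current master, or become the first one. */
static bool master_bridge_check(const struct bridge *br)
{
	return !master || master == br;
}

static void master_bridge_inc(struct bridge *br)
{
	master = br;
	master_refs++;
}

static void master_bridge_dec(void)
{
	if (--master_refs == 0)
		master = NULL;	/* the patch also reclaims leftover FIDs here */
}

int main(void)
{
	struct bridge br0 = { 0 }, br1 = { 1 };

	assert(master_bridge_check(&br0));
	master_bridge_inc(&br0);
	assert(!master_bridge_check(&br1));	/* a second bridge is refused */
	master_bridge_dec();
	assert(master_bridge_check(&br1));	/* slot is free again */
	return 0;
}
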
*/ - return mlxsw_sp_port_add_vid(dev, 0, 1); -} - -static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - return !mlxsw_sp->master_bridge.dev || - mlxsw_sp->master_bridge.dev == br_dev; -} - -static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - mlxsw_sp->master_bridge.dev = br_dev; - mlxsw_sp->master_bridge.ref_count++; -} - -static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - if (--mlxsw_sp->master_bridge.ref_count == 0) - mlxsw_sp->master_bridge.dev = NULL; + mlxsw_sp_port_add_vid(dev, 0, 1); } static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) @@ -2809,6 +3858,45 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, return -EBUSY; } +static void +mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_fid *f; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); + if (WARN_ON(!mlxsw_sp_vport)) + return; + + /* If vPort is assigned a RIF, then leave it since it's no + * longer valid. + */ + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f) + f->leave(mlxsw_sp_vport); + + mlxsw_sp_vport->lag_id = lag_id; + mlxsw_sp_vport->lagged = 1; +} + +static void +mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_fid *f; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); + if (WARN_ON(!mlxsw_sp_vport)) + return; + + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f) + f->leave(mlxsw_sp_vport); + + mlxsw_sp_vport->lagged = 0; +} + static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) { @@ -2844,6 +3932,9 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->lag_id = lag_id; mlxsw_sp_port->lagged = 1; lag->ref_count++; + + mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id); + return 0; err_col_port_enable: @@ -2854,65 +3945,35 @@ err_col_port_add: return err; } -static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb); - -static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *lag_dev) +static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_upper *lag; u16 lag_id = mlxsw_sp_port->lag_id; - int err; + struct mlxsw_sp_upper *lag; if (!mlxsw_sp_port->lagged) - return 0; + return; lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); WARN_ON(lag->ref_count == 0); - err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); - if (err) - return err; - err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); - if (err) - return err; - - /* In case we leave a LAG device that has bridges built on top, - * then their teardown sequence is never issued and we need to - * invoke the necessary cleanup routines ourselves. 
- */ - list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, - vport.list) { - struct net_device *br_dev; - - if (!mlxsw_sp_vport->bridged) - continue; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); - } + mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); if (mlxsw_sp_port->bridged) { mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); - mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); - mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); } - if (lag->ref_count == 1) { - if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); - if (err) - return err; - } + if (lag->ref_count == 1) + mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, mlxsw_sp_port->local_port); mlxsw_sp_port->lagged = 0; lag->ref_count--; - return 0; + + mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port); } static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, @@ -2961,42 +4022,25 @@ static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid = vlan_dev_vlan_id(vlan_dev); mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); + if (WARN_ON(!mlxsw_sp_vport)) return -EINVAL; - } mlxsw_sp_vport->dev = vlan_dev; return 0; } -static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *vlan_dev) +static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) { struct mlxsw_sp_port *mlxsw_sp_vport; u16 vid = vlan_dev_vlan_id(vlan_dev); mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return -EINVAL; - } - - /* When removing a VLAN device while still bridged we should first - * remove it from the bridge, as we receive the bridge's notification - * when the vPort is already gone. - */ - if (mlxsw_sp_vport->bridged) { - struct net_device *br_dev; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); - } + if (WARN_ON(!mlxsw_sp_vport)) + return; mlxsw_sp_vport->dev = mlxsw_sp_port->dev; - - return 0; } static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, @@ -3006,7 +4050,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; - int err; + int err = 0; mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -3015,73 +4059,56 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) + if (!is_vlan_dev(upper_dev) && + !netif_is_lag_master(upper_dev) && + !netif_is_bridge_master(upper_dev)) + return -EINVAL; + if (!info->linking) break; /* HW limitation forbids to put ports to multiple bridges. 
*/ if (netif_is_bridge_master(upper_dev) && !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, info->upper_info)) - return NOTIFY_BAD; + return -EINVAL; + if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) + return -EINVAL; + if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && + !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (is_vlan_dev(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to link VLAN device\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to unlink VLAN device\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); } else if (netif_is_bridge_master(upper_dev)) { - if (info->linking) { - err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } - mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); - } else { - err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, - true); - mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } - } + if (info->linking) + err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, + upper_dev); + else + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); } else if (netif_is_lag_master(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to join link aggregation\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to leave link aggregation\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); + } else { + err = -EINVAL; + WARN_ON(1); } break; } - return NOTIFY_DONE; + return err; } static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, @@ -3105,7 +4132,7 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, break; } - return NOTIFY_DONE; + return 0; } static int mlxsw_sp_netdevice_port_event(struct net_device *dev, @@ -3119,7 +4146,7 @@ static int mlxsw_sp_netdevice_port_event(struct net_device *dev, return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); } - return NOTIFY_DONE; + return 0; } static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, @@ -3132,218 +4159,230 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, netdev_for_each_lower_dev(lag_dev, dev, iter) { if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); - if (ret == NOTIFY_BAD) + if (ret) return ret; } } - return NOTIFY_DONE; + return 0; } -static struct mlxsw_sp_vfid * -mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev) +static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev) { - struct mlxsw_sp_vfid *vfid; + u16 fid = vlan_dev_vlan_id(vlan_dev); + struct mlxsw_sp_fid *f; - list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { - if (vfid->br_dev == br_dev) - return vfid; + f = mlxsw_sp_fid_find(mlxsw_sp, fid); + if (!f) { + f = mlxsw_sp_fid_create(mlxsw_sp, fid); + if (IS_ERR(f)) 
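
Linking a VLAN upper of the bridge resolves to a lookup-or-create on the FID with one reference taken per upper, and unlink drops the reference and destroys on zero, which is exactly the mlxsw_sp_master_bridge_vlan_link()/_unlink() pair above. A minimal version using a plain singly linked list in place of list_head:

#include <stdio.h>
#include <stdlib.h>

struct fid {
	unsigned short fid;
	unsigned int ref_count;
	struct fid *next;
};

static struct fid *fids;	/* VLAN-aware bridge FIDs */

static struct fid *fid_find(unsigned short fid)
{
	struct fid *f;

	for (f = fids; f; f = f->next)
		if (f->fid == fid)
			return f;
	return NULL;
}

static struct fid *fid_get(unsigned short fid)	/* lookup or create */
{
	struct fid *f = fid_find(fid);

	if (!f) {
		f = calloc(1, sizeof(*f));
		if (!f)
			return NULL;
		f->fid = fid;
		f->next = fids;
		fids = f;
	}
	f->ref_count++;
	return f;
}

static void fid_put(unsigned short fid)
{
	struct fid *f = fid_find(fid), **p;

	if (!f || --f->ref_count)
		return;
	for (p = &fids; *p != f; p = &(*p)->next)
		;
	*p = f->next;
	free(f);
}

int main(void)
{
	fid_get(10);
	fid_get(10);		/* second VLAN upper: same FID, ref 2 */
	fid_put(10);
	fid_put(10);		/* last ref: FID destroyed */
	printf("fids empty: %s\n", fids ? "no" : "yes");
	return 0;
}
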
+ return PTR_ERR(f); } - return NULL; + f->ref_count++; + + return 0; +} + +static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev) +{ + u16 fid = vlan_dev_vlan_id(vlan_dev); + struct mlxsw_sp_fid *f; + + f = mlxsw_sp_fid_find(mlxsw_sp, fid); + if (f && f->r) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + if (f && --f->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp, f); } -static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid) +static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, + unsigned long event, void *ptr) { - return vfid - MLXSW_SP_VFID_PORT_MAX; + struct netdev_notifier_changeupper_info *info; + struct net_device *upper_dev; + struct mlxsw_sp *mlxsw_sp; + int err; + + mlxsw_sp = mlxsw_sp_lower_get(br_dev); + if (!mlxsw_sp) + return 0; + if (br_dev != mlxsw_sp->master_bridge.dev) + return 0; + + info = ptr; + + switch (event) { + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (!is_vlan_dev(upper_dev)) + break; + if (info->linking) { + err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp, + upper_dev); + if (err) + return err; + } else { + mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev); + } + break; + } + + return 0; } -static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid) +static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) { - return MLXSW_SP_VFID_PORT_MAX + br_vfid; + return find_first_zero_bit(mlxsw_sp->vfids.mapped, + MLXSW_SP_VFID_MAX); } -static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp) +static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) { - return find_first_zero_bit(mlxsw_sp->br_vfids.mapped, - MLXSW_SP_VFID_BR_MAX); + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); } -static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) +static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + +static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; + struct mlxsw_sp_fid *f; + u16 vfid, fid; int err; - n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); - if (n_vfid == MLXSW_SP_VFID_MAX) { + vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); + if (vfid == MLXSW_SP_VFID_MAX) { dev_err(dev, "No available vFIDs\n"); return ERR_PTR(-ERANGE); } - err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + fid = mlxsw_sp_vfid_to_fid(vfid); + err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true); if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + dev_err(dev, "Failed to create FID=%d\n", fid); return ERR_PTR(err); } - vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) goto err_allocate_vfid; - vfid->vfid = n_vfid; - vfid->br_dev = br_dev; + f->leave = mlxsw_sp_vport_vfid_leave; + f->fid = fid; + f->dev = br_dev; - list_add(&vfid->list, &mlxsw_sp->br_vfids.list); - set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); + list_add(&f->list, &mlxsw_sp->vfids.list); + set_bit(vfid, mlxsw_sp->vfids.mapped); - return vfid; + return f; err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); return ERR_PTR(-ENOMEM); } -static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) +static void 
mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *f) { - u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); + u16 vfid = mlxsw_sp_fid_to_vfid(f->fid); + u16 fid = f->fid; + + clear_bit(vfid, mlxsw_sp->vfids.mapped); + list_del(&f->list); - clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); - list_del(&vfid->list); + if (f->r) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); - __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + kfree(f); - kfree(vfid); + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); } -static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb) +static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool valid) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid, *new_vfid; - int err; - - vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - WARN_ON(!vfid); - return -EINVAL; - } - - /* We need a vFID to go back to after leaving the bridge's vFID. */ - new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!new_vfid) { - new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(new_vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(new_vfid); - } - } - /* Invalidate existing {Port, VID} to vFID mapping and create a new - * one for the new vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid, + vid); +} - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(new_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - new_vfid->vfid); - goto err_port_vid_to_fid_validate; - } +static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) +{ + struct mlxsw_sp_fid *f; + int err; - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) { - netdev_err(dev, "Failed to disable learning\n"); - goto err_port_vid_learning_set; + f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (!f) { + f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (IS_ERR(f)) + return PTR_ERR(f); } - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); - if (err) { - netdev_err(dev, "Failed clear to clear flooding\n"); + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true); + if (err) goto err_vport_flood_set; - } - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_FORWARDING); - if (err) { - netdev_err(dev, "Failed to set STP state\n"); - goto err_port_stp_state_set; - } - - if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) - netdev_err(dev, "Failed to flush FDB\n"); + err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_fid_map; - /* Switch between the vFIDs and destroy the old one if needed. 
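
vFIDs are handed out from a fixed-size bitmap: mlxsw_sp_avail_vfid_get() is a find_first_zero_bit() over vfids.mapped, and destroy simply clears the bit again, as in the hunks above. Equivalent logic with the bitmap primitives open-coded, since this sketch is user-space (MLXSW_SP_VFID_MAX is taken from the header hunk further down):

#include <assert.h>
#include <limits.h>

#define VFID_MAX 6656	/* MLXSW_SP_VFID_MAX from the header hunk below */
#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long mapped[(VFID_MAX + WORD_BITS - 1) / WORD_BITS];

static unsigned int avail_vfid_get(void)	/* ~find_first_zero_bit() */
{
	unsigned int i;

	for (i = 0; i < VFID_MAX; i++)
		if (!(mapped[i / WORD_BITS] & (1UL << (i % WORD_BITS))))
			return i;
	return VFID_MAX;	/* exhausted; the caller turns this into -ERANGE */
}

static void vfid_set(unsigned int vfid)
{
	mapped[vfid / WORD_BITS] |= 1UL << (vfid % WORD_BITS);
}

static void vfid_clear(unsigned int vfid)
{
	mapped[vfid / WORD_BITS] &= ~(1UL << (vfid % WORD_BITS));
}

int main(void)
{
	unsigned int v = avail_vfid_get();

	assert(v == 0);
	vfid_set(v);
	assert(avail_vfid_get() == 1);
	vfid_clear(v);
	assert(avail_vfid_get() == 0);
	return 0;
}
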
*/ - new_vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = new_vfid; - vfid->nr_vports--; - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f); + f->ref_count++; - mlxsw_sp_vport->learning = 0; - mlxsw_sp_vport->learning_sync = 0; - mlxsw_sp_vport->uc_flood = 0; - mlxsw_sp_vport->bridged = 0; + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid); return 0; -err_port_stp_state_set: +err_vport_fid_map: + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); err_vport_flood_set: -err_port_vid_learning_set: -err_port_vid_to_fid_validate: -err_port_vid_to_fid_invalidate: - /* Rollback vFID only if new. */ - if (!new_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); + if (!f->ref_count) + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); return err; } +static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); + + mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false); + + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); + + mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); +} + static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, struct net_device *br_dev) { - struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid; int err; - vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create bridge vFID\n"); - return PTR_ERR(vfid); - } - } + if (f && !WARN_ON(!f->leave)) + f->leave(mlxsw_sp_vport); - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); + err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev); if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_port_flood_set; + netdev_err(dev, "Failed to join vFID\n"); + return err; } err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); @@ -3352,38 +4391,6 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, goto err_port_vid_learning_set; } - /* We need to invalidate existing {Port, VID} to vFID mapping and - * create a new one for the bridge's vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - old_vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } - - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - vfid->vfid); - goto err_port_vid_to_fid_validate; - } - - /* Switch between the vFIDs and destroy the old one if needed. 
*/ - vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = vfid; - old_vfid->nr_vports--; - if (!old_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); - mlxsw_sp_vport->learning = 1; mlxsw_sp_vport->learning_sync = 1; mlxsw_sp_vport->uc_flood = 1; @@ -3391,20 +4398,25 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, return 0; -err_port_vid_to_fid_validate: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); -err_port_vid_to_fid_invalidate: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); err_port_vid_learning_set: - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); -err_port_flood_set: - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); return err; } +static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); + + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); + + mlxsw_sp_vport->learning = 0; + mlxsw_sp_vport->learning_sync = 0; + mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->bridged = 0; +} + static bool mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, const struct net_device *br_dev) @@ -3413,7 +4425,9 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, vport.list) { - if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) + struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport); + + if (dev && dev == br_dev) return false; } @@ -3428,56 +4442,39 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, struct netdev_notifier_changeupper_info *info = ptr; struct mlxsw_sp_port *mlxsw_sp_vport; struct net_device *upper_dev; - int err; + int err = 0; mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) - break; if (!netif_is_bridge_master(upper_dev)) - return NOTIFY_BAD; + return -EINVAL; + if (!info->linking) + break; /* We can't have multiple VLAN interfaces configured on * the same port and being members in the same bridge. */ if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master) - break; if (info->linking) { - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return NOTIFY_BAD; - } + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, upper_dev); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } } else { - /* We ignore bridge's unlinking notifications if vPort - * is gone, since we already left the bridge when the - * VLAN device was unlinked from the real device. 
- */ if (!mlxsw_sp_vport) - return NOTIFY_DONE; - err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, - upper_dev, true); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } + return 0; + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); } } - return NOTIFY_DONE; + return err; } static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, @@ -3492,12 +4489,12 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, vid); - if (ret == NOTIFY_BAD) + if (ret) return ret; } } - return NOTIFY_DONE; + return 0; } static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, @@ -3513,41 +4510,58 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, vid); - return NOTIFY_DONE; + return 0; } static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int err = 0; - if (mlxsw_sp_port_dev_check(dev)) - return mlxsw_sp_netdevice_port_event(dev, event, ptr); - - if (netif_is_lag_master(dev)) - return mlxsw_sp_netdevice_lag_event(dev, event, ptr); + if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) + err = mlxsw_sp_netdevice_router_port_event(dev); + else if (mlxsw_sp_port_dev_check(dev)) + err = mlxsw_sp_netdevice_port_event(dev, event, ptr); + else if (netif_is_lag_master(dev)) + err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); + else if (is_vlan_dev(dev)) + err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); - if (is_vlan_dev(dev)) - return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); - - return NOTIFY_DONE; + return notifier_from_errno(err); } static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { .notifier_call = mlxsw_sp_netdevice_event, }; +static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { + .notifier_call = mlxsw_sp_inetaddr_event, + .priority = 10, /* Must be called before FIB notifier block */ +}; + +static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { + .notifier_call = mlxsw_sp_router_netevent_event, +}; + static int __init mlxsw_sp_module_init(void) { int err; register_netdevice_notifier(&mlxsw_sp_netdevice_nb); + register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); + register_netevent_notifier(&mlxsw_sp_router_netevent_nb); + err = mlxsw_core_driver_register(&mlxsw_sp_driver); if (err) goto err_core_driver_register; return 0; err_core_driver_register: + unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); return err; } @@ -3555,6 +4569,8 @@ err_core_driver_register: static void __exit mlxsw_sp_module_exit(void) { mlxsw_core_driver_unregister(&mlxsw_sp_driver); + unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 13b30eaa1..ac48abebe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -39,19 +39,22 @@ #include #include +#include #include #include #include #include +#include 
#include #include "port.h" #include "core.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID -#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ -#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */ -#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) +#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */ + +#define MLXSW_SP_RFID_BASE 15360 +#define MLXSW_SP_RIF_MAX 800 #define MLXSW_SP_LAG_MAX 64 #define MLXSW_SP_PORT_PER_LAG_MAX 16 @@ -60,6 +63,12 @@ #define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 +#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */ +#define MLXSW_SP_LPM_TREE_MAX 22 +#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN) + +#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256 + #define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ #define MLXSW_SP_BYTES_PER_CELL 96 @@ -67,6 +76,10 @@ #define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) #define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL) +#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */ +#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */ +#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */ + /* Maximum delay buffer needed in case of PAUSE frames, in cells. * Assumes 100m cable and maximum MTU. */ @@ -87,12 +100,22 @@ struct mlxsw_sp_upper { unsigned int ref_count; }; -struct mlxsw_sp_vfid { +struct mlxsw_sp_fid { + void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport); struct list_head list; - u16 nr_vports; - u16 vfid; /* Starting at 0 */ - struct net_device *br_dev; - u16 vid; + unsigned int ref_count; + struct net_device *dev; + struct mlxsw_sp_rif *r; + u16 fid; +}; + +struct mlxsw_sp_rif { + struct net_device *dev; + unsigned int ref_count; + struct mlxsw_sp_fid *f; + unsigned char addr[ETH_ALEN]; + int mtu; + u16 rif; }; struct mlxsw_sp_mid { @@ -115,7 +138,17 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid) static inline bool mlxsw_sp_fid_is_vfid(u16 fid) { - return fid >= MLXSW_SP_VFID_BASE; + return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE; +} + +static inline bool mlxsw_sp_fid_is_rfid(u16 fid) +{ + return fid >= MLXSW_SP_RFID_BASE; +} + +static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif) +{ + return MLXSW_SP_RFID_BASE + rif; } struct mlxsw_sp_sb_pr { @@ -152,20 +185,97 @@ struct mlxsw_sp_sb { } ports[MLXSW_PORT_MAX_PORTS]; }; -struct mlxsw_sp { +#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE) + +struct mlxsw_sp_prefix_usage { + DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT); +}; + +enum mlxsw_sp_l3proto { + MLXSW_SP_L3_PROTO_IPV4, + MLXSW_SP_L3_PROTO_IPV6, +}; + +struct mlxsw_sp_lpm_tree { + u8 id; /* tree ID */ + unsigned int ref_count; + enum mlxsw_sp_l3proto proto; + struct mlxsw_sp_prefix_usage prefix_usage; +}; + +struct mlxsw_sp_fib; + +struct mlxsw_sp_vr { + u16 id; /* virtual router ID */ + bool used; + enum mlxsw_sp_l3proto proto; + u32 tb_id; /* kernel fib table id */ + struct mlxsw_sp_lpm_tree *lpm_tree; + struct mlxsw_sp_fib *fib; +}; + +enum mlxsw_sp_span_type { + MLXSW_SP_SPAN_EGRESS, + MLXSW_SP_SPAN_INGRESS +}; + +struct mlxsw_sp_span_inspected_port { + struct list_head list; + enum mlxsw_sp_span_type type; + u8 local_port; +}; + +struct mlxsw_sp_span_entry { + u8 local_port; + bool used; + struct list_head bound_ports_list; + int ref_count; + int id; +}; + +enum mlxsw_sp_port_mall_action_type { + MLXSW_SP_PORT_MALL_MIRROR, +}; + +struct mlxsw_sp_port_mall_mirror_tc_entry { + u8 to_local_port; + bool ingress; +}; + +struct mlxsw_sp_port_mall_tc_entry { + 
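
The header hunk above re-partitions the FID number space: FIDs below VLAN_N_VID are plain VLAN FIDs, [4096, 15360) holds vFIDs for VLAN-unaware bridging, and everything from MLXSW_SP_RFID_BASE upward is an rFID backing a router interface, one per RIF. The classification helpers lifted into a standalone check:

#include <assert.h>
#include <stdbool.h>

#define VLAN_N_VID	4096
#define VFID_BASE	VLAN_N_VID	/* 4096 */
#define RFID_BASE	15360

/* [0, 4096) VLAN FIDs, [4096, 15360) vFIDs, [15360, ...) rFIDs */
static bool fid_is_vfid(unsigned short fid)
{
	return fid >= VFID_BASE && fid < RFID_BASE;
}

static bool fid_is_rfid(unsigned short fid)
{
	return fid >= RFID_BASE;
}

static unsigned short rif_sp_to_fid(unsigned short rif)
{
	return RFID_BASE + rif;	/* one rFID per router interface */
}

int main(void)
{
	assert(!fid_is_vfid(1));		/* plain VLAN FID */
	assert(fid_is_vfid(VFID_BASE));		/* first vFID */
	assert(fid_is_rfid(rif_sp_to_fid(0)));	/* RIF 0 -> FID 15360 */
	return 0;
}
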
struct list_head list; + unsigned long cookie; + enum mlxsw_sp_port_mall_action_type type; + union { + struct mlxsw_sp_port_mall_mirror_tc_entry mirror; + }; +}; + +struct mlxsw_sp_router { + struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT]; + struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX]; + struct rhashtable neigh_ht; struct { - struct list_head list; - unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)]; - } port_vfids; + struct delayed_work dw; + unsigned long interval; /* ms */ + } neighs_update; + struct delayed_work nexthop_probe_dw; +#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */ + struct list_head nexthop_group_list; + struct list_head nexthop_neighs_list; +}; + +struct mlxsw_sp { struct { struct list_head list; - unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)]; - } br_vfids; + DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX); + } vfids; struct { struct list_head list; - unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)]; + DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX); } br_mids; - unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)]; + struct list_head fids; /* VLAN-aware bridge FIDs */ + struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX]; struct mlxsw_sp_port **ports; struct mlxsw_core *core; const struct mlxsw_bus_info *bus_info; @@ -183,6 +293,15 @@ struct mlxsw_sp { struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; u8 port_to_module[MLXSW_PORT_MAX_PORTS]; struct mlxsw_sp_sb sb; + struct mlxsw_sp_router router; + struct { + DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); + } kvdl; + + struct { + struct mlxsw_sp_span_entry *entries; + int entries_count; + } span; }; static inline struct mlxsw_sp_upper * @@ -217,7 +336,7 @@ struct mlxsw_sp_port { u16 lag_id; struct { struct list_head list; - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f; u16 vid; } vport; struct { @@ -239,8 +358,13 @@ struct mlxsw_sp_port { unsigned long *untagged_vlans; /* VLAN interfaces */ struct list_head vports_list; + /* TC handles */ + struct list_head mall_tc_list; }; +struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); +void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); + static inline bool mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port) { @@ -259,28 +383,38 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) return mlxsw_sp_port && mlxsw_sp_port->lagged ? 
mlxsw_sp_port : NULL; } +static inline u16 +mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vid; +} + static inline bool mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port) { - return mlxsw_sp_port->vport.vfid; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + + return vid != 0; } -static inline struct net_device * -mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport, + struct mlxsw_sp_fid *f) { - return mlxsw_sp_vport->vport.vfid->br_dev; + mlxsw_sp_vport->vport.f = f; } -static inline u16 -mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +static inline struct mlxsw_sp_fid * +mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) { - return mlxsw_sp_vport->vport.vid; + return mlxsw_sp_vport->vport.f; } -static inline u16 -mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +static inline struct net_device * +mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport) { - return mlxsw_sp_vport->vport.vfid->vfid; + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + return f ? f->dev : NULL; } static inline struct mlxsw_sp_port * @@ -298,20 +432,60 @@ mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) } static inline struct mlxsw_sp_port * -mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port, - u16 vfid) +mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) { struct mlxsw_sp_port *mlxsw_sp_vport; list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, vport.list) { - if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid) + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + if (f && f->fid == fid) return mlxsw_sp_vport; } return NULL; } +static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp, + u16 fid) +{ + struct mlxsw_sp_fid *f; + + list_for_each_entry(f, &mlxsw_sp->fids, list) + if (f->fid == fid) + return f; + + return NULL; +} + +static inline struct mlxsw_sp_fid * +mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev) +{ + struct mlxsw_sp_fid *f; + + list_for_each_entry(f, &mlxsw_sp->vfids.list, list) + if (f->dev == br_dev) + return f; + + return NULL; +} + +static inline struct mlxsw_sp_rif * +mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev) +{ + int i; + + for (i = 0; i < MLXSW_SP_RIF_MAX; i++) + if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev) + return mlxsw_sp->rifs[i]; + + return NULL; +} + enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, MLXSW_SP_FLOOD_TABLE_BM, @@ -362,14 +536,17 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged); -int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, - u16 vid); -int mlxsw_sp_port_kill_vid(struct net_device *dev, - __be16 __always_unused proto, u16 vid); -int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, - bool set, bool only_uc); +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool set); void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); +int 
mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid); +int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, + bool adding); +struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid); +void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f); +void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r); int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, bool dwrr, u8 dwrr_weight); @@ -399,4 +576,21 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) #endif +int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans); +int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4); +int mlxsw_sp_router_neigh_construct(struct net_device *dev, + struct neighbour *n); +void mlxsw_sp_router_neigh_destroy(struct net_device *dev, + struct neighbour *n); +int mlxsw_sp_router_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr); + +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 074cdda7b..953b214f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, @@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, u8 local_port = mlxsw_sp_port->local_port; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = pool_type; - u8 pool = pool_index; + u8 pool = pool_get(pool_index); u32 max_buff; int err; + if (dir != dir_get(pool_index)) + return -EINVAL; + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, threshold, &max_buff); if (err) return err; - if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) { - if (pool < MLXSW_SP_SB_POOL_COUNT) - return -EINVAL; - pool -= MLXSW_SP_SB_POOL_COUNT; - } else if (pool >= MLXSW_SP_SB_POOL_COUNT) { - return -EINVAL; - } return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, 0, max_buff, pool); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 01cfb7512..b6ed7f7c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, char pfcc_pl[MLXSW_REG_PFCC_LEN]; mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause); + mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause); mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), @@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct 
net_device *dev, struct ieee_pfc *pfc) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); int err; - if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) && - pfc->pfc_en) { + if (pause_en && pfc->pfc_en) { netdev_err(dev, "PAUSE frames already enabled on port\n"); return -EINVAL; } err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, mlxsw_sp_port->dcb.ets->prio_tc, - false, pfc); + pause_en, pfc); if (err) { netdev_err(dev, "Failed to configure port's headroom for PFC\n"); return err; @@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, err_port_pfc_set: __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, - mlxsw_sp_port->dcb.ets->prio_tc, false, + mlxsw_sp_port->dcb.ets->prio_tc, pause_en, mlxsw_sp_port->dcb.pfc); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c new file mode 100644 index 000000000..ac321e8e5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -0,0 +1,91 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP_KVDL_SINGLE_BASE 0
+#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP_KVDL_CHUNKS_BASE \
+	(MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
+#define MLXSW_SP_KVDL_CHUNKS_SIZE \
+	(MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
+#define MLXSW_SP_CHUNK_MAX 32
+
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
+{
+	int entry_index;
+	int size;
+	int type_base;
+	int type_size;
+	int type_entries;
+
+	if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) {
+		return -EINVAL;
+	} else if (entry_count == 1) {
+		type_base = MLXSW_SP_KVDL_SINGLE_BASE;
+		type_size = MLXSW_SP_KVDL_SINGLE_SIZE;
+		type_entries = 1;
+	} else {
+		type_base = MLXSW_SP_KVDL_CHUNKS_BASE;
+		type_size = MLXSW_SP_KVDL_CHUNKS_SIZE;
+		type_entries = MLXSW_SP_CHUNK_MAX;
+	}
+
+	entry_index = type_base;
+	size = type_base + type_size;
+	for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) {
+		int i;
+
+		for (i = 0; i < type_entries; i++)
+			set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+		return entry_index;
+	}
+	return -ENOBUFS;
+}
+
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
+{
+	int type_entries;
+	int i;
+
+	if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE)
+		type_entries = 1;
+	else
+		type_entries = MLXSW_SP_CHUNK_MAX;
+	for (i = 0; i < type_entries; i++)
+		clear_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+}
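The allocator above splits the KVD linear space into a single-entry region and a region of fixed 32-entry chunks, and recovers the allocation size on free from the index alone. Below is a minimal userspace sketch of the same first-fit bitmap scheme; the region sizes are toy values and the chunk scan is simplified to aligned steps, so it illustrates the idea rather than the driver's exact search.

/* Hedged sketch: names and sizes are illustrative, not the device's. */
#include <stdbool.h>
#include <stdio.h>

#define SINGLE_BASE 0
#define SINGLE_SIZE 16
#define CHUNKS_BASE (SINGLE_BASE + SINGLE_SIZE)
#define TOTAL_SIZE  64
#define CHUNK_MAX   8

static bool usage[TOTAL_SIZE]; /* one flag per KVD linear entry */

static int kvdl_alloc(unsigned int entry_count)
{
	int base, size, step, i;

	if (entry_count == 0 || entry_count > CHUNK_MAX)
		return -1;
	if (entry_count == 1) {
		base = SINGLE_BASE; size = SINGLE_SIZE; step = 1;
	} else {
		base = CHUNKS_BASE; size = TOTAL_SIZE - CHUNKS_BASE;
		step = CHUNK_MAX; /* multi-entry requests round up to a chunk */
	}
	for (i = base; i < base + size; i += step) {
		if (!usage[i]) {
			int j;

			for (j = 0; j < step; j++)
				usage[i + j] = true;
			return i;
		}
	}
	return -1; /* region exhausted */
}

static void kvdl_free(int index)
{
	/* The region the index falls in tells us how much was allocated. */
	int step = index < CHUNKS_BASE ? 1 : CHUNK_MAX;
	int j;

	for (j = 0; j < step; j++)
		usage[index + j] = false;
}

int main(void)
{
	int a = kvdl_alloc(1); /* from the single-entry region */
	int b = kvdl_alloc(5); /* rounded up to one chunk */

	printf("single at %d, chunk at %d\n", a, b);
	kvdl_free(b);
	kvdl_free(a);
	return 0;
}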
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
new file mode 100644
index 000000000..3f5c51da6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -0,0 +1,1863 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko
+ * Copyright (c) 2016 Ido Schimmel
+ * Copyright (c) 2016 Yotam Gigi
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/rhashtable.h>
+#include <linux/bitops.h>
+#include <linux/in6.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include <net/arp.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
+	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
+
+static bool
+mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
+			     struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+	unsigned char prefix;
+
+	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
+		if (!test_bit(prefix, prefix_usage2->b))
+			return false;
+	}
+	return true;
+}
+
+static bool
+mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
+			 struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static bool
+mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
+{
+	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
+
+	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
+}
+
+static void
+mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
+			  struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static void
+mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
+{
+	memset(prefix_usage, 0, sizeof(*prefix_usage));
+}
+
+static void
+mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
+			  unsigned char prefix_len)
+{
+	set_bit(prefix_len, prefix_usage->b);
+}
+
+static void
+mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
+			    unsigned char prefix_len)
+{
+	clear_bit(prefix_len, prefix_usage->b);
+}
+
+struct mlxsw_sp_fib_key {
+	struct net_device *dev;
+	unsigned char addr[sizeof(struct in6_addr)];
+	unsigned char prefix_len;
+};
+
+enum mlxsw_sp_fib_entry_type {
+	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
+	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
+	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
+};
+
+struct mlxsw_sp_nexthop_group;
+
+struct mlxsw_sp_fib_entry {
+	struct rhash_head ht_node;
+	struct mlxsw_sp_fib_key key;
+	enum mlxsw_sp_fib_entry_type type;
+	unsigned int ref_count;
+	u16 rif; /* used for action local */
+	struct mlxsw_sp_vr *vr;
+	struct list_head nexthop_group_node;
+	struct mlxsw_sp_nexthop_group *nh_group;
+};
+
+struct mlxsw_sp_fib {
+	struct rhashtable ht;
+	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
+	struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
+static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
+	.key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
+	.head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
+	.key_len = sizeof(struct mlxsw_sp_fib_key),
+	.automatic_shrinking = true,
+};
+
+static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
+				     struct mlxsw_sp_fib_entry *fib_entry)
+{
+	unsigned char prefix_len = fib_entry->key.prefix_len;
+	int err;
+
+	err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
+				     mlxsw_sp_fib_ht_params);
+	if 
(err) + return err; + if (fib->prefix_ref_count[prefix_len]++ == 0) + mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); + return 0; +} + +static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_entry *fib_entry) +{ + unsigned char prefix_len = fib_entry->key.prefix_len; + + if (--fib->prefix_ref_count[prefix_len] == 0) + mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); + rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node, + mlxsw_sp_fib_ht_params); +} + +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len, + struct net_device *dev) +{ + struct mlxsw_sp_fib_entry *fib_entry; + + fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); + if (!fib_entry) + return NULL; + fib_entry->key.dev = dev; + memcpy(fib_entry->key.addr, addr, addr_len); + fib_entry->key.prefix_len = prefix_len; + return fib_entry; +} + +static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry) +{ + kfree(fib_entry); +} + +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len, + struct net_device *dev) +{ + struct mlxsw_sp_fib_key key; + + memset(&key, 0, sizeof(key)); + key.dev = dev; + memcpy(key.addr, addr, addr_len); + key.prefix_len = prefix_len; + return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); +} + +static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) +{ + struct mlxsw_sp_fib *fib; + int err; + + fib = kzalloc(sizeof(*fib), GFP_KERNEL); + if (!fib) + return ERR_PTR(-ENOMEM); + err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params); + if (err) + goto err_rhashtable_init; + return fib; + +err_rhashtable_init: + kfree(fib); + return ERR_PTR(err); +} + +static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib) +{ + rhashtable_destroy(&fib->ht); + kfree(fib); +} + +static struct mlxsw_sp_lpm_tree * +mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved) +{ + static struct mlxsw_sp_lpm_tree *lpm_tree; + int i; + + for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { + lpm_tree = &mlxsw_sp->router.lpm_trees[i]; + if (lpm_tree->ref_count == 0) { + if (one_reserved) + one_reserved = false; + else + return lpm_tree; + } + } + return NULL; +} + +static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) +{ + char ralta_pl[MLXSW_REG_RALTA_LEN]; + + mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); +} + +static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) +{ + char ralta_pl[MLXSW_REG_RALTA_LEN]; + + mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); +} + +static int +mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_prefix_usage *prefix_usage, + struct mlxsw_sp_lpm_tree *lpm_tree) +{ + char ralst_pl[MLXSW_REG_RALST_LEN]; + u8 root_bin = 0; + u8 prefix; + u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD; + + mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) + root_bin = prefix; + + mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id); + mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) { + if (prefix == 0) + continue; + mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix, + MLXSW_REG_RALST_BIN_NO_CHILD); + last_prefix = prefix; + } 
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+			 struct mlxsw_sp_prefix_usage *prefix_usage,
+			 enum mlxsw_sp_l3proto proto, bool one_reserved)
+{
+	struct mlxsw_sp_lpm_tree *lpm_tree;
+	int err;
+
+	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+	if (!lpm_tree)
+		return ERR_PTR(-EBUSY);
+	lpm_tree->proto = proto;
+	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+	if (err)
+		return ERR_PTR(err);
+
+	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+						lpm_tree);
+	if (err)
+		goto err_left_struct_set;
+	return lpm_tree;
+
+err_left_struct_set:
+	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+	return ERR_PTR(err);
+}
+
+static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+				     struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
+		      struct mlxsw_sp_prefix_usage *prefix_usage,
+		      enum mlxsw_sp_l3proto proto, bool one_reserved)
+{
+	struct mlxsw_sp_lpm_tree *lpm_tree;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+		if (lpm_tree->proto == proto &&
+		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
+					     prefix_usage))
+			goto inc_ref_count;
+	}
+	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
+					    proto, one_reserved);
+	if (IS_ERR(lpm_tree))
+		return lpm_tree;
+
+inc_ref_count:
+	lpm_tree->ref_count++;
+	return lpm_tree;
+}
+
+static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+	if (--lpm_tree->ref_count == 0)
+		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+	return 0;
+}
+
+static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_lpm_tree *lpm_tree;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
+	}
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_vr *vr;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+		vr = &mlxsw_sp->router.vrs[i];
+		if (!vr->used)
+			return vr;
+	}
+	return NULL;
+}
+
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+				     struct mlxsw_sp_vr *vr)
+{
+	char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+	mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+				       struct mlxsw_sp_vr *vr)
+{
+	char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+	/* Bind to tree 0 which is default */
+	mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
+{
+	/* For our purpose, squash main and local table into one */
+	if (tb_id == RT_TABLE_LOCAL)
+		tb_id = RT_TABLE_MAIN;
+	return tb_id;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
+					    u32 tb_id,
+					    enum mlxsw_sp_l3proto proto)
+{
+	struct mlxsw_sp_vr *vr;
+	int i;
+
+	tb_id = mlxsw_sp_fix_tb_id(tb_id);
+	for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+		vr = &mlxsw_sp->router.vrs[i];
+		if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+			return vr;
+	}
+	return NULL;
+}
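mlxsw_sp_vr_get() further down builds a get-or-create pattern on top of mlxsw_sp_vr_find(): the kernel's local table is squashed into the main one, an existing virtual router is reused on a hit, and a free slot is claimed only on a miss. A hedged userspace sketch of that flow follows; the table IDs are the kernel's well-known values, while the VR count and struct layout are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define RT_TABLE_MAIN  254
#define RT_TABLE_LOCAL 255
#define VR_MAX         4

struct vr {
	bool used;
	unsigned int tb_id;
};

static struct vr vrs[VR_MAX];

static unsigned int fix_tb_id(unsigned int tb_id)
{
	/* Local and main tables share one hardware virtual router. */
	return tb_id == RT_TABLE_LOCAL ? RT_TABLE_MAIN : tb_id;
}

static struct vr *vr_get(unsigned int tb_id)
{
	int i;

	tb_id = fix_tb_id(tb_id);
	for (i = 0; i < VR_MAX; i++)	/* lookup */
		if (vrs[i].used && vrs[i].tb_id == tb_id)
			return &vrs[i];
	for (i = 0; i < VR_MAX; i++)	/* create on a miss */
		if (!vrs[i].used) {
			vrs[i].used = true;
			vrs[i].tb_id = tb_id;
			return &vrs[i];
		}
	return NULL;			/* all VRs in use */
}

int main(void)
{
	struct vr *main_vr = vr_get(RT_TABLE_MAIN);
	struct vr *local_vr = vr_get(RT_TABLE_LOCAL);

	printf("local aliases main: %s\n",
	       main_vr == local_vr ? "yes" : "no");
	return 0;
}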
+static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
+					      unsigned char prefix_len,
+					      u32 tb_id,
+					      enum mlxsw_sp_l3proto proto)
+{
+	struct mlxsw_sp_prefix_usage req_prefix_usage;
+	struct mlxsw_sp_lpm_tree *lpm_tree;
+	struct mlxsw_sp_vr *vr;
+	int err;
+
+	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
+	if (!vr)
+		return ERR_PTR(-EBUSY);
+	vr->fib = mlxsw_sp_fib_create();
+	if (IS_ERR(vr->fib))
+		return ERR_CAST(vr->fib);
+
+	vr->proto = proto;
+	vr->tb_id = tb_id;
+	mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
+	mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
+	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+					 proto, true);
+	if (IS_ERR(lpm_tree)) {
+		err = PTR_ERR(lpm_tree);
+		goto err_tree_get;
+	}
+	vr->lpm_tree = lpm_tree;
+	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+	if (err)
+		goto err_tree_bind;
+
+	vr->used = true;
+	return vr;
+
+err_tree_bind:
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+err_tree_get:
+	mlxsw_sp_fib_destroy(vr->fib);
+
+	return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+				struct mlxsw_sp_vr *vr)
+{
+	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+	mlxsw_sp_fib_destroy(vr->fib);
+	vr->used = false;
+}
+
+static int
+mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
+{
+	struct mlxsw_sp_lpm_tree *lpm_tree;
+
+	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
+				     &vr->lpm_tree->prefix_usage))
+		return 0;
+
+	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
+					 vr->proto, false);
+	if (IS_ERR(lpm_tree)) {
+		/* We failed to get a tree according to the required
+		 * prefix usage. However, the current tree might still be
+		 * good for us if our requirement is a subset of the
+		 * prefixes used in the tree.
+		 */
+		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
+						 &vr->lpm_tree->prefix_usage))
+			return 0;
+		return PTR_ERR(lpm_tree);
+	}
+
+	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+	vr->lpm_tree = lpm_tree;
+	return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
+					   unsigned char prefix_len,
+					   u32 tb_id,
+					   enum mlxsw_sp_l3proto proto)
+{
+	struct mlxsw_sp_vr *vr;
+	int err;
+
+	tb_id = mlxsw_sp_fix_tb_id(tb_id);
+	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
+	if (!vr) {
+		vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
+		if (IS_ERR(vr))
+			return vr;
+	} else {
+		struct mlxsw_sp_prefix_usage req_prefix_usage;
+
+		mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
+					  &vr->fib->prefix_usage);
+		mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
+		/* Need to replace LPM tree in case new prefix is required. */
+		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
+						 &req_prefix_usage);
+		if (err)
+			return ERR_PTR(err);
+	}
+	return vr;
+}
+
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+{
+	/* Destroy the virtual router entity in case the associated FIB is
+	 * empty and allow it to be used for other tables in the future.
+	 * Otherwise, check whether some prefix usage disappeared and change
+	 * the tree if that is the case. Note that in case a new, smaller
+	 * tree cannot be allocated, the original one is kept in use.
+ */ + if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage)) + mlxsw_sp_vr_destroy(mlxsw_sp, vr); + else + mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr, + &vr->fib->prefix_usage); +} + +static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_vr *vr; + int i; + + for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) { + vr = &mlxsw_sp->router.vrs[i]; + vr->id = i; + } +} + +struct mlxsw_sp_neigh_key { + unsigned char addr[sizeof(struct in6_addr)]; + struct net_device *dev; +}; + +struct mlxsw_sp_neigh_entry { + struct rhash_head ht_node; + struct mlxsw_sp_neigh_key key; + u16 rif; + struct neighbour *n; + bool offloaded; + struct delayed_work dw; + struct mlxsw_sp_port *mlxsw_sp_port; + unsigned char ha[ETH_ALEN]; + struct list_head nexthop_list; /* list of nexthops using + * this neigh entry + */ + struct list_head nexthop_neighs_list_node; +}; + +static const struct rhashtable_params mlxsw_sp_neigh_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key), + .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node), + .key_len = sizeof(struct mlxsw_sp_neigh_key), +}; + +static int +mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht, + &neigh_entry->ht_node, + mlxsw_sp_neigh_ht_params); +} + +static void +mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht, + &neigh_entry->ht_node, + mlxsw_sp_neigh_ht_params); +} + +static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); + +static struct mlxsw_sp_neigh_entry * +mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, + struct net_device *dev, u16 rif, + struct neighbour *n) +{ + struct mlxsw_sp_neigh_entry *neigh_entry; + + neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); + if (!neigh_entry) + return NULL; + memcpy(neigh_entry->key.addr, addr, addr_len); + neigh_entry->key.dev = dev; + neigh_entry->rif = rif; + neigh_entry->n = n; + INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); + INIT_LIST_HEAD(&neigh_entry->nexthop_list); + return neigh_entry; +} + +static void +mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry) +{ + kfree(neigh_entry); +} + +static struct mlxsw_sp_neigh_entry * +mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, + size_t addr_len, struct net_device *dev) +{ + struct mlxsw_sp_neigh_key key = {{ 0 } }; + + memcpy(key.addr, addr, addr_len); + key.dev = dev; + return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, + &key, mlxsw_sp_neigh_ht_params); +} + +int mlxsw_sp_router_neigh_construct(struct net_device *dev, + struct neighbour *n) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_neigh_entry *neigh_entry; + struct mlxsw_sp_rif *r; + u32 dip; + int err; + + if (n->tbl != &arp_tbl) + return 0; + + dip = ntohl(*((__be32 *) n->primary_key)); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), + n->dev); + if (neigh_entry) { + WARN_ON(neigh_entry->n != n); + return 0; + } + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); + if (WARN_ON(!r)) + return -EINVAL; + + neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, + r->rif, n); + if (!neigh_entry) + return -ENOMEM; + err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); + if (err) + goto err_neigh_entry_insert; + 
return 0; + +err_neigh_entry_insert: + mlxsw_sp_neigh_entry_destroy(neigh_entry); + return err; +} + +void mlxsw_sp_router_neigh_destroy(struct net_device *dev, + struct neighbour *n) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_neigh_entry *neigh_entry; + u32 dip; + + if (n->tbl != &arp_tbl) + return; + + dip = ntohl(*((__be32 *) n->primary_key)); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), + n->dev); + if (!neigh_entry) + return; + mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); + mlxsw_sp_neigh_entry_destroy(neigh_entry); +} + +static void +mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp) +{ + unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); + + mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval); +} + +static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int ent_index) +{ + struct net_device *dev; + struct neighbour *n; + __be32 dipn; + u32 dip; + u16 rif; + + mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip); + + if (!mlxsw_sp->rifs[rif]) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n"); + return; + } + + dipn = htonl(dip); + dev = mlxsw_sp->rifs[rif]->dev; + n = neigh_lookup(&arp_tbl, &dipn, dev); + if (!n) { + netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n", + &dip); + return; + } + + netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); + neigh_event_send(n, NULL); + neigh_release(n); +} + +static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ + u8 num_entries; + int i; + + num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, + rec_index); + /* Hardware starts counting at 0, so add 1. */ + num_entries++; + + /* Each record consists of several neighbour entries. */ + for (i = 0; i < num_entries; i++) { + int ent_index; + + ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i; + mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl, + ent_index); + } + +} + +static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, int rec_index) +{ + switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) { + case MLXSW_REG_RAUHTD_TYPE_IPV4: + mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl, + rec_index); + break; + case MLXSW_REG_RAUHTD_TYPE_IPV6: + WARN_ON_ONCE(1); + break; + } +} + +static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) +{ + char *rauhtd_pl; + u8 num_rec; + int i, err; + + rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); + if (!rauhtd_pl) + return -ENOMEM; + + /* Make sure the neighbour's netdev isn't removed in the + * process. 
+	 */
+	rtnl_lock();
+	do {
+		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
+		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
+				      rauhtd_pl);
+		if (err) {
+			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
+			break;
+		}
+		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+		for (i = 0; i < num_rec; i++)
+			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
+							  i);
+	} while (num_rec);
+	rtnl_unlock();
+
+	kfree(rauhtd_pl);
+	return err;
+}
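The dump loop above keeps querying the RAUHTD register until no records come back; each record carries several entries and the hardware reports the entry count as a 0-based value, so one is added before walking it. A small standalone sketch of that record walk follows; the record layout and helper names are invented for illustration.

#include <stdio.h>

#define ENT_PER_REC 4

struct rec {
	unsigned char num_entries;	/* 0-based, as in the register */
	unsigned int dip[ENT_PER_REC];	/* destination IPs seen active */
};

static void refresh_neigh(unsigned int dip)
{
	/* Stands in for neigh_lookup() + neigh_event_send(). */
	printf("refresh neighbour %u.%u.%u.%u\n",
	       dip >> 24 & 0xff, dip >> 16 & 0xff,
	       dip >> 8 & 0xff, dip & 0xff);
}

static void process_rec(const struct rec *rec)
{
	int n = rec->num_entries + 1;	/* hardware counts from 0 */
	int i;

	for (i = 0; i < n; i++)
		refresh_neigh(rec->dip[i]);
}

int main(void)
{
	struct rec dump[] = {
		{ .num_entries = 1, .dip = { 0xc0a80101, 0xc0a80102 } },
		{ .num_entries = 0, .dip = { 0x0a000001 } },
	};
	int i;

	for (i = 0; i < 2; i++)
		process_rec(&dump[i]);
	return 0;
}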
+
+static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_neigh_entry *neigh_entry;
+
+	/* Take RTNL mutex here to prevent the lists from changing */
+	rtnl_lock();
+	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+			    nexthop_neighs_list_node) {
+		/* If this neigh has nexthops, make the kernel think it
+		 * is active regardless of the traffic.
+		 */
+		if (!list_empty(&neigh_entry->nexthop_list))
+			neigh_event_send(neigh_entry->n, NULL);
+	}
+	rtnl_unlock();
+}
+
+static void
+mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+	unsigned long interval = mlxsw_sp->router.neighs_update.interval;
+
+	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
+			       msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
+{
+	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
+						 router.neighs_update.dw.work);
+	int err;
+
+	err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
+	if (err)
+		dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
+
+	mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
+
+	mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
+}
+
+static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
+{
+	struct mlxsw_sp_neigh_entry *neigh_entry;
+	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
+						 router.nexthop_probe_dw.work);
+
+	/* Iterate over nexthop neighbours, find those that are unresolved and
+	 * send ARP on them. This solves the chicken-and-egg problem where
+	 * a nexthop would not get offloaded until its neighbour is resolved,
+	 * but the neighbour might never get resolved if traffic is already
+	 * flowing in HW using a different nexthop.
+	 *
+	 * Take RTNL mutex here to prevent the lists from changing.
+	 */
+	rtnl_lock();
+	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+			    nexthop_neighs_list_node) {
+		if (!(neigh_entry->n->nud_state & NUD_VALID) &&
+		    !list_empty(&neigh_entry->nexthop_list))
+			neigh_event_send(neigh_entry->n, NULL);
+	}
+	rtnl_unlock();
+
+	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
+			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_neigh_entry *neigh_entry,
+			      bool removing);
+
+static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
+{
+	struct mlxsw_sp_neigh_entry *neigh_entry =
+		container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
+	struct neighbour *n = neigh_entry->n;
+	struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char rauht_pl[MLXSW_REG_RAUHT_LEN];
+	struct net_device *dev;
+	bool entry_connected;
+	u8 nud_state;
+	bool updating;
+	bool removing;
+	bool adding;
+	u32 dip;
+	int err;
+
+	read_lock_bh(&n->lock);
+	dip = ntohl(*((__be32 *) n->primary_key));
+	memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
+	nud_state = n->nud_state;
+	dev = n->dev;
+	read_unlock_bh(&n->lock);
+
+	entry_connected = nud_state & NUD_VALID;
+	adding = (!neigh_entry->offloaded) && entry_connected;
+	updating = neigh_entry->offloaded && entry_connected;
+	removing = neigh_entry->offloaded && !entry_connected;
+
+	if (adding || updating) {
+		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
+				      neigh_entry->rif,
+				      neigh_entry->ha, dip);
+		err = mlxsw_reg_write(mlxsw_sp->core,
+				      MLXSW_REG(rauht), rauht_pl);
+		if (err) {
+			netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
+			neigh_entry->offloaded = false;
+		} else {
+			neigh_entry->offloaded = true;
+		}
+		mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
+	} else if (removing) {
+		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
+				      neigh_entry->rif,
+				      neigh_entry->ha, dip);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
+				      rauht_pl);
+		if (err) {
+			netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
+			neigh_entry->offloaded = true;
+		} else {
+			neigh_entry->offloaded = false;
+		}
+		mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
+	}
+
+	neigh_release(n);
+	mlxsw_sp_port_dev_put(mlxsw_sp_port);
+}
+
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+				   unsigned long event, void *ptr)
+{
+	struct mlxsw_sp_neigh_entry *neigh_entry;
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	struct mlxsw_sp *mlxsw_sp;
+	unsigned long interval;
+	struct net_device *dev;
+	struct neigh_parms *p;
+	struct neighbour *n;
+	u32 dip;
+
+	switch (event) {
+	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+		p = ptr;
+
+		/* We don't care about changes in the default table. */
+		if (!p->dev || p->tbl != &arp_tbl)
+			return NOTIFY_DONE;
+
+		/* We are in atomic context and can't take RTNL mutex,
+		 * so use RCU variant to walk the device chain.
+ */ + mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev); + if (!mlxsw_sp_port) + return NOTIFY_DONE; + + mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME)); + mlxsw_sp->router.neighs_update.interval = interval; + + mlxsw_sp_port_dev_put(mlxsw_sp_port); + break; + case NETEVENT_NEIGH_UPDATE: + n = ptr; + dev = n->dev; + + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + + mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev); + if (!mlxsw_sp_port) + return NOTIFY_DONE; + + mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + dip = ntohl(*((__be32 *) n->primary_key)); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, + &dip, + sizeof(__be32), + dev); + if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) { + mlxsw_sp_port_dev_put(mlxsw_sp_port); + return NOTIFY_DONE; + } + neigh_entry->mlxsw_sp_port = mlxsw_sp_port; + + /* Take a reference to ensure the neighbour won't be + * destructed until we drop the reference in delayed + * work. + */ + neigh_clone(n); + if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) { + neigh_release(n); + mlxsw_sp_port_dev_put(mlxsw_sp_port); + } + break; + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = rhashtable_init(&mlxsw_sp->router.neigh_ht, + &mlxsw_sp_neigh_ht_params); + if (err) + return err; + + /* Initialize the polling interval according to the default + * table. + */ + mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp); + + /* Create the delayed works for the activity_update */ + INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw, + mlxsw_sp_router_neighs_update_work); + INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw, + mlxsw_sp_router_probe_unresolved_nexthops); + mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0); + mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0); + return 0; +} + +static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) +{ + cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw); + cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw); + rhashtable_destroy(&mlxsw_sp->router.neigh_ht); +} + +struct mlxsw_sp_nexthop { + struct list_head neigh_list_node; /* member of neigh entry list */ + struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group + * this belongs to + */ + u8 should_offload:1, /* set indicates this neigh is connected and + * should be put to KVD linear area of this group. + */ + offloaded:1, /* set in case the neigh is actually put into + * KVD linear area of this group. 
+ */ + update:1; /* set indicates that MAC of this neigh should be + * updated in HW + */ + struct mlxsw_sp_neigh_entry *neigh_entry; +}; + +struct mlxsw_sp_nexthop_group { + struct list_head list; /* node in mlxsw->router.nexthop_group_list */ + struct list_head fib_list; /* list of fib entries that use this group */ + u8 adj_index_valid:1; + u32 adj_index; + u16 ecmp_size; + u16 count; + struct mlxsw_sp_nexthop nexthops[0]; +}; + +static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vr *vr, + u32 adj_index, u16 ecmp_size, + u32 new_adj_index, + u16 new_ecmp_size) +{ + char raleu_pl[MLXSW_REG_RALEU_LEN]; + + mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id, + adj_index, ecmp_size, + new_adj_index, new_ecmp_size); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl); +} + +static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + u32 old_adj_index, u16 old_ecmp_size) +{ + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_vr *vr = NULL; + int err; + + list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { + if (vr == fib_entry->vr) + continue; + vr = fib_entry->vr; + err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr, + old_adj_index, + old_ecmp_size, + nh_grp->adj_index, + nh_grp->ecmp_size); + if (err) + return err; + } + return 0; +} + +static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh) +{ + struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; + char ratr_pl[MLXSW_REG_RATR_LEN]; + + mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, + true, adj_index, neigh_entry->rif); + mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); +} + +static int +mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + u32 adj_index = nh_grp->adj_index; /* base */ + struct mlxsw_sp_nexthop *nh; + int i; + int err; + + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + + if (!nh->should_offload) { + nh->offloaded = 0; + continue; + } + + if (nh->update) { + err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, + adj_index, nh); + if (err) + return err; + nh->update = 0; + nh->offloaded = 1; + } + adj_index++; + } + return 0; +} + +static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry); + +static int +mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_fib_entry *fib_entry; + int err; + + list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { + err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); + if (err) + return err; + } + return 0; +} + +static void +mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop *nh; + bool offload_change = false; + u32 adj_index; + u16 ecmp_size = 0; + bool old_adj_index_valid; + u32 old_adj_index; + u16 old_ecmp_size; + int ret; + int i; + int err; + + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + + if (nh->should_offload ^ nh->offloaded) { + offload_change = true; + if (nh->should_offload) + nh->update = 1; + } + if (nh->should_offload) + ecmp_size++; + } + if (!offload_change) { + /* Nothing was added or removed, so no need to reallocate. Just + * update MAC on existing adjacency indexes. 
+		 */
+		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+		if (err) {
+			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+			goto set_trap;
+		}
+		return;
+	}
+	if (!ecmp_size)
+		/* No neigh of this group is connected so we just set
+		 * the trap and let everything flow through kernel.
+		 */
+		goto set_trap;
+
+	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
+	if (ret < 0) {
+		/* We ran out of KVD linear space, just set the
+		 * trap and let everything flow through kernel.
+		 */
+		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
+		goto set_trap;
+	}
+	adj_index = ret;
+	old_adj_index_valid = nh_grp->adj_index_valid;
+	old_adj_index = nh_grp->adj_index;
+	old_ecmp_size = nh_grp->ecmp_size;
+	nh_grp->adj_index_valid = 1;
+	nh_grp->adj_index = adj_index;
+	nh_grp->ecmp_size = ecmp_size;
+	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+	if (err) {
+		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+		goto set_trap;
+	}
+
+	if (!old_adj_index_valid) {
+		/* The trap was set for fib entries, so we have to call
+		 * fib entry update to unset it and use adjacency index.
+		 */
+		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+		if (err) {
+			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
+			goto set_trap;
+		}
+		return;
+	}
+
+	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
+					     old_adj_index, old_ecmp_size);
+	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+	if (err) {
+		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
+		goto set_trap;
+	}
+	return;
+
+set_trap:
+	old_adj_index_valid = nh_grp->adj_index_valid;
+	nh_grp->adj_index_valid = 0;
+	for (i = 0; i < nh_grp->count; i++) {
+		nh = &nh_grp->nexthops[i];
+		nh->offloaded = 0;
+	}
+	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+	if (err)
+		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
+	if (old_adj_index_valid)
+		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+}
+
+static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
+					    bool removing)
+{
+	if (!removing && !nh->should_offload)
+		nh->should_offload = 1;
+	else if (removing && nh->offloaded)
+		nh->should_offload = 0;
+	nh->update = 1;
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_neigh_entry *neigh_entry,
+			      bool removing)
+{
+	struct mlxsw_sp_nexthop *nh;
+
+	/* Take RTNL mutex here to prevent the lists from changing */
+	rtnl_lock();
+	list_for_each_entry(nh, &neigh_entry->nexthop_list,
+			    neigh_list_node) {
+		__mlxsw_sp_nexthop_neigh_update(nh, removing);
+		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+	}
+	rtnl_unlock();
+}
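mlxsw_sp_nexthop_group_refresh() above recomputes the ECMP size from the nexthops that can currently be offloaded, writes the group into a freshly allocated adjacency region and only then releases the old one, falling back to a CPU trap when nothing is offloadable. A simplified standalone sketch of that make-before-break flow follows; the allocator is a stub and all sizes are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct nexthop {
	bool should_offload;
	bool offloaded;
};

struct group {
	bool adj_valid;
	int adj_index;
	int ecmp_size;
	struct nexthop nh[4];
	int count;
};

static int next_free; /* stand-in for the KVD linear allocator */

static int adj_alloc(int size)
{
	int index = next_free;

	next_free += size;
	return index;
}

static void group_refresh(struct group *grp)
{
	int old_index = grp->adj_index;
	bool old_valid = grp->adj_valid;
	int ecmp_size = 0;
	int i;

	for (i = 0; i < grp->count; i++)
		if (grp->nh[i].should_offload)
			ecmp_size++;
	if (!ecmp_size) {
		grp->adj_valid = false; /* trap to CPU instead */
		return;
	}
	grp->adj_index = adj_alloc(ecmp_size); /* new region first... */
	grp->adj_valid = true;
	grp->ecmp_size = ecmp_size;
	for (i = 0; i < grp->count; i++)
		grp->nh[i].offloaded = grp->nh[i].should_offload;
	if (old_valid) /* ...then release the old one */
		printf("free old region at %d\n", old_index);
}

int main(void)
{
	struct group grp = { .count = 3 };

	grp.nh[0].should_offload = true;
	grp.nh[2].should_offload = true;
	group_refresh(&grp);
	printf("ECMP size %d at adjacency index %d\n",
	       grp.ecmp_size, grp.adj_index);
	return 0;
}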
+
+static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_nexthop_group *nh_grp,
+				 struct mlxsw_sp_nexthop *nh,
+				 struct fib_nh *fib_nh)
+{
+	struct mlxsw_sp_neigh_entry *neigh_entry;
+	u32 gwip = ntohl(fib_nh->nh_gw);
+	struct net_device *dev = fib_nh->nh_dev;
+	struct neighbour *n;
+	u8 nud_state;
+
+	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
+						  sizeof(gwip), dev);
+	if (!neigh_entry) {
+		__be32 gwipn = htonl(gwip);
+
+		n = neigh_create(&arp_tbl, &gwipn, dev);
+		if (IS_ERR(n))
+			return PTR_ERR(n);
+		neigh_event_send(n, NULL);
+		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
+							  sizeof(gwip), dev);
+		if (!neigh_entry) {
+			neigh_release(n);
+			return -EINVAL;
+		}
+	} else {
+		/* Take a reference on the neigh here, ensuring that it would
+		 * not be destructed before the nexthop entry is finished.
+		 * The other branch takes the reference in neigh_create()
+		 */
+		n = neigh_entry->n;
+		neigh_clone(n);
+	}
+
+	/* If that is the first nexthop connected to that neigh, add to
+	 * nexthop_neighs_list
+	 */
+	if (list_empty(&neigh_entry->nexthop_list))
+		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
+			      &mlxsw_sp->router.nexthop_neighs_list);
+
+	nh->nh_grp = nh_grp;
+	nh->neigh_entry = neigh_entry;
+	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
+	read_lock_bh(&n->lock);
+	nud_state = n->nud_state;
+	read_unlock_bh(&n->lock);
+	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
+
+	return 0;
+}
+
+static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_nexthop *nh)
+{
+	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+
+	list_del(&nh->neigh_list_node);
+
+	/* If that is the last nexthop connected to that neigh, remove from
+	 * nexthop_neighs_list
+	 */
+	if (list_empty(&nh->neigh_entry->nexthop_list))
+		list_del(&nh->neigh_entry->nexthop_neighs_list_node);
+
+	neigh_release(neigh_entry->n);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+	struct mlxsw_sp_nexthop_group *nh_grp;
+	struct mlxsw_sp_nexthop *nh;
+	struct fib_nh *fib_nh;
+	size_t alloc_size;
+	int i;
+	int err;
+
+	alloc_size = sizeof(*nh_grp) +
+		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
+	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
+	if (!nh_grp)
+		return ERR_PTR(-ENOMEM);
+	INIT_LIST_HEAD(&nh_grp->fib_list);
+	nh_grp->count = fi->fib_nhs;
+	for (i = 0; i < nh_grp->count; i++) {
+		nh = &nh_grp->nexthops[i];
+		fib_nh = &fi->fib_nh[i];
+		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
+		if (err)
+			goto err_nexthop_init;
+	}
+	list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
+	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+	return nh_grp;
+
+err_nexthop_init:
+	for (i--; i >= 0; i--)
+		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+	kfree(nh_grp);
+	return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
+			       struct mlxsw_sp_nexthop_group *nh_grp)
+{
+	struct mlxsw_sp_nexthop *nh;
+	int i;
+
+	list_del(&nh_grp->list);
+	for (i = 0; i < nh_grp->count; i++) {
+		nh = &nh_grp->nexthops[i];
+		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+	}
+	kfree(nh_grp);
+}
+
+static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
+				   struct fib_info *fi)
+{
+	int i;
+
+	for (i = 0; i < fi->fib_nhs; i++) {
+		struct fib_nh *fib_nh = &fi->fib_nh[i];
+		u32 gwip = ntohl(fib_nh->nh_gw);
+
+		if (memcmp(nh->neigh_entry->key.addr,
+			   &gwip, sizeof(u32)) == 0 &&
+		    nh->neigh_entry->key.dev == fib_nh->nh_dev)
+			return true;
+	}
+	return false;
+}
+
+static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
+					 struct fib_info *fi)
+{
+	int i;
+
+	if (nh_grp->count != fi->fib_nhs)
+		return false;
+	for (i = 0; i < nh_grp->count; i++) {
+		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+
+		if (!mlxsw_sp_nexthop_match(nh, fi))
+			return false;
+	}
+	return true;
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+	struct mlxsw_sp_nexthop_group *nh_grp;
+
+	list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
+			    list) {
+		if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
+			return nh_grp;
+	}
+	return NULL;
+}
+
+static int 
mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + struct fib_info *fi) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + + nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi); + if (!nh_grp) { + nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi); + if (IS_ERR(nh_grp)) + return PTR_ERR(nh_grp); + } + list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list); + fib_entry->nh_group = nh_grp; + return 0; +} + +static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + + list_del(&fib_entry->nexthop_group_node); + if (!list_empty(&nh_grp->fib_list)) + return; + mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp); +} + +static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) +{ + char rgcr_pl[MLXSW_REG_RGCR_LEN]; + + mlxsw_reg_rgcr_pack(rgcr_pl, true); + mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); +} + +static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) +{ + char rgcr_pl[MLXSW_REG_RGCR_LEN]; + + mlxsw_reg_rgcr_pack(rgcr_pl, false); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); +} + +int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list); + INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list); + err = __mlxsw_sp_router_init(mlxsw_sp); + if (err) + return err; + mlxsw_sp_lpm_init(mlxsw_sp); + mlxsw_sp_vrs_init(mlxsw_sp); + err = mlxsw_sp_neigh_init(mlxsw_sp); + if (err) + goto err_neigh_init; + return 0; + +err_neigh_init: + __mlxsw_sp_router_fini(mlxsw_sp); + return err; +} + +void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_neigh_fini(mlxsw_sp); + __mlxsw_sp_router_fini(mlxsw_sp); +} + +static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; + u32 *p_dip = (u32 *) fib_entry->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->vr; + enum mlxsw_reg_ralue_trap_action trap_action; + u16 trap_id = 0; + u32 adjacency_index = 0; + u16 ecmp_size = 0; + + /* In case the nexthop group adjacency index is valid, use it + * with provided ECMP size. Otherwise, setup trap and pass + * traffic to kernel. 
+ */ + if (fib_entry->nh_group->adj_index_valid) { + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; + adjacency_index = fib_entry->nh_group->adj_index; + ecmp_size = fib_entry->nh_group->ecmp_size; + } else { + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; + trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; + } + + mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id, + fib_entry->key.prefix_len, *p_dip); + mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, + adjacency_index, ecmp_size); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + +static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; + u32 *p_dip = (u32 *) fib_entry->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->vr; + + mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id, + fib_entry->key.prefix_len, *p_dip); + mlxsw_reg_ralue_act_local_pack(ralue_pl, + MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0, + fib_entry->rif); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + +static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; + u32 *p_dip = (u32 *) fib_entry->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->vr; + + mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id, + fib_entry->key.prefix_len, *p_dip); + mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + +static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + switch (fib_entry->type) { + case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: + return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: + return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: + return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op); + } + return -EINVAL; +} + +static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + switch (fib_entry->vr->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); + case MLXSW_SP_L3_PROTO_IPV6: + return -EINVAL; + } + return -EINVAL; +} + +static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, + MLXSW_REG_RALUE_OP_WRITE_WRITE); +} + +static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, + MLXSW_REG_RALUE_OP_WRITE_DELETE); +} + +struct mlxsw_sp_router_fib4_add_info { + struct switchdev_trans_item tritem; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_fib_entry *fib_entry; +}; + +static void mlxsw_sp_router_fib4_add_info_destroy(void const *data) +{ + const struct mlxsw_sp_router_fib4_add_info *info = data; + struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; + struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; + struct mlxsw_sp_vr *vr = fib_entry->vr; + + mlxsw_sp_fib_entry_destroy(fib_entry); + mlxsw_sp_vr_put(mlxsw_sp, vr); + kfree(info); +} + +static int +mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, + const struct switchdev_obj_ipv4_fib *fib4, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct fib_info *fi = fib4->fi; + + if 
(fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) { + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + return 0; + } + if (fib4->type != RTN_UNICAST) + return -EINVAL; + + if (fi->fib_scope != RT_SCOPE_UNIVERSE) { + struct mlxsw_sp_rif *r; + + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev); + if (!r) + return -EINVAL; + fib_entry->rif = r->rif; + return 0; + } + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; + return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi); +} + +static void +mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE) + return; + mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); +} + +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct mlxsw_sp_fib_entry *fib_entry; + struct fib_info *fi = fib4->fi; + struct mlxsw_sp_vr *vr; + int err; + + vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id, + MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst, + sizeof(fib4->dst), + fib4->dst_len, fi->fib_dev); + if (fib_entry) { + /* Already exists, just take a reference */ + fib_entry->ref_count++; + return fib_entry; + } + fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst, + sizeof(fib4->dst), + fib4->dst_len, fi->fib_dev); + if (!fib_entry) { + err = -ENOMEM; + goto err_fib_entry_create; + } + fib_entry->vr = vr; + fib_entry->ref_count = 1; + + err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry); + if (err) + goto err_fib4_entry_init; + + return fib_entry; + +err_fib4_entry_init: + mlxsw_sp_fib_entry_destroy(fib_entry); +err_fib_entry_create: + mlxsw_sp_vr_put(mlxsw_sp, vr); + + return ERR_PTR(err); +} + +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct mlxsw_sp_vr *vr; + + vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4); + if (!vr) + return NULL; + + return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst, + sizeof(fib4->dst), fib4->dst_len, + fib4->fi->fib_dev); +} + +void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_vr *vr = fib_entry->vr; + + if (--fib_entry->ref_count == 0) { + mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); + mlxsw_sp_fib_entry_destroy(fib_entry); + } + mlxsw_sp_vr_put(mlxsw_sp, vr); +} + +static int +mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_router_fib4_add_info *info; + struct mlxsw_sp_fib_entry *fib_entry; + int err; + + fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4); + if (IS_ERR(fib_entry)) + return PTR_ERR(fib_entry); + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + err = -ENOMEM; + goto err_alloc_info; + } + info->mlxsw_sp = mlxsw_sp; + info->fib_entry = fib_entry; + switchdev_trans_item_enqueue(trans, info, + mlxsw_sp_router_fib4_add_info_destroy, + &info->tritem); + return 0; + +err_alloc_info: + mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); + return err; +} + +static int +mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans 
*trans) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_router_fib4_add_info *info; + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_vr *vr; + int err; + + info = switchdev_trans_item_dequeue(trans); + fib_entry = info->fib_entry; + kfree(info); + + if (fib_entry->ref_count != 1) + return 0; + + vr = fib_entry->vr; + err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry); + if (err) + goto err_fib_entry_insert; + err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry); + if (err) + goto err_fib_entry_add; + return 0; + +err_fib_entry_add: + mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); +err_fib_entry_insert: + mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); + return err; +} + +int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + if (switchdev_trans_ph_prepare(trans)) + return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port, + fib4, trans); + return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port, + fib4, trans); +} + +int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_fib_entry *fib_entry; + + fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4); + if (!fib_entry) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); + return -ENOENT; + } + + if (fib_entry->ref_count == 1) { + mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); + mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry); + } + + mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 3710f19ed..7b654c517 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -55,13 +55,10 @@ static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port); u16 fid = vid; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); - - fid = mlxsw_sp_vfid_to_fid(vfid); - } + fid = f ? 
f->fid : fid; if (!fid) fid = mlxsw_sp_port->pvid; @@ -169,14 +166,9 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); } -static bool mlxsw_sp_vfid_is_vport_br(u16 vfid) -{ - return vfid >= MLXSW_SP_VFID_PORT_MAX; -} - static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 idx_begin, u16 idx_end, bool set, - bool only_uc) + u16 idx_begin, u16 idx_end, bool uc_set, + bool bm_set) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u16 local_port = mlxsw_sp_port->local_port; @@ -185,43 +177,32 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, char *sftr_pl; int err; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; - if (mlxsw_sp_vfid_is_vport_br(idx_begin)) - local_port = mlxsw_sp_port->local_port; - else - local_port = MLXSW_PORT_CPU_PORT; - } else { + else table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; - } sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); if (!sftr_pl) return -ENOMEM; mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, - table_type, range, local_port, set); + table_type, range, local_port, uc_set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); if (err) goto buffer_out; - /* Flooding control allows one to decide whether a given port will - * flood unicast traffic for which there is no FDB entry. - */ - if (only_uc) - goto buffer_out; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, - table_type, range, local_port, set); + table_type, range, local_port, bm_set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); if (err) goto err_flood_bm_set; - else - goto buffer_out; + + goto buffer_out; err_flood_bm_set: mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, - table_type, range, local_port, !set); + table_type, range, local_port, !uc_set); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); buffer_out: kfree(sftr_pl); @@ -236,7 +217,8 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, int err; if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid; + u16 vfid = mlxsw_sp_fid_to_vfid(fid); return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid, set, true); @@ -260,14 +242,16 @@ err_port_flood_set: return err; } -int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, - bool set, bool only_uc) +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool set) { + u16 vfid; + /* In case of vFIDs, index into the flooding table is relative to * the start of the vFIDs range. 
*/ - return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, - only_uc); + vfid = mlxsw_sp_fid_to_vfid(fid); + return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set); } static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -383,6 +367,192 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, return err; } +static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) +{ + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID; + char svfa_pl[MLXSW_REG_SVFA_LEN]; + + mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); +} + +static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid) +{ + struct mlxsw_sp_fid *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + f->fid = fid; + + return f; +} + +struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) +{ + struct mlxsw_sp_fid *f; + int err; + + err = mlxsw_sp_fid_op(mlxsw_sp, fid, true); + if (err) + return ERR_PTR(err); + + /* Although all the ports member in the FID might be using a + * {Port, VID} to FID mapping, we create a global VID-to-FID + * mapping. This allows a port to transition to VLAN mode, + * knowing the global mapping exists. + */ + err = mlxsw_sp_fid_map(mlxsw_sp, fid, true); + if (err) + goto err_fid_map; + + f = mlxsw_sp_fid_alloc(fid); + if (!f) { + err = -ENOMEM; + goto err_allocate_fid; + } + + list_add(&f->list, &mlxsw_sp->fids); + + return f; + +err_allocate_fid: + mlxsw_sp_fid_map(mlxsw_sp, fid, false); +err_fid_map: + mlxsw_sp_fid_op(mlxsw_sp, fid, false); + return ERR_PTR(err); +} + +void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) +{ + u16 fid = f->fid; + + list_del(&f->list); + + if (f->r) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + + kfree(f); + + mlxsw_sp_fid_map(mlxsw_sp, fid, false); + + mlxsw_sp_fid_op(mlxsw_sp, fid, false); +} + +static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp_fid *f; + + if (test_bit(fid, mlxsw_sp_port->active_vlans)) + return 0; + + f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); + if (!f) { + f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid); + if (IS_ERR(f)) + return PTR_ERR(f); + } + + f->ref_count++; + + netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid); + + return 0; +} + +static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp_fid *f; + + f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); + if (WARN_ON(!f)) + return; + + netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid); + + mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid); + + if (--f->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f); +} + +static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid, + bool valid) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + + /* If port doesn't have vPorts, then it can use the global + * VID-to-FID mapping. 
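The __mlxsw_sp_port_fid_join()/__mlxsw_sp_port_fid_leave() pair above is a find-or-create table with a reference count: the FID is created on first use and destroyed when the last user leaves. A condensed, self-contained sketch of that pattern (all names here are invented for illustration):

struct sketch_fid {
	struct list_head list;
	u16 fid;
	unsigned int ref_count;
};

static struct sketch_fid *sketch_fid_get(struct list_head *fids, u16 fid)
{
	struct sketch_fid *f;

	list_for_each_entry(f, fids, list)	/* lookup first */
		if (f->fid == fid)
			goto out;

	f = kzalloc(sizeof(*f), GFP_KERNEL);	/* create on first use */
	if (!f)
		return NULL;
	f->fid = fid;
	list_add(&f->list, fids);
out:
	f->ref_count++;
	return f;
}

static void sketch_fid_put(struct sketch_fid *f)
{
	if (--f->ref_count)
		return;
	list_del(&f->list);	/* last user gone: tear the FID down */
	kfree(f);
}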
+ */ + if (list_empty(&mlxsw_sp_port->vports_list)) + return 0; + + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid); +} + +static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid_begin, u16 fid_end) +{ + int fid, err; + + for (fid = fid_begin; fid <= fid_end; fid++) { + err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid); + if (err) + goto err_port_fid_join; + } + + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, + mlxsw_sp_port->uc_flood, true); + if (err) + goto err_port_flood_set; + + for (fid = fid_begin; fid <= fid_end; fid++) { + err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true); + if (err) + goto err_port_fid_map; + } + + return 0; + +err_port_fid_map: + for (fid--; fid >= fid_begin; fid--) + mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); + __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, + false); +err_port_flood_set: + fid = fid_end; +err_port_fid_join: + for (fid--; fid >= fid_begin; fid--) + __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); + return err; +} + +static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid_begin, u16 fid_end) +{ + int fid; + + for (fid = fid_begin; fid <= fid_end; fid++) + mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); + + __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, + false); + + for (fid = fid_begin; fid <= fid_end; fid++) + __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); +} + static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { @@ -440,74 +610,6 @@ err_port_allow_untagged_set: return err; } -static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - int err; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); - - if (err) - return err; - - set_bit(fid, mlxsw_sp->active_fids); - return 0; -} - -static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - clear_bit(fid, mlxsw_sp->active_fids); - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, - fid, fid); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - -static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) -{ - enum mlxsw_reg_svfa_mt mt; - - if (!list_empty(&mlxsw_sp_port->vports_list)) - mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; - else - mt = MLXSW_REG_SVFA_MT_VID_TO_FID; - - return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid); -} - -static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) -{ - enum mlxsw_reg_svfa_mt mt; - - if (list_empty(&mlxsw_sp_port->vports_list)) - return 0; - - mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; - return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid); -} - -static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin, - u16 vid_end) -{ - u16 vid; - int err; - - for (vid = vid_begin; vid <= vid_end; vid++) { - err = mlxsw_sp_port_add_vid(dev, 0, vid); - if (err) - goto err_port_add_vid; - } - return 0; - -err_port_add_vid: - for (vid--; vid >= vid_begin; vid--) - mlxsw_sp_port_kill_vid(dev, 0, vid); - return err; -} - static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged) @@ -533,57 +635,17 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool flag_untagged, bool 
flag_pvid) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *dev = mlxsw_sp_port->dev; - u16 vid, last_visited_vid, old_pvid; - enum mlxsw_reg_svfa_mt mt; + u16 vid, old_pvid; int err; - /* In case this is invoked with BRIDGE_FLAGS_SELF and port is - * not bridged, then packets ingressing through the port with - * the specified VIDs will be directed to CPU. - */ if (!mlxsw_sp_port->bridged) - return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end); - - for (vid = vid_begin; vid <= vid_end; vid++) { - if (!test_bit(vid, mlxsw_sp->active_fids)) { - err = mlxsw_sp_fid_create(mlxsw_sp, vid); - if (err) { - netdev_err(dev, "Failed to create FID=%d\n", - vid); - return err; - } - - /* When creating a FID, we set a VID to FID mapping - * regardless of the port's mode. - */ - mt = MLXSW_REG_SVFA_MT_VID_TO_FID; - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, - true, vid, vid); - if (err) { - netdev_err(dev, "Failed to create FID=VID=%d mapping\n", - vid); - goto err_port_vid_to_fid_set; - } - } - } - - /* Set FID mapping according to port's mode */ - for (vid = vid_begin; vid <= vid_end; vid++) { - err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid); - if (err) { - netdev_err(dev, "Failed to map FID=%d", vid); - last_visited_vid = --vid; - goto err_port_fid_map; - } - } + return -EINVAL; - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, - true, false); + err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end); if (err) { - netdev_err(dev, "Failed to configure flooding\n"); - goto err_port_flood_set; + netdev_err(dev, "Failed to join FIDs\n"); + return err; } err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, @@ -628,10 +690,6 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, return 0; -err_port_vid_to_fid_set: - mlxsw_sp_fid_destroy(mlxsw_sp, vid); - return err; - err_port_stp_state_set: for (vid = vid_begin; vid <= vid_end; vid++) clear_bit(vid, mlxsw_sp_port->active_vlans); @@ -641,13 +699,7 @@ err_port_pvid_set: __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, false); err_port_vlans_set: - __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false, - false); -err_port_flood_set: - last_visited_vid = vid_end; -err_port_fid_map: - for (vid = last_visited_vid; vid >= vid_begin; vid--) - mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); + mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); return err; } @@ -678,9 +730,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, - const char *mac, u16 fid, bool adding, - bool dynamic) +static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const char *mac, u16 fid, bool adding, + enum mlxsw_reg_sfd_rec_action action, + bool dynamic) { char *sfd_pl; int err; @@ -691,14 +744,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), - mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, - local_port); + mac, fid, action, local_port); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); kfree(sfd_pl); return err; } +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const char *mac, u16 fid, bool adding, + bool dynamic) +{ + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, + 
MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); +} + +int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, + bool adding) +{ + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, + MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, + false); +} + static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, const char *mac, u16 fid, u16 lag_vid, bool adding, bool dynamic) @@ -903,6 +971,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, SWITCHDEV_OBJ_PORT_VLAN(obj), trans); break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + err = mlxsw_sp_router_fib4_add(mlxsw_sp_port, + SWITCHDEV_OBJ_IPV4_FIB(obj), + trans); + break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_FDB(obj), @@ -921,34 +994,15 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, return err; } -static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin, - u16 vid_end) -{ - u16 vid; - int err; - - for (vid = vid_begin; vid <= vid_end; vid++) { - err = mlxsw_sp_port_kill_vid(dev, 0, vid); - if (err) - return err; - } - - return 0; -} - static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid_begin, u16 vid_end, bool init) + u16 vid_begin, u16 vid_end) { struct net_device *dev = mlxsw_sp_port->dev; u16 vid, pvid; int err; - /* In case this is invoked with BRIDGE_FLAGS_SELF and port is - * not bridged, then prevent packets ingressing through the - * port with the specified VIDs from being trapped to CPU. - */ - if (!init && !mlxsw_sp_port->bridged) - return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end); + if (!mlxsw_sp_port->bridged) + return -EINVAL; err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, false); @@ -958,9 +1012,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, return err; } - if (init) - goto out; - pvid = mlxsw_sp_port->pvid; if (pvid >= vid_begin && pvid <= vid_end) { err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); @@ -970,23 +1021,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, } } - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, - false, false); - if (err) { - netdev_err(dev, "Failed to clear flooding\n"); - return err; - } + mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); - for (vid = vid_begin; vid <= vid_end; vid++) { - /* Remove FID mapping in case of Virtual mode */ - err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); - if (err) { - netdev_err(dev, "Failed to unmap FID=%d", vid); - return err; - } - } - -out: /* Changing activity bits only if HW operation succeded */ for (vid = vid_begin; vid <= vid_end; vid++) clear_bit(vid, mlxsw_sp_port->active_vlans); @@ -997,8 +1033,8 @@ out: static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan) { - return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, - vlan->vid_begin, vlan->vid_end, false); + return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin, + vlan->vid_end); } void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) @@ -1006,7 +1042,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) u16 vid; for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) - __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); + __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid); } static int @@ -1081,6 +1117,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, err = 
mlxsw_sp_port_vlans_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_VLAN(obj)); break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + err = mlxsw_sp_router_fib4_del(mlxsw_sp_port, + SWITCHDEV_OBJ_IPV4_FIB(obj)); + break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_FDB(obj)); @@ -1118,7 +1158,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port *tmp; - u16 vport_fid = 0; + struct mlxsw_sp_fid *f; + u16 vport_fid; char *sfd_pl; char mac[ETH_ALEN]; u16 fid; @@ -1133,12 +1174,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, if (!sfd_pl) return -ENOMEM; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 tmp; - - tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); - vport_fid = mlxsw_sp_vfid_to_fid(tmp); - } + f = mlxsw_sp_vport_fid_get(mlxsw_sp_port); + vport_fid = f ? f->fid : 0; mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); do { @@ -1310,11 +1347,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, } if (mlxsw_sp_fid_is_vfid(fid)) { - u16 vfid = mlxsw_sp_fid_to_vfid(fid); struct mlxsw_sp_port *mlxsw_sp_vport; - mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, - vfid); + mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, + fid); if (!mlxsw_sp_vport) { netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); goto just_remove; @@ -1370,11 +1406,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, } if (mlxsw_sp_fid_is_vfid(fid)) { - u16 vfid = mlxsw_sp_fid_to_vfid(fid); struct mlxsw_sp_port *mlxsw_sp_vport; - mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, - vfid); + mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, + fid); if (!mlxsw_sp_vport) { netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); goto just_remove; @@ -1495,14 +1530,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw); } -static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp) -{ - u16 fid; - - for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID) - mlxsw_sp_fid_destroy(mlxsw_sp, fid); -} - int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) { return mlxsw_sp_fdb_init(mlxsw_sp); @@ -1511,33 +1538,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp_fdb_fini(mlxsw_sp); - mlxsw_sp_fids_fini(mlxsw_sp); -} - -int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) -{ - struct net_device *dev = mlxsw_sp_port->dev; - int err; - - /* Allow only untagged packets to ingress and tag them internally - * with VID 1. - */ - mlxsw_sp_port->pvid = 1; - err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, - true); - if (err) { - netdev_err(dev, "Unable to init VLANs\n"); - return err; - } - - /* Add implicit VLAN interface in the device, so that untagged - * packets will be classified to the default vFID. 
- */ - err = mlxsw_sp_port_add_vid(dev, 0, 1); - if (err) - netdev_err(dev, "Failed to configure default vFID\n"); - - return err; } void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 25f658b38..377daa4d5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1541,6 +1541,7 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = { .type = MLXSW_PORT_SWID_TYPE_ETH, } }, + .resource_query_enable = 0, }; static struct mlxsw_driver mlxsw_sx_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 53a9550be..ed8e30186 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -54,6 +54,15 @@ enum { MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32, MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33, MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, + MLXSW_TRAP_ID_ARPBC = 0x50, + MLXSW_TRAP_ID_ARPUC = 0x51, + MLXSW_TRAP_ID_MTUERROR = 0x52, + MLXSW_TRAP_ID_TTLERROR = 0x53, + MLXSW_TRAP_ID_LBERROR = 0x54, + MLXSW_TRAP_ID_OSPF = 0x55, + MLXSW_TRAP_ID_IP2ME = 0x5F, + MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, + MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, MLXSW_TRAP_ID_MAX = 0x1FF }; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 2874dffe7..eaa37c079 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7412,7 +7412,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) || - (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && + (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) && (dev->features & NETIF_F_RXCSUM)) { l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index e744acc18..690635660 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -63,7 +63,7 @@ #define NFP_NET_POLL_TIMEOUT 5 /* Bar allocation */ -#define NFP_NET_CRTL_BAR 0 +#define NFP_NET_CTRL_BAR 0 #define NFP_NET_Q0_BAR 2 #define NFP_NET_Q1_BAR 4 /* OBSOLETE */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index ba26bb356..39dadfca8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -41,7 +41,6 @@ * Chris Telfer */ -#include #include #include #include @@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_net_set_hash(nn->netdev, skb, rxd); - /* Pad small frames to minimum */ - if (skb_put_padto(skb, 60)) - break; - /* Stats update */ u64_stats_update_begin(&r_vec->rx_sync); r_vec->rx_pkts++; @@ -1845,13 +1840,14 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) } /** - * nfp_net_write_mac_addr() - Write mac address to device registers + * nfp_net_write_mac_addr() - Write mac address to the device control BAR * @nn: NFP Net device to reconfigure - * @mac: Six-byte MAC address to be written * - * We do a bit of byte swapping dance because firmware is LE. + * Writes the MAC address from the netdev to the device control BAR. Does not + * perform the required reconfig. 
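The "byte swapping dance" this comment goes on to mention is just packing the six MAC bytes, kept in network order, into register-sized words before the little-endian writes; a stand-alone sketch of the arithmetic behind the get_unaligned_be32() call visible below and its assumed 16-bit counterpart:

static void sketch_pack_mac(u32 *reg_hi, u32 *reg_lo, const u8 mac[6])
{
	/* BE32 of bytes 0..3, then BE16 of bytes 4..5, so the device sees
	 * the address in network order regardless of host endianness.
	 */
	*reg_hi = ((u32)mac[0] << 24) | ((u32)mac[1] << 16) |
		  ((u32)mac[2] << 8) | mac[3];
	*reg_lo = ((u32)mac[4] << 8) | mac[5];
}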
We do a bit of byte swapping dance because + * firmware is LE. */ -static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac) +static void nfp_net_write_mac_addr(struct nfp_net *nn) { nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(nn->netdev->dev_addr)); @@ -1952,7 +1948,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); - nfp_net_write_mac_addr(nn, nn->netdev->dev_addr); + nfp_net_write_mac_addr(nn); nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); @@ -1979,7 +1975,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - vxlan_get_rx_port(nn->netdev); + udp_tunnel_get_rx_info(nn->netdev); } return err; @@ -2048,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev) nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings), GFP_KERNEL); - if (!nn->rx_rings) + if (!nn->rx_rings) { + err = -ENOMEM; goto err_free_lsc; + } nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings), GFP_KERNEL); - if (!nn->tx_rings) + if (!nn->tx_rings) { + err = -ENOMEM; goto err_free_rx_rings; + } for (r = 0; r < nn->num_r_vecs; r++) { err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); @@ -2551,27 +2551,33 @@ static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port) } static void nfp_net_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct nfp_net *nn = netdev_priv(netdev); int idx; - idx = nfp_net_find_vxlan_idx(nn, port); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + idx = nfp_net_find_vxlan_idx(nn, ti->port); if (idx == -ENOSPC) return; if (!nn->vxlan_usecnt[idx]++) - nfp_net_set_vxlan_port(nn, idx, port); + nfp_net_set_vxlan_port(nn, idx, ti->port); } static void nfp_net_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct nfp_net *nn = netdev_priv(netdev); int idx; - idx = nfp_net_find_vxlan_idx(nn, port); - if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + idx = nfp_net_find_vxlan_idx(nn, ti->port); + if (idx == -ENOSPC || !nn->vxlan_usecnt[idx]) return; if (!--nn->vxlan_usecnt[idx]) @@ -2589,8 +2595,8 @@ static const struct net_device_ops nfp_net_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, - .ndo_add_vxlan_port = nfp_net_add_vxlan_port, - .ndo_del_vxlan_port = nfp_net_del_vxlan_port, + .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, + .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, }; /** @@ -2733,7 +2739,7 @@ int nfp_net_netdev_init(struct net_device *netdev) nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); - nfp_net_write_mac_addr(nn, nn->netdev->dev_addr); + nfp_net_write_mac_addr(nn); /* Set default MTU and Freelist buffer size */ if (nn->max_mtu < NFP_NET_DEFAULT_MTU) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index ccfef1f17..4c9897220 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -40,7 +40,6 @@ * Brad Petrus */ 
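The udp_tunnel conversion above keeps a per-slot use count so the port is programmed only on the 0 to 1 transition and cleared again on 1 to 0, with non-VXLAN tunnel types ignored. A sketch of that add-side pattern, with sketch_nn, sketch_find_port_idx() and sketch_program_port() invented for illustration:

static void sketch_udp_tunnel_add(struct net_device *netdev,
				  struct udp_tunnel_info *ti)
{
	struct sketch_nn *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;		/* only VXLAN is offloaded */

	idx = sketch_find_port_idx(nn, ti->port); /* existing or free slot */
	if (idx < 0)
		return;		/* table full: silently skip, as above */

	if (!nn->port_usecnt[idx]++)
		sketch_program_port(nn, idx, ti->port);	/* first user */
}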
-#include #include #include #include @@ -605,6 +604,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev, static const struct ethtool_ops nfp_net_ethtool_ops = { .get_drvinfo = nfp_net_get_drvinfo, + .get_link = ethtool_op_get_link, .get_ringparam = nfp_net_get_ringparam, .set_ringparam = nfp_net_set_ringparam, .get_strings = nfp_net_get_strings, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index e2b22b8a2..f7062cb64 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -38,7 +38,6 @@ * Rolf Neugebauer */ -#include #include #include #include @@ -124,17 +123,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code * the identical for PF and VF drivers. */ - ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CRTL_BAR), + ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR), NFP_NET_CFG_BAR_SZ); if (!ctrl_bar) { dev_err(&pdev->dev, - "Failed to map resource %d\n", NFP_NET_CRTL_BAR); + "Failed to map resource %d\n", NFP_NET_CTRL_BAR); err = -EIO; goto err_pci_regions; } nfp_net_get_fw_version(&fw_ver, ctrl_bar); - if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); err = -EINVAL; @@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, } /* Determine stride */ - if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) || - nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) || - nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) { + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { stride = 2; tx_bar_no = NFP_NET_Q0_BAR; rx_bar_no = NFP_NET_Q1_BAR; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index b1ce7aaa8..8e13ec84c 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -425,7 +425,6 @@ struct netdata_local { unsigned int last_tx_idx; unsigned int num_used_tx_buffs; struct mii_bus *mii_bus; - struct phy_device *phy_dev; struct clk *clk; dma_addr_t dma_buff_base_p; void *dma_buff_base_v; @@ -476,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) mac[5] = tmp >> 8; } -static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable) -{ - if (enable) - clk_prepare_enable(pldat->clk); - else - clk_disable_unprepare(pldat->clk); -} - static void __lpc_params_setup(struct netdata_local *pldat) { u32 tmp; @@ -750,7 +741,7 @@ static int lpc_mdio_reset(struct mii_bus *bus) static void lpc_handle_link_change(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); - struct phy_device *phydev = pldat->phy_dev; + struct phy_device *phydev = ndev->phydev; unsigned long flags; bool status_change = false; @@ -814,7 +805,6 @@ static int lpc_mii_probe(struct net_device *ndev) pldat->link = 0; pldat->speed = 0; pldat->duplex = -1; - pldat->phy_dev = phydev; phy_attached_info(phydev); @@ -1048,8 +1038,8 @@ static int lpc_eth_close(struct net_device *ndev) napi_disable(&pldat->napi); netif_stop_queue(ndev); - if (pldat->phy_dev) - phy_stop(pldat->phy_dev); + if (ndev->phydev) + phy_stop(ndev->phydev); spin_lock_irqsave(&pldat->lock, flags); __lpc_eth_reset(pldat); @@ -1058,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev) writel(0, 
LPC_ENET_MAC2(pldat->net_base)); spin_unlock_irqrestore(&pldat->lock, flags); - __lpc_eth_clock_enable(pldat, false); + clk_disable_unprepare(pldat->clk); return 0; } @@ -1185,8 +1175,7 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev) static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) { - struct netdata_local *pldat = netdev_priv(ndev); - struct phy_device *phydev = pldat->phy_dev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; @@ -1200,21 +1189,24 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) static int lpc_eth_open(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); + int ret; if (netif_msg_ifup(pldat)) dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); - __lpc_eth_clock_enable(pldat, true); + ret = clk_prepare_enable(pldat->clk); + if (ret) + return ret; /* Suspended PHY makes LPC ethernet core block, so resume now */ - phy_resume(pldat->phy_dev); + phy_resume(ndev->phydev); /* Reset and initialize */ __lpc_eth_reset(pldat); __lpc_eth_init(pldat); /* schedule a link state check */ - phy_start(pldat->phy_dev); + phy_start(ndev->phydev); netif_start_queue(ndev); napi_enable(&pldat->napi); @@ -1247,37 +1239,13 @@ static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level) pldat->msg_enable = level; } -static int lpc_eth_ethtool_getsettings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct netdata_local *pldat = netdev_priv(ndev); - struct phy_device *phydev = pldat->phy_dev; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_gset(phydev, cmd); -} - -static int lpc_eth_ethtool_setsettings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct netdata_local *pldat = netdev_priv(ndev); - struct phy_device *phydev = pldat->phy_dev; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_sset(phydev, cmd); -} - static const struct ethtool_ops lpc_eth_ethtool_ops = { .get_drvinfo = lpc_eth_ethtool_getdrvinfo, - .get_settings = lpc_eth_ethtool_getsettings, - .set_settings = lpc_eth_ethtool_setsettings, .get_msglevel = lpc_eth_ethtool_getmsglevel, .set_msglevel = lpc_eth_ethtool_setmsglevel, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops lpc_netdev_ops = { @@ -1347,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) } /* Enable network clock */ - __lpc_eth_clock_enable(pldat, true); + ret = clk_prepare_enable(pldat->clk); + if (ret) + goto err_out_clk_put; /* Map IO space */ pldat->net_base = ioremap(res->start, resource_size(res)); @@ -1460,7 +1430,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", res->start, ndev->irq); - phydev = pldat->phy_dev; + phydev = ndev->phydev; device_init_wakeup(&pdev->dev, 1); device_set_wakeup_enable(&pdev->dev, 0); @@ -1481,6 +1451,7 @@ err_out_iounmap: iounmap(pldat->net_base); err_out_disable_clocks: clk_disable_unprepare(pldat->clk); +err_out_clk_put: clk_put(pldat->clk); err_out_free_dev: free_netdev(ndev); diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index af54df52a..2f4a837f0 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -989,7 +989,7 @@ static void pasemi_adjust_link(struct net_device *dev) unsigned int flags; unsigned int new_flags; - 
if (!mac->phydev->link) { + if (!dev->phydev->link) { /* If no link, MAC speed settings don't matter. Just report * link down and return. */ @@ -1010,10 +1010,10 @@ static void pasemi_adjust_link(struct net_device *dev) new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | PAS_MAC_CFG_PCFG_TSR_M); - if (!mac->phydev->duplex) + if (!dev->phydev->duplex) new_flags |= PAS_MAC_CFG_PCFG_HD; - switch (mac->phydev->speed) { + switch (dev->phydev->speed) { case 1000: new_flags |= PAS_MAC_CFG_PCFG_SPD_1G | PAS_MAC_CFG_PCFG_TSR_1G; @@ -1027,15 +1027,15 @@ static void pasemi_adjust_link(struct net_device *dev) PAS_MAC_CFG_PCFG_TSR_10M; break; default: - printk("Unsupported speed %d\n", mac->phydev->speed); + printk("Unsupported speed %d\n", dev->phydev->speed); } /* Print on link or speed/duplex change */ - msg = mac->link != mac->phydev->link || flags != new_flags; + msg = mac->link != dev->phydev->link || flags != new_flags; - mac->duplex = mac->phydev->duplex; - mac->speed = mac->phydev->speed; - mac->link = mac->phydev->link; + mac->duplex = dev->phydev->duplex; + mac->speed = dev->phydev->speed; + mac->link = dev->phydev->link; if (new_flags != flags) write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); @@ -1067,8 +1067,6 @@ static int pasemi_mac_phy_init(struct net_device *dev) return -ENODEV; } - mac->phydev = phydev; - return 0; } @@ -1198,8 +1196,8 @@ static int pasemi_mac_open(struct net_device *dev) goto out_rx_int; } - if (mac->phydev) - phy_start(mac->phydev); + if (dev->phydev) + phy_start(dev->phydev); setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, (unsigned long)mac->tx); @@ -1293,9 +1291,9 @@ static int pasemi_mac_close(struct net_device *dev) rxch = rx_ring(mac)->chan.chno; txch = tx_ring(mac)->chan.chno; - if (mac->phydev) { - phy_stop(mac->phydev); - phy_disconnect(mac->phydev); + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); } del_timer_sync(&mac->tx->clean_timer); diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h index 161c99a98..7c47e263b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.h +++ b/drivers/net/ethernet/pasemi/pasemi_mac.h @@ -70,7 +70,6 @@ struct pasemi_mac { struct pci_dev *pdev; struct pci_dev *dma_pdev; struct pci_dev *iob_pdev; - struct phy_device *phydev; struct napi_struct napi; int bufsz; /* RX ring buffer size */ diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c index f046bfc18..d0afc2b8f 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c @@ -62,32 +62,6 @@ static struct { { "tx-1024-1518-byte-packets" }, }; -static int -pasemi_mac_ethtool_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct pasemi_mac *mac = netdev_priv(netdev); - struct phy_device *phydev = mac->phydev; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_gset(phydev, cmd); -} - -static int -pasemi_mac_ethtool_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct pasemi_mac *mac = netdev_priv(netdev); - struct phy_device *phydev = mac->phydev; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_sset(phydev, cmd); -} - static u32 pasemi_mac_ethtool_get_msglevel(struct net_device *netdev) { @@ -145,8 +119,6 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset, } const struct ethtool_ops pasemi_mac_ethtool_ops = { - .get_settings = pasemi_mac_ethtool_get_settings, - 
.set_settings = pasemi_mac_ethtool_set_settings, .get_msglevel = pasemi_mac_ethtool_get_msglevel, .set_msglevel = pasemi_mac_ethtool_set_msglevel, .get_link = ethtool_op_get_link, @@ -154,5 +126,7 @@ const struct ethtool_ops pasemi_mac_ethtool_ops = { .get_strings = pasemi_mac_get_strings, .get_sset_count = pasemi_mac_get_sset_count, .get_ethtool_stats = pasemi_mac_get_ethtool_stats, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 680d8c736..6ba484068 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -54,16 +54,6 @@ config QLCNIC_DCB mode of DCB is supported. PG and PFC values are related only to Tx. -config QLCNIC_VXLAN - bool "Virtual eXtensible Local Area Network (VXLAN) offload support" - default n - depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m) - ---help--- - This enables hardware offload support for VXLAN protocol over QLogic's - 84XX series adapters. - Say Y here if you want to enable hardware offload support for - Virtual eXtensible Local Area Network (VXLAN) in the driver. - config QLCNIC_HWMON bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support" depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m) @@ -114,24 +104,4 @@ config QEDE ---help--- This enables the support for ... -config QEDE_VXLAN - bool "Virtual eXtensible Local Area Network support" - default n - depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m) - ---help--- - This enables hardware offload support for VXLAN protocol over - qede module. Say Y here if you want to enable hardware offload - support for Virtual eXtensible Local Area Network (VXLAN) - in the driver. - -config QEDE_GENEVE - bool "Generic Network Virtualization Encapsulation (GENEVE) support" - depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m) - ---help--- - This allows one to create GENEVE virtual interfaces that provide - Layer 2 Networks over Layer 3 Networks. GENEVE is often used - to tunnel virtual network infrastructure in virtualized environments. - Say Y here if you want to enable hardware offload support for - Generic Network Virtualization Encapsulation (GENEVE) in the driver. 
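The unusual '!(QLCNIC=y && VXLAN=m)' clauses in the options deleted above served one purpose: a built-in driver must not reference symbols exported by a modular VXLAN core. After this cleanup the same guard can live in C rather than Kconfig; a hedged sketch using the stock IS_REACHABLE() test (illustrative only, not code from this patch):

static void sketch_request_tunnel_ports(struct net_device *netdev)
{
	/* False when CONFIG_VXLAN is a module but this code is built in,
	 * which is exactly the combination the removed Kconfig
	 * dependencies ruled out.
	 */
	if (IS_REACHABLE(CONFIG_VXLAN))
		vxlan_get_rx_port(netdev);	/* pre-udp_tunnel style */
}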
- endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 1042f2af8..45ab74676 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -127,6 +127,8 @@ struct qed_tunn_update_params { */ enum qed_pci_personality { QED_PCI_ETH, + QED_PCI_ISCSI, + QED_PCI_ETH_ROCE, QED_PCI_DEFAULT /* default in shmem */ }; @@ -170,6 +172,8 @@ enum QED_PORT_MODE { enum qed_dev_cap { QED_DEV_CAP_ETH, + QED_DEV_CAP_ISCSI, + QED_DEV_CAP_ROCE, }; struct qed_hw_info { @@ -183,6 +187,8 @@ struct qed_hw_info { #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) +#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \ + RESC_NUM(_p_hwfn, resc)) #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) u8 num_tc; @@ -255,6 +261,7 @@ struct qed_qm_info { u8 pure_lb_pq; u8 offload_pq; u8 pure_ack_pq; + u8 ooo_pq; u8 vf_queues_offset; u16 num_pqs; u16 num_vf_pqs; @@ -267,6 +274,7 @@ struct qed_qm_info { u8 pf_wfq; u32 pf_rl; struct qed_wfq_data *wfq_data; + u8 num_pf_rls; }; struct storm_stats { @@ -312,6 +320,7 @@ struct qed_hwfn { bool hw_init_done; u8 num_funcs_on_engine; + u8 enabled_func_idx; /* BAR access */ void __iomem *regview; @@ -350,6 +359,9 @@ struct qed_hwfn { /* Protocol related */ struct qed_pf_params pf_params; + bool b_rdma_enabled_in_prs; + u32 rdma_prs_search_reg; + /* Array of sb_info of all status blocks */ struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD]; u16 num_sbs; @@ -477,8 +489,8 @@ struct qed_dev { u32 int_mode; enum qed_coalescing_mode int_coalescing_mode; - u8 rx_coalesce_usecs; - u8 tx_coalesce_usecs; + u16 rx_coalesce_usecs; + u16 tx_coalesce_usecs; /* Start Bar offset of first hwfn */ void __iomem *regview; @@ -549,12 +561,22 @@ struct qed_dev { static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, u32 concrete_fid) { + u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID); u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); + u8 vf_valid = GET_FIELD(concrete_fid, + PXP_CONCRETE_FID_VFVALID); + u8 sw_fid; + + if (vf_valid) + sw_fid = vfid + MAX_NUM_PFS; + else + sw_fid = pfid; - return pfid; + return sw_fid; } #define PURE_LB_TC 8 +#define OOO_LB_TC 9 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index ac284c58d..1c35f3761 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -39,6 +39,14 @@ #define DQ_RANGE_SHIFT 4 #define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT) +/* Searcher constants */ +#define SRC_MIN_NUM_ELEMS 256 + +/* Timers constants */ +#define TM_SHIFT 7 +#define TM_ALIGN BIT(TM_SHIFT) +#define TM_ELEM_SIZE 4 + /* ILT constants */ #define ILT_DEFAULT_HW_P_SIZE 3 #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) @@ -56,26 +64,71 @@ union conn_context { struct core_conn_context core_ctx; struct eth_conn_context eth_ctx; + struct iscsi_conn_context iscsi_ctx; + struct roce_conn_context roce_ctx; +}; + +/* TYPE-0 task context - iSCSI */ +union type0_task_context { + struct iscsi_task_context iscsi_ctx; }; +/* TYPE-1 task context - ROCE */ +union type1_task_context { + struct rdma_task_context roce_ctx; +}; + +struct src_ent { + u8 opaque[56]; + u64 next; +}; + +#define CDUT_SEG_ALIGNMET 3 /* 
in 4k chunks */ +#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12)) + #define CONN_CXT_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) +#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context)) + +#define TYPE0_TASK_CXT_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn) + +/* Alignment is inherent to the type1_task_context structure */ +#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context) + /* PF per protocl configuration object */ +#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS) +#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS) + +struct qed_tid_seg { + u32 count; + u8 type; + bool has_fl_mem; +}; + struct qed_conn_type_cfg { u32 cid_count; u32 cid_start; u32 cids_per_vf; + struct qed_tid_seg tid_seg[TASK_SEGMENTS]; }; /* ILT Client configuration, Per connection type (protocol) resources. */ #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) #define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) #define CDUC_BLK (0) +#define SRQ_BLK (0) +#define CDUT_SEG_BLK(n) (1 + (u8)(n)) +#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS) enum ilt_clients { ILT_CLI_CDUC, + ILT_CLI_CDUT, ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_TSDM, ILT_CLI_MAX }; @@ -88,6 +141,7 @@ struct qed_ilt_cli_blk { u32 total_size; /* 0 means not active */ u32 real_size_in_page; u32 start_line; + u32 dynamic_line_cnt; }; struct qed_ilt_client_cfg { @@ -131,18 +185,44 @@ struct qed_cxt_mngr { /* computed ILT structure */ struct qed_ilt_client_cfg clients[ILT_CLI_MAX]; + /* Task type sizes */ + u32 task_type_size[NUM_TASK_TYPES]; + /* total number of VFs for this hwfn - * ALL VFs are symmetric in terms of HW resources */ u32 vf_count; + /* total number of SRQ's for this hwfn */ + u32 srq_count; + /* Acquired CIDs */ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; /* ILT shadow table */ struct qed_dma_mem *ilt_shadow; u32 pf_start_line; + + /* Mutex for a dynamic ILT allocation */ + struct mutex mutex; + + /* SRC T2 */ + struct qed_dma_mem *t2; + u32 t2_num_pages; + u64 first_free; + u64 last_free; }; +static bool src_proto(enum protocol_type type) +{ + return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_ROCE; +} + +static bool tm_cid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_ROCE; +} /* counts the iids for the CDU/CDUC ILT client configuration */ struct qed_cdu_iids { @@ -161,21 +241,120 @@ static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr, } } +/* counts the iids for the Searcher block configuration */ +struct qed_src_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr, + struct qed_src_iids *iids) +{ + u32 i; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + if (!src_proto(i)) + continue; + + iids->pf_cids += p_mngr->conn_cfg[i].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf; + } +} + +/* counts the iids for the Timers block configuration */ +struct qed_tm_iids { + u32 pf_cids; + u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */ + u32 pf_tids_total; + u32 per_vf_cids; + u32 per_vf_tids; +}; + +static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr, + struct qed_tm_iids *iids) +{ + u32 i, j; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i]; + + if (tm_cid_proto(i)) { + iids->pf_cids += p_cfg->cid_count; + iids->per_vf_cids += p_cfg->cids_per_vf; + } + } + + iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN); + iids->per_vf_cids = 
roundup(iids->per_vf_cids, TM_ALIGN); + iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN); + + for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) { + iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN); + iids->pf_tids_total += iids->pf_tids[j]; + } +} + static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, struct qed_qm_iids *iids) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; - u32 vf_cids = 0, type; + struct qed_tid_seg *segs; + u32 vf_cids = 0, type, j; + u32 vf_tids = 0; for (type = 0; type < MAX_CONN_TYPES; type++) { iids->cids += p_mngr->conn_cfg[type].cid_count; vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + + segs = p_mngr->conn_cfg[type].tid_seg; + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->tids += segs[j].count; + + /* The last array element is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. + */ + vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->tids += vf_tids * p_mngr->vf_count; + DP_VERBOSE(p_hwfn, QED_MSG_ILT, - "iids: CIDS %08x vf_cids %08x\n", - iids->cids, iids->vf_cids); + "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n", + iids->cids, iids->vf_cids, iids->tids, vf_tids); +} + +static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn, + u32 seg) +{ + struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr; + u32 i; + + /* Find the protocol with tid count > 0 for this segment. + * Note: there can only be one and this is already validated. + */ + for (i = 0; i < MAX_CONN_TYPES; i++) + if (p_cfg->conn_cfg[i].tid_seg[seg].count) + return &p_cfg->conn_cfg[i].tid_seg[seg]; + return NULL; +} + +void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) +{ + struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + + p_mgr->srq_count = num_srqs; +} + +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + + return p_mgr->srq_count; } /* set the iids count per protocol */ @@ -188,6 +367,14 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN); + + if (type == PROTOCOLID_ROCE) { + u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val; + u32 cxt_size = CONN_CXT_SIZE(p_hwfn); + u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + + p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page); + } } u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, @@ -200,6 +387,37 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; } +u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, + enum protocol_type type) +{ + return p_hwfn->p_cxt_mngr->acquired[type].start_cid; +} + +u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type) +{ + u32 cnt = 0; + int i; + + for (i = 0; i < TASK_SEGMENTS; i++) + cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count; + + return cnt; +} + +static void +qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type proto, + u8 seg, u8 seg_type, u32 count, bool has_fl) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + p_seg->count = count; + p_seg->has_fl_mem = has_fl; + p_seg->type = seg_type; +} + static void qed_ilt_cli_blk_fill(struct
qed_ilt_client_cfg *p_cli, struct qed_ilt_cli_blk *p_blk, u32 start_line, u32 total_size, @@ -241,17 +459,42 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, p_blk->real_size_in_page, p_blk->start_line); } +static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn, + enum ilt_clients ilt_client) +{ + u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count; + struct qed_ilt_client_cfg *p_cli; + u32 lines_to_skip = 0; + u32 cxts_per_p; + + if (ilt_client == ILT_CLI_CDUC) { + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + + cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) / + (u32) CONN_CXT_SIZE(p_hwfn); + + lines_to_skip = cid_count / cxts_per_p; + } + + return lines_to_skip; +} + int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 curr_line, total, i, task_size, line; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; struct qed_cdu_iids cdu_iids; + struct qed_src_iids src_iids; struct qed_qm_iids qm_iids; - u32 curr_line, total, i; + struct qed_tm_iids tm_iids; + struct qed_tid_seg *p_seg; memset(&qm_iids, 0, sizeof(qm_iids)); memset(&cdu_iids, 0, sizeof(cdu_iids)); + memset(&src_iids, 0, sizeof(src_iids)); + memset(&tm_iids, 0, sizeof(tm_iids)); p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); @@ -279,6 +522,9 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); p_cli->pf_total_lines = curr_line - p_blk->start_line; + p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn, + ILT_CLI_CDUC); + /* CDUC VF */ p_blk = &p_cli->vf_blks[CDUC_BLK]; total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); @@ -293,21 +539,128 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + /* CDUT PF */ + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + p_cli->first.val = curr_line; + + /* first the 'working' task memory */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)]; + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + + /* next the 'init' task memory (forced load memory) */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]; + + if (!p_seg->has_fl_mem) { + /* The segment is active (total size of 'working' + * memory is > 0) but has no FL (forced-load, Init) + * memory. Thus: + * + * 1. The total-size in the corresponding FL block of + * the ILT client is set to 0 - no ILT lines are + * provisioned and no ILT memory is allocated. + * + * 2. The start-line of said block is set to the + * start line of the matching working memory + * block in the ILT client. This is later used to + * configure the CDU segment offset registers and + * results in FL commands for TIDs of this + * segment behaving as regular load commands + * (loading TIDs from the working memory).
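The qed_ilt_cli_blk_fill()/qed_ilt_cli_adv_line() calls repeated through this function are plain bookkeeping: record a block's size and first line, then advance the running line counter by the number of ILT pages the block occupies. A condensed sketch with both steps folded into one helper (names invented):

struct sketch_ilt_blk {
	u32 total_size;		/* bytes; 0 marks the block inactive */
	u32 real_size_in_page;	/* usable bytes per ILT page */
	u32 start_line;
};

static void sketch_ilt_blk_fill(struct sketch_ilt_blk *blk, u32 *curr_line,
				u32 total_size, u32 elem_size, u32 page_size)
{
	blk->total_size = total_size;
	/* only whole elements fit in a page; the remainder is waste */
	blk->real_size_in_page = elem_size ?
		(page_size / elem_size) * elem_size : page_size;
	blk->start_line = *curr_line;
	*curr_line += DIV_ROUND_UP(total_size, blk->real_size_in_page);
}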
+ */ + line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line; + + qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + continue; + } + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line; + + /* CDUT VF */ + p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF); + if (p_seg && p_seg->count) { + /* Strictly speaking we need to iterate over all VF + * task segment types, but a VF has only 1 segment + */ + + /* 'working' memory */ + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + /* 'init' memory */ + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + if (!p_seg->has_fl_mem) { + /* see comment above */ + line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line; + qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + } else { + task_size = p_mngr->task_type_size[p_seg->type]; + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, task_size); + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->vf_total_lines = curr_line - + p_cli->vf_blks[0].start_line; + + /* Now for the rest of the VFs */ + for (i = 1; i < p_mngr->vf_count; i++) { + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + } + /* QM */ p_cli = &p_mngr->clients[ILT_CLI_QM]; p_blk = &p_cli->pf_blks[0]; qed_cxt_qm_iids(p_hwfn, &qm_iids); total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, - qm_iids.vf_cids, 0, + qm_iids.vf_cids, qm_iids.tids, p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs); DP_VERBOSE(p_hwfn, QED_MSG_ILT, - "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", + "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", qm_iids.cids, qm_iids.vf_cids, + qm_iids.tids, p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); qed_ilt_cli_blk_fill(p_cli, p_blk, @@ -317,6 +670,75 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); p_cli->pf_total_lines = curr_line - p_blk->start_line; + /* SRC */ + p_cli = &p_mngr->clients[ILT_CLI_SRC]; + qed_cxt_src_iids(p_mngr, &src_iids); + + /* Both the PF and VFs searcher connections are stored in the per PF + * database. Thus sum the PF searcher cids and all the VFs searcher + * cids.
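The SRC sizing in the next hunk clamps the connection count to a floor and then rounds it up to a power of two, presumably so the hardware can mask a hash value instead of dividing. The equivalent stand-alone computation:

static u32 sketch_src_table_elems(u32 conn_num)
{
	u32 elems = max_t(u32, conn_num, 256 /* SRC_MIN_NUM_ELEMS */);

	return roundup_pow_of_two(elems);	/* e.g. 300 becomes 512 */
}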
+ */ + total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + if (total) { + u32 local_max = max_t(u32, total, + SRC_MIN_NUM_ELEMS); + + total = roundup_pow_of_two(local_max); + + p_blk = &p_cli->pf_blks[0]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * sizeof(struct src_ent), + sizeof(struct src_ent)); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_SRC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM PF */ + p_cli = &p_mngr->clients[ILT_CLI_TM]; + qed_cxt_tm_iids(p_mngr, &tm_iids); + total = tm_iids.pf_cids + tm_iids.pf_tids_total; + if (total) { + p_blk = &p_cli->pf_blks[0]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM VF */ + total = tm_iids.per_vf_cids + tm_iids.per_vf_tids; + if (total) { + p_blk = &p_cli->vf_blks[0]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + for (i = 1; i < p_mngr->vf_count; i++) + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + } + + /* TSDM (SRQ CONTEXT) */ + total = qed_cxt_get_srq_count(p_hwfn); + + if (total) { + p_cli = &p_mngr->clients[ILT_CLI_TSDM]; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * SRQ_CXT_SIZE, SRQ_CXT_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TSDM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > RESC_NUM(p_hwfn, QED_ILT)) { DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", @@ -327,8 +749,122 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) return 0; } +static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 i; + + if (!p_mngr->t2) + return; + + for (i = 0; i < p_mngr->t2_num_pages; i++) + if (p_mngr->t2[i].p_virt) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_mngr->t2[i].size, + p_mngr->t2[i].p_virt, + p_mngr->t2[i].p_phys); + + kfree(p_mngr->t2); + p_mngr->t2 = NULL; +} + +static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 conn_num, total_size, ent_per_page, psz, i; + struct qed_ilt_client_cfg *p_src; + struct qed_src_iids src_iids; + struct qed_dma_mem *p_t2; + int rc; + + memset(&src_iids, 0, sizeof(src_iids)); + + /* if the SRC ILT client is inactive - there are no connections + * requiring the searcher, leave.
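qed_cxt_src_t2_alloc() just below strings every searcher entry into a single free list: within a page each entry's 'next' field carries the bus address of its successor, and the last entry of a page points at the first entry of the following page (or 0 on the final page). A condensed sketch of that per-page chaining step:

static void sketch_chain_t2_page(struct src_ent *entries, u32 ent_num,
				 u64 page_phys, u64 next_page_phys)
{
	u32 j;

	for (j = 0; j < ent_num - 1; j++)
		entries[j].next =
			cpu_to_be64(page_phys + (j + 1) * sizeof(*entries));

	/* 0 here terminates the whole list on the last page */
	entries[j].next = cpu_to_be64(next_page_phys);
}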
+ */ + p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC]; + if (!p_src->active) + return 0; + + qed_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + total_size = conn_num * sizeof(struct src_ent); + + /* use the same page size as the SRC ILT client */ + psz = ILT_PAGE_IN_BYTES(p_src->p_size.val); + p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz); + + /* allocate t2 */ + p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem), + GFP_KERNEL); + if (!p_mngr->t2) { + DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n"); + rc = -ENOMEM; + goto t2_fail; + } + + /* allocate t2 pages */ + for (i = 0; i < p_mngr->t2_num_pages; i++) { + u32 size = min_t(u32, total_size, psz); + void **p_virt = &p_mngr->t2[i].p_virt; + + *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + size, + &p_mngr->t2[i].p_phys, GFP_KERNEL); + if (!p_mngr->t2[i].p_virt) { + rc = -ENOMEM; + goto t2_fail; + } + memset(*p_virt, 0, size); + p_mngr->t2[i].size = size; + total_size -= size; + } + + /* Set the t2 pointers */ + + /* entries per page - must be a power of two */ + ent_per_page = psz / sizeof(struct src_ent); + + p_mngr->first_free = (u64) p_mngr->t2[0].p_phys; + + p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page]; + p_mngr->last_free = (u64) p_t2->p_phys + + ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent); + + for (i = 0; i < p_mngr->t2_num_pages; i++) { + u32 ent_num = min_t(u32, + ent_per_page, + conn_num); + struct src_ent *entries = p_mngr->t2[i].p_virt; + u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val; + u32 j; + + for (j = 0; j < ent_num - 1; j++) { + val = p_ent_phys + (j + 1) * sizeof(struct src_ent); + entries[j].next = cpu_to_be64(val); + } + + if (i < p_mngr->t2_num_pages - 1) + val = (u64) p_mngr->t2[i + 1].p_phys; + else + val = 0; + entries[j].next = cpu_to_be64(val); + + conn_num -= ent_num; + } + + return 0; + +t2_fail: + qed_cxt_src_t2_free(p_hwfn); + return rc; +} + #define for_each_ilt_valid_client(pos, clients) \ - for (pos = 0; pos < ILT_CLI_MAX; pos++) + for (pos = 0; pos < ILT_CLI_MAX; pos++) \ + if (!clients[pos].active) { \ + continue; \ + } else \ /* Total number of ILT lines used by this PF */ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) @@ -336,12 +872,8 @@ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) u32 size = 0; u32 i; - for_each_ilt_valid_client(i, ilt_clients) { - if (!ilt_clients[i].active) - continue; - size += (ilt_clients[i].last.val - - ilt_clients[i].first.val + 1); - } + for_each_ilt_valid_client(i, ilt_clients) + size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1); return size; } @@ -372,15 +904,22 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, u32 start_line_offset) { struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; - u32 lines, line, sz_left; + u32 lines, line, sz_left, lines_to_skip = 0; + + /* Special handling for RoCE that supports dynamic allocation */ + if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) && + ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM)) + return 0; + + lines_to_skip = p_blk->dynamic_line_cnt; if (!p_blk->total_size) return 0; sz_left = p_blk->total_size; - lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page); + lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip; line = p_blk->start_line + start_line_offset - - p_hwfn->p_cxt_mngr->pf_start_line; + p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip; for (; lines; lines--) 
{ dma_addr_t p_phys; @@ -434,8 +973,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) (u32)(size * sizeof(struct qed_dma_mem))); for_each_ilt_valid_client(i, clients) { - if (!clients[i].active) - continue; for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { p_blk = &clients[i].pf_blks[j]; rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); @@ -514,6 +1051,7 @@ cid_map_fail: int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) { + struct qed_ilt_client_cfg *clients; struct qed_cxt_mngr *p_mngr; u32 i; @@ -524,20 +1062,42 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) } /* Initialize ILT client registers */ - p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); - p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); - p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); - - p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); - p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); - p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); - + clients = p_mngr->clients; + clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); + clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); + clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); + + clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); + clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); + clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); + + clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT); + clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT); + clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE); + + clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT); + clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT); + clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE); + + clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT); + clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT); + clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE); + + clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT); + clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT); + clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE); /* default ILT page size for all clients is 32K */ for (i = 0; i < ILT_CLI_MAX; i++) p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; + /* Initialize task sizes */ + p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn); + p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn); + if (p_hwfn->cdev->p_iov_info) p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs; + /* Initialize the dynamic ILT allocation mutex */ + mutex_init(&p_mngr->mutex); /* Set the cxt manager pointer prior to further allocations */ p_hwfn->p_cxt_mngr = p_mngr; @@ -556,6 +1116,13 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn) goto tables_alloc_fail; } + /* Allocate the T2 table */ + rc = qed_cxt_src_t2_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n"); + goto tables_alloc_fail; + } + /* Allocate and initialize the acquired cids bitmaps */ rc = qed_cid_map_alloc(p_hwfn); if (rc) { @@ -576,6 +1143,7 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn) return; qed_cid_map_free(p_hwfn); + qed_cxt_src_t2_free(p_hwfn); qed_ilt_shadow_free(p_hwfn); kfree(p_hwfn->p_cxt_mngr); @@ -620,6 +1188,48 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) #define CDUC_NCIB_MASK \ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) +#define CDUT_TYPE0_CXT_SIZE_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT + +#define 
CDUT_TYPE0_CXT_SIZE_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \ + CDUT_TYPE0_CXT_SIZE_SHIFT) + +#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT + +#define CDUT_TYPE0_BLOCK_WASTE_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \ + CDUT_TYPE0_BLOCK_WASTE_SHIFT) + +#define CDUT_TYPE0_NCIB_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT + +#define CDUT_TYPE0_NCIB_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \ + CDUT_TYPE0_NCIB_SHIFT) + +#define CDUT_TYPE1_CXT_SIZE_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT + +#define CDUT_TYPE1_CXT_SIZE_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \ + CDUT_TYPE1_CXT_SIZE_SHIFT) + +#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT + +#define CDUT_TYPE1_BLOCK_WASTE_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \ + CDUT_TYPE1_BLOCK_WASTE_SHIFT) + +#define CDUT_TYPE1_NCIB_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT + +#define CDUT_TYPE1_NCIB_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \ + CDUT_TYPE1_NCIB_SHIFT) + static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) { u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; @@ -634,6 +1244,92 @@ static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); + + /* CDUT - type-0 tasks configuration */ + page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val; + cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0]; + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + /* cxt size and block-waste are multiples of 8 */ + cdu_params = 0; + SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params); + + /* CDUT - type-1 tasks configuration */ + cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1]; + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + /* cxt size and block-waste are multiples of 8 */ + cdu_params = 0; + SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params); +} + +/* CDU PF */ +#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT +#define CDU_SEG_REG_TYPE_MASK 0x1 +#define CDU_SEG_REG_OFFSET_SHIFT 0 +#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK + +static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + struct qed_tid_seg *p_seg; + u32 cdu_seg_params, offset; + int i; + + static const u32 rt_type_offset_arr[] = { + CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET + }; + + static const u32 rt_type_offset_fl_arr[] = { + CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET + }; + + p_cli = 
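/* A worked example of the CDUT packing above, assuming the default 32K
 * ILT page and a hypothetical type-0 task context of 128 bytes (example
 * numbers only):
 *
 *	elems_per_page = 32768 / 128 = 256
 *	block_waste    = 32768 - 256 * 128 = 0
 *
 *	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, 128 >> 3);	16
 *	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, 0 >> 3);	0
 *	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, 256);
 *
 * The >> 3 is safe because, as the comments note, the context size and
 * the block waste are both multiples of 8.
 */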
&p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + + /* Only CDUT requires initialization during the PF phase */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + /* Segment 0 */ + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg) + continue; + + /* Note: start_line is already adjusted for the CDU + * segment register granularity, so we just need to + * divide. Adjustment is implicit as we assume ILT + * Page size is larger than 32K! + */ + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params); + + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params); + } } void qed_qm_init_pf(struct qed_hwfn *p_hwfn) @@ -742,14 +1438,11 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) ilt_clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, ilt_clients) { - if (!ilt_clients[i].active) - continue; STORE_RT_REG(p_hwfn, ilt_clients[i].first.reg, ilt_clients[i].first.val); STORE_RT_REG(p_hwfn, - ilt_clients[i].last.reg, - ilt_clients[i].last.val); + ilt_clients[i].last.reg, ilt_clients[i].last.val); STORE_RT_REG(p_hwfn, ilt_clients[i].p_size.reg, ilt_clients[i].p_size.val); @@ -786,6 +1479,33 @@ static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn) PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, p_cli->vf_total_lines); } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } } /* ILT (PSWRQ2) PF */ @@ -804,9 +1524,6 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, clients) { - if (!clients[i].active) - continue; - /** Client's 1st val and RT array are absolute, ILT shadows' * lines are relative. 
*/ @@ -837,6 +1554,137 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) } } +/* SRC (Searcher) PF */ +static void qed_src_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 rounded_conn_num, conn_num, conn_max; + struct qed_src_iids src_iids; + + memset(&src_iids, 0, sizeof(src_iids)); + qed_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + if (!conn_num) + return; + + conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS); + rounded_conn_num = roundup_pow_of_two(conn_max); + + STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num); + STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET, + ilog2(rounded_conn_num)); + + STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET, + p_hwfn->p_cxt_mngr->first_free); + STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET, + p_hwfn->p_cxt_mngr->last_free); +} + +/* Timers PF */ +#define TM_CFG_NUM_IDS_SHIFT 0 +#define TM_CFG_NUM_IDS_MASK 0xFFFFULL +#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16 +#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL +#define TM_CFG_PARENT_PF_SHIFT 25 +#define TM_CFG_PARENT_PF_MASK 0x7ULL + +#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30 +#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL + +#define TM_CFG_TID_OFFSET_SHIFT 30 +#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL +#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49 +#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL + +static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 active_seg_mask = 0, tm_offset, rt_reg; + struct qed_tm_iids tm_iids; + u64 cfg_word; + u8 i; + + memset(&tm_iids, 0, sizeof(tm_iids)); + qed_cxt_tm_iids(p_mngr, &tm_iids); + + /* @@@TBD No pre-scan for now */ + + /* Note: We assume consecutive VFs for a PF */ + for (i = 0; i < p_mngr->vf_count; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); + SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); + rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i); + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + } + + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */ + SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */ + + rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id); + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + + /* enable scan */ + STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET, + tm_iids.pf_cids ? 
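/* An illustrative packing of one 64-bit timers config word using the
 * TM_CFG_* fields defined above (the values are made up):
 *
 *	u64 cfg_word = 0;
 *
 *	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, 0x100);	bits 0..15
 *	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);	bits 16..24
 *	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 2);	bits 25..27
 *
 * Each runtime register slot is 32 bits wide, which is why the code
 * advances rt_reg by sizeof(cfg_word) / sizeof(u32) == 2 slots per
 * entry.
 */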
0x1 : 0x0); + + /* @@@TBD how to enable the scan for the VFs */ + + tm_offset = tm_iids.per_vf_cids; + + /* Note: We assume consecutive VFs for a PF */ + for (i = 0; i < p_mngr->vf_count; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); + SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); + SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0); + + rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i); + + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + } + + tm_offset = tm_iids.pf_cids; + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); + SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); + SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0); + + rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (NUM_OF_VFS(p_hwfn->cdev) + + p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i); + + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0); + + tm_offset += tm_iids.pf_tids[i]; + } + + if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) + active_seg_mask = 0; + + STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask); + + /* @@@TBD how to enable the scan for the VFs */ +} + void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) { qed_cdu_init_common(p_hwfn); @@ -847,7 +1695,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) qed_qm_init_pf(p_hwfn); qed_cm_init_pf(p_hwfn); qed_dq_init_pf(p_hwfn); + qed_cdu_init_pf(p_hwfn); qed_ilt_init_pf(p_hwfn); + qed_src_init_pf(p_hwfn); + qed_tm_init_pf(p_hwfn); } int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, @@ -968,17 +1819,439 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, return 0; } -int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) +void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, + struct qed_rdma_pf_params *p_params) { - struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; + u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs; + enum protocol_type proto; + + num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs); + num_tasks = num_mrs; /* each mr uses a single task id */ + num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs); + + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH_ROCE: + num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps); + num_cons = num_qps * 2; /* each QP requires two connections */ + proto = PROTOCOLID_ROCE; + break; + default: + return; + } + + if (num_cons && num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0); + + /* Deliberately passing ROCE for the task id. This is because + * iWARP / RoCE share the task id. 
+ */ + qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE, + QED_CXT_ROCE_TID_SEG, 1, + num_tasks, false); + qed_cxt_set_srq_count(p_hwfn, num_srqs); + } else { + DP_INFO(p_hwfn->cdev, + "RDMA personality used without setting params!\n"); + } +} +int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) +{ /* Set the number of required CORE connections */ u32 core_cids = 1; /* SPQ */ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons, 1); + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH_ROCE: + { + qed_rdma_set_pf_params(p_hwfn, + &p_hwfn-> + pf_params.rdma_pf_params); + /* no need for break since RoCE coexists with Ethernet */ + } + case QED_PCI_ETH: + { + struct qed_eth_pf_params *p_params = + &p_hwfn->pf_params.eth_pf_params; + + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, 1); + break; + } + case QED_PCI_ISCSI: + { + struct qed_iscsi_pf_params *p_params; + + p_params = &p_hwfn->pf_params.iscsi_pf_params; + + if (p_params->num_cons && p_params->num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_ISCSI, + p_params->num_cons, + 0); + + qed_cxt_set_proto_tid_count(p_hwfn, + PROTOCOLID_ISCSI, + QED_CXT_ISCSI_TID_SEG, + 0, + p_params->num_tasks, + true); + } else { + DP_INFO(p_hwfn->cdev, + "Iscsi personality used without setting params!\n"); + } + break; + } + default: + return -EINVAL; + } + + return 0; +} + +int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, + struct qed_tid_mem *p_info) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 proto, seg, total_lines, i, shadow_line; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_fl_seg; + struct qed_tid_seg *p_seg_info; + + /* Verify the personality */ + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ISCSI: + proto = PROTOCOLID_ISCSI; + seg = QED_CXT_ISCSI_TID_SEG; + break; + default: + return -EINVAL; + } + + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + if (!p_cli->active) + return -EINVAL; + + p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; + if (!p_seg_info->has_fl_mem) + return -EINVAL; + + p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; + total_lines = DIV_ROUND_UP(p_fl_seg->total_size, + p_fl_seg->real_size_in_page); + + for (i = 0; i < total_lines; i++) { + shadow_line = i + p_fl_seg->start_line - + p_hwfn->p_cxt_mngr->pf_start_line; + p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt; + } + p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) - + p_fl_seg->real_size_in_page; + p_info->tid_size = p_mngr->task_type_size[p_seg_info->type]; + p_info->num_tids_per_block = p_fl_seg->real_size_in_page / + p_info->tid_size; + + return 0; +} + +/* This function is very RoCE-oriented; if another protocol wants this + * feature in the future, we'll need to make the function more generic. + */ +int +qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, + enum qed_cxt_elem_type elem_type, u32 iid) +{ + u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + struct qed_ptt *p_ptt; + dma_addr_t p_phys; + u64 ilt_hw_entry; + void *p_virt; + int rc = 0; + + switch (elem_type) { + case QED_ELEM_CXT: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + break; + case QED_ELEM_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + 
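/* A worked example of the iid-to-ILT-line math used in this function,
 * assuming a 32K ILT page and a 512-byte connection context (example
 * numbers only):
 *
 *	elems_per_p = 32768 / 512 = 64 elements per page
 *	iid = 200  =>  line = p_blk->start_line + 200 / 64
 *	           =>  page index 3 relative to start_line
 *	shadow_line = line - pf_start_line, the index into ilt_shadow[]
 *
 * A single ILT line therefore backs elems_per_p consecutive iids, so one
 * dynamic allocation satisfies many follow-up requests for nearby iids.
 */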
break; + case QED_ELEM_TASK: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)]; + break; + default: + DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type); + return -EINVAL; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + line = p_blk->start_line + (iid / elems_per_p); + shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line; + + /* If the line is already allocated, do nothing; otherwise allocate it + * and write it to the PSWRQ2 registers. + * This section can be run in parallel from different contexts and thus + * a mutex protection is needed. + */ + + mutex_lock(&p_hwfn->p_cxt_mngr->mutex); + + if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt) + goto out0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, + "QED_TIME_OUT on ptt acquire - dynamic allocation"); + rc = -EBUSY; + goto out0; + } + + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_blk->real_size_in_page, + &p_phys, GFP_KERNEL); + if (!p_virt) { + rc = -ENOMEM; + goto out1; + } + memset(p_virt, 0, p_blk->real_size_in_page); + + /* configuration of refTagMask to 0xF is required for RoCE DIF MR only, + * to compensate for a HW bug, but it is configured even if DIF is not + * enabled. This is harmless and allows us to avoid a dedicated API. We + * configure the field for all of the contexts on the newly allocated + * page. + */ + if (elem_type == QED_ELEM_TASK) { + u32 elem_i; + u8 *elem_start = (u8 *)p_virt; + union type1_task_context *elem; + + for (elem_i = 0; elem_i < elems_per_p; elem_i++) { + elem = (union type1_task_context *)elem_start; + SET_FIELD(elem->roce_ctx.tdif_context.flags1, + TDIF_TASK_CONTEXT_REFTAGMASK, 0xf); + elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); + } + } + + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt; + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys; + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size = + p_blk->real_size_in_page; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS); + + ilt_hw_entry = 0; + SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); + SET_FIELD(ilt_hw_entry, + ILT_ENTRY_PHY_ADDR, + (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12)); + + /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */ + qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry, + reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0); + + if (elem_type == QED_ELEM_CXT) { + u32 last_cid_allocated = (1 + (iid / elems_per_p)) * + elems_per_p; + + /* Update the relevant register in the parser */ + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, + last_cid_allocated - 1); + + if (!p_hwfn->b_rdma_enabled_in_prs) { + /* Enable RoCE search */ + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); + p_hwfn->b_rdma_enabled_in_prs = true; + } + } + +out1: + qed_ptt_release(p_hwfn, p_ptt); +out0: + mutex_unlock(&p_hwfn->p_cxt_mngr->mutex); + + return rc; +} + +/* This function is very RoCE-oriented; if another protocol wants this + * feature in the future, we'll need to make the function more generic. + */ +static int +qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn, + enum qed_cxt_elem_type elem_type, + u32 start_iid, u32 count) +{ + u32 start_line, end_line, shadow_start_line, shadow_end_line; + u32 reg_offset, elem_size, hw_p_size, elems_per_p; + 
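/* Sketch of the ILT entry format written via DMAE in both the dynamic
 * allocation path above and the free path below: a 64-bit word carrying
 * a valid bit and the page's physical address at 4K granularity.
 * Freeing writes back an all-zero (invalid) entry:
 *
 *	u64 ilt_hw_entry = 0;
 *
 *	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
 *	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, p_phys >> 12);
 *
 * DMAE is mandatory here because, as the comments note, the
 * PSWRQ2_REG_ILT_MEMORY line is a wide-bus register.
 */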
struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + u32 end_iid = start_iid + count; + struct qed_ptt *p_ptt; + u64 ilt_hw_entry = 0; + u32 i; + + switch (elem_type) { + case QED_ELEM_CXT: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + break; + case QED_ELEM_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; + case QED_ELEM_TASK: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)]; + break; + default: + DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type); + return -EINVAL; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + start_line = p_blk->start_line + (start_iid / elems_per_p); + end_line = p_blk->start_line + (end_iid / elems_per_p); + if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p)) + end_line--; + + shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line; + shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, + "QED_TIME_OUT on ptt acquire - dynamic allocation"); + return -EBUSY; + } + + for (i = shadow_start_line; i < shadow_end_line; i++) { + if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt) + continue; + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_hwfn->p_cxt_mngr->ilt_shadow[i].size, + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt, + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys); + + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL; + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0; + p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + ((start_line++) * ILT_REG_SIZE_IN_BYTES * + ILT_ENTRY_IN_REGS); + + /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a + * wide-bus. 
+ */ + qed_dmae_host2grc(p_hwfn, p_ptt, + (u64) (uintptr_t) &ilt_hw_entry, + reg_offset, + sizeof(ilt_hw_entry) / sizeof(u32), + 0); + } + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) +{ + int rc; + u32 cid; + + /* Free Connection CXT */ + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT, + qed_cxt_get_proto_cid_start(p_hwfn, + proto), + qed_cxt_get_proto_cid_count(p_hwfn, + proto, &cid)); + + if (rc) + return rc; + + /* Free Task CXT */ + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, + qed_cxt_get_proto_tid_count(p_hwfn, proto)); + if (rc) + return rc; + + /* Free TSDM CXT */ + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0, + qed_cxt_get_srq_count(p_hwfn)); + + return rc; +} + +int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, + u32 tid, u8 ctx_type, void **pp_task_ctx) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_seg; + struct qed_tid_seg *p_seg_info; + u32 proto, seg; + u32 total_lines; + u32 tid_size, ilt_idx; + u32 num_tids_per_block; + + /* Verify the personality */ + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ISCSI: + proto = PROTOCOLID_ISCSI; + seg = QED_CXT_ISCSI_TID_SEG; + break; + default: + return -EINVAL; + } + + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + if (!p_cli->active) + return -EINVAL; + + p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + if (ctx_type == QED_CTX_WORKING_MEM) { + p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)]; + } else if (ctx_type == QED_CTX_FL_MEM) { + if (!p_seg_info->has_fl_mem) + return -EINVAL; + p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; + } else { + return -EINVAL; + } + total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page); + tid_size = p_mngr->task_type_size[p_seg_info->type]; + num_tids_per_block = p_seg->real_size_in_page / tid_size; + + if (total_lines < tid / num_tids_per_block) + return -EINVAL; + + ilt_idx = tid / num_tids_per_block + p_seg->start_line - + p_mngr->pf_start_line; + *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt + + (tid % num_tids_per_block) * tid_size; return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 234c0fa8d..c6f6f2e81 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -21,6 +21,14 @@ struct qed_cxt_info { enum protocol_type type; }; +#define MAX_TID_BLOCKS 512 +struct qed_tid_mem { + u32 tid_size; + u32 num_tids_per_block; + u32 waste; + u8 *blocks[MAX_TID_BLOCKS]; /* 4K */ +}; + /** * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type * @@ -46,8 +54,22 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info); +/** + * @brief qed_cxt_get_tid_mem_info + * + * @param p_hwfn + * @param p_info + * + * @return int + */ +int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, + struct qed_tid_mem *p_info); + +#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI +#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE enum qed_cxt_elem_type { QED_ELEM_CXT, + QED_ELEM_SRQ, QED_ELEM_TASK }; @@ -149,4 +171,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); +#define QED_CTX_WORKING_MEM 0 +#define QED_CTX_FL_MEM 1 #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 
21ec1c2df..3656d2fd6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -18,6 +19,10 @@ #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_sp.h" +#include "qed_sriov.h" +#ifdef CONFIG_DCB +#include +#endif #define QED_DCBX_MAX_MIB_READ_TRY (100) #define QED_ETH_TYPE_DEFAULT (0) @@ -48,40 +53,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) DCBX_APP_SF_ETHTYPE); } +static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); +} + static bool qed_dcbx_app_port(u32 app_info_bitmap) { return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == DCBX_APP_SF_PORT); } -static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_port(app_info_bitmap); + + return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); +} + +static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_DEFAULT); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); } -static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_port(app_info_bitmap) && - proto_id == QED_TCP_PORT_ISCSI); + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_TCP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); } -static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_FCOE); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); } -static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_ROCE); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); } -static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_port(app_info_bitmap) && - proto_id == QED_UDP_PORT_TYPE_ROCE_V2); + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_UDP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); } static void @@ -160,17 +219,17 @@ 
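/* The qed_dcbx_ieee_app_*() helpers above layer the IEEE selector field
 * on top of the existing CEE checks. A hypothetical caller classifying
 * one APP TLV entry would do, roughly:
 *
 *	if (qed_dcbx_iscsi_tlv(entry, proto_id, ieee))
 *		type = DCBX_PROTOCOL_ISCSI;
 *
 * where 'ieee' picks between the DCBX_APP_SF_IEEE_* selector values and
 * the legacy DCBX_APP_SF ethtype/port bit; DCBX_APP_SF_IEEE_RESERVED
 * indicates an old MFW, so the helpers fall back to the CEE-style test.
 */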
qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, static bool qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, u32 app_prio_bitmap, - u16 id, enum dcbx_protocol_type *type) + u16 id, enum dcbx_protocol_type *type, bool ieee) { - if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { + if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_FCOE; - } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE; - } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ISCSI; - } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ETH; - } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; @@ -190,17 +249,18 @@ static int qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, - u32 pri_tc_tbl, int count, bool dcbx_enabled) + u32 pri_tc_tbl, int count, u8 dcbx_version) { u8 tc, priority_map; enum dcbx_protocol_type type; + bool enable, ieee; u16 protocol_id; int priority; - bool enable; int i; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); + ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); /* Parse APP TLV */ for (i = 0; i < count; i++) { protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, @@ -215,7 +275,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, - protocol_id, &type)) { + protocol_id, &type, ieee)) { /* ETH always have the enable bit reset, as it gets * vlan information per packet. For other protocols, * should be set according to the dcbx_enabled @@ -252,7 +312,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, if (p_data->arr[type].update) continue; - enable = (type == DCBX_PROTOCOL_ETH) ? 
false : dcbx_enabled; + enable = !(type == DCBX_PROTOCOL_ETH); qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, priority, tc, type); } @@ -271,15 +331,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) struct dcbx_ets_feature *p_ets; struct qed_hw_info *p_info; u32 pri_tc_tbl, flags; - bool dcbx_enabled; + u8 dcbx_version; int num_entries; int rc = 0; - /* If DCBx version is non zero, then negotiation was - * successfuly performed - */ flags = p_hwfn->p_dcbx_info->operational.flags; - dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); + dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); p_app = &p_hwfn->p_dcbx_info->operational.features.app; p_tbl = p_app->app_pri_tbl; @@ -291,13 +348,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, - num_entries, dcbx_enabled); + num_entries, dcbx_version); if (rc) return rc; p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); data.pf_id = p_hwfn->rel_pf_id; - data.dcbx_enabled = dcbx_enabled; + data.dcbx_enabled = !!dcbx_version; qed_dcbx_dp_protocol(p_hwfn, &data); @@ -351,6 +408,325 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, return rc; } +#ifdef CONFIG_DCB +static void +qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn, + struct qed_dcbx_app_prio *p_prio, + struct qed_dcbx_results *p_results) +{ + u8 val; + + p_prio->roce = QED_DCBX_INVALID_PRIORITY; + p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY; + p_prio->iscsi = QED_DCBX_INVALID_PRIORITY; + p_prio->fcoe = QED_DCBX_INVALID_PRIORITY; + + if (p_results->arr[DCBX_PROTOCOL_ROCE].update && + p_results->arr[DCBX_PROTOCOL_ROCE].enable) + p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority; + + if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update && + p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) { + val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority; + p_prio->roce_v2 = val; + } + + if (p_results->arr[DCBX_PROTOCOL_ISCSI].update && + p_results->arr[DCBX_PROTOCOL_ISCSI].enable) + p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority; + + if (p_results->arr[DCBX_PROTOCOL_FCOE].update && + p_results->arr[DCBX_PROTOCOL_FCOE].enable) + p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority; + + if (p_results->arr[DCBX_PROTOCOL_ETH].update && + p_results->arr[DCBX_PROTOCOL_ETH].enable) + p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n", + p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe, + p_prio->eth); +} + +static void +qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct dcbx_app_priority_entry *p_tbl, + struct qed_dcbx_params *p_params, bool ieee) +{ + struct qed_app_entry *entry; + u8 pri_map; + int i; + + p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags, + DCBX_APP_WILLING); + p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED); + p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR); + p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags, + DCBX_APP_NUM_ENTRIES); + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + entry = &p_params->app_entry[i]; + if (ieee) { + u8 sf_ieee; + u32 val; + + sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF_IEEE); + switch (sf_ieee) { + case DCBX_APP_SF_IEEE_RESERVED: + /* Old MFW */ + val = QED_MFW_GET_FIELD(p_tbl[i].entry, + 
DCBX_APP_SF); + entry->sf_ieee = val ? + QED_DCBX_SF_IEEE_TCP_UDP_PORT : + QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_ETHTYPE: + entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_TCP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; + break; + case DCBX_APP_SF_IEEE_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; + break; + case DCBX_APP_SF_IEEE_TCP_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; + break; + } + } else { + entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF)); + } + + pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); + entry->prio = ffs(pri_map) - 1; + entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + entry->proto_id, + &entry->proto_type, ieee); + } + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "APP params: willing %d, valid %d error = %d\n", + p_params->app_willing, p_params->app_valid, + p_params->app_error); +} + +static void +qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn, + u32 pfc, struct qed_dcbx_params *p_params) +{ + u8 pfc_map; + + p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING); + p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS); + p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED); + pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP); + p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0); + p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1); + p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2); + p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3); + p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4); + p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5); + p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6); + p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7); + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "PFC params: willing %d, pfc_bitmap %d\n", + p_params->pfc.willing, pfc_map); +} + +static void +qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct qed_dcbx_params *p_params) +{ + u32 bw_map[2], tsa_map[2], pri_map; + int i; + + p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_WILLING); + p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_ENABLED); + p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS); + p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_MAX_TCS); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n", + p_params->ets_willing, + p_params->ets_cbs, + p_ets->pri_tc_tbl[0], p_params->max_ets_tc); + + /* 8 bit tsa and bw data corresponding to each of the 8 TC's are + * encoded in a type u32 array of size 2. 
+ */ + bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]); + bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); + tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); + tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); + pri_map = p_ets->pri_tc_tbl[0]; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { + p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; + p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; + p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "elem %d bw_tbl %x tsa_tbl %x\n", + i, p_params->ets_tc_bw_tbl[i], + p_params->ets_tc_tsa_tbl[i]); + } +} + +static void +qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct dcbx_app_priority_entry *p_tbl, + struct dcbx_ets_feature *p_ets, + u32 pfc, struct qed_dcbx_params *p_params, bool ieee) +{ + qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); + qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); + qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); +} + +static void +qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_dcbx_get *params) +{ + struct dcbx_features *p_feat; + + p_feat = &p_hwfn->p_dcbx_info->local_admin.features; + qed_dcbx_get_common_params(p_hwfn, &p_feat->app, + p_feat->app.app_pri_tbl, &p_feat->ets, + p_feat->pfc, &params->local.params, false); + params->local.valid = true; +} + +static void +qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_dcbx_get *params) +{ + struct dcbx_features *p_feat; + + p_feat = &p_hwfn->p_dcbx_info->remote.features; + qed_dcbx_get_common_params(p_hwfn, &p_feat->app, + p_feat->app.app_pri_tbl, &p_feat->ets, + p_feat->pfc, &params->remote.params, false); + params->remote.valid = true; +} + +static void +qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_dcbx_get *params) +{ + struct qed_dcbx_operational_params *p_operational; + struct qed_dcbx_results *p_results; + struct dcbx_features *p_feat; + bool enabled, err; + u32 flags; + bool val; + + flags = p_hwfn->p_dcbx_info->operational.flags; + + /* If DCBx version is non zero, then negotiation + * was successfully performed + */ + p_operational = &params->operational; + enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) != + DCBX_CONFIG_VERSION_DISABLED); + if (!enabled) { + p_operational->enabled = enabled; + p_operational->valid = false; + return; + } + + p_feat = &p_hwfn->p_dcbx_info->operational.features; + p_results = &p_hwfn->p_dcbx_info->results; + + val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_IEEE); + p_operational->ieee = val; + val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_CEE); + p_operational->cee = val; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n", + p_operational->ieee, p_operational->cee); + + qed_dcbx_get_common_params(p_hwfn, &p_feat->app, + p_feat->app.app_pri_tbl, &p_feat->ets, + p_feat->pfc, &params->operational.params, + p_operational->ieee); + qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); + err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); + p_operational->err = err; + p_operational->enabled = enabled; + p_operational->valid = true; +} + +static void +qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_dcbx_get *params) +{ + struct lldp_config_params_s *p_local; + + p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; + + 
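/* Sketch of how the operational flags are consumed above: the MFW
 * reports a single DCBX_CONFIG_VERSION field and the driver derives
 * three booleans from it:
 *
 *	val = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
 *	enabled = val != DCBX_CONFIG_VERSION_DISABLED;
 *	ieee    = val == DCBX_CONFIG_VERSION_IEEE;
 *	cee     = val == DCBX_CONFIG_VERSION_CEE;
 *
 * Only when 'enabled' is set are the feature tables parsed; otherwise
 * the operational section is flagged invalid and returned as-is.
 */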
memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, + ARRAY_SIZE(p_local->local_chassis_id)); + memcpy(params->lldp_local.local_port_id, p_local->local_port_id, + ARRAY_SIZE(p_local->local_port_id)); +} + +static void +qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_dcbx_get *params) +{ + struct lldp_status_params_s *p_remote; + + p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; + + memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, + ARRAY_SIZE(p_remote->peer_chassis_id)); + memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, + ARRAY_SIZE(p_remote->peer_port_id)); +} + +static int +qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_dcbx_get *p_params, + enum qed_mib_read_type type) +{ + switch (type) { + case QED_DCBX_REMOTE_MIB: + qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params); + break; + case QED_DCBX_LOCAL_MIB: + qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params); + break; + case QED_DCBX_OPERATIONAL_MIB: + qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params); + break; + case QED_DCBX_REMOTE_LLDP_MIB: + qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params); + break; + case QED_DCBX_LOCAL_LLDP_MIB: + qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + return -EINVAL; + } + + return 0; +} +#endif + static int qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -561,3 +937,1377 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, p_dcb_data = &p_dest->eth_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); } + +#ifdef CONFIG_DCB +static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_get *p_get, + enum qed_mib_read_type type) +{ + struct qed_ptt *p_ptt; + int rc; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc) + goto out; + + rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type); + +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +static void +qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn, + u32 *pfc, struct qed_dcbx_params *p_params) +{ + u8 pfc_map = 0; + int i; + + if (p_params->pfc.willing) + *pfc |= DCBX_PFC_WILLING_MASK; + else + *pfc &= ~DCBX_PFC_WILLING_MASK; + + if (p_params->pfc.enabled) + *pfc |= DCBX_PFC_ENABLED_MASK; + else + *pfc &= ~DCBX_PFC_ENABLED_MASK; + + *pfc &= ~DCBX_PFC_CAPS_MASK; + *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT; + + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + if (p_params->pfc.prio[i]) + pfc_map |= BIT(i); + + *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK; + *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT); + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc); +} + +static void +qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct qed_dcbx_params *p_params) +{ + u8 *bw_map, *tsa_map; + u32 val; + int i; + + if (p_params->ets_willing) + p_ets->flags |= DCBX_ETS_WILLING_MASK; + else + p_ets->flags &= ~DCBX_ETS_WILLING_MASK; + + if (p_params->ets_cbs) + p_ets->flags |= DCBX_ETS_CBS_MASK; + else + p_ets->flags &= ~DCBX_ETS_CBS_MASK; + + if (p_params->ets_enabled) + p_ets->flags |= DCBX_ETS_ENABLED_MASK; + else + p_ets->flags &= ~DCBX_ETS_ENABLED_MASK; + + p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK; + p_ets->flags |= (u32)p_params->max_ets_tc 
<< DCBX_ETS_MAX_TCS_SHIFT; + + bw_map = (u8 *)&p_ets->tc_bw_tbl[0]; + tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0]; + p_ets->pri_tc_tbl[0] = 0; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { + bw_map[i] = p_params->ets_tc_bw_tbl[i]; + tsa_map[i] = p_params->ets_tc_tsa_tbl[i]; + /* Copy the priority value to the corresponding 4 bits in the + * traffic class table. + */ + val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); + p_ets->pri_tc_tbl[0] |= val; + } + for (i = 0; i < 2; i++) { + p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); + p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); + } +} + +static void +qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct qed_dcbx_params *p_params, bool ieee) +{ + u32 *entry; + int i; + + if (p_params->app_willing) + p_app->flags |= DCBX_APP_WILLING_MASK; + else + p_app->flags &= ~DCBX_APP_WILLING_MASK; + + if (p_params->app_valid) + p_app->flags |= DCBX_APP_ENABLED_MASK; + else + p_app->flags &= ~DCBX_APP_ENABLED_MASK; + + p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK; + p_app->flags |= (u32)p_params->num_app_entries << + DCBX_APP_NUM_ENTRIES_SHIFT; + + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + entry = &p_app->app_pri_tbl[i].entry; + *entry = 0; + if (ieee) { + *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK); + switch (p_params->app_entry[i].sf_ieee) { + case QED_DCBX_SF_IEEE_ETHTYPE: + *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << + DCBX_APP_SF_IEEE_SHIFT); + *entry |= ((u32)DCBX_APP_SF_ETHTYPE << + DCBX_APP_SF_SHIFT); + break; + case QED_DCBX_SF_IEEE_TCP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + break; + case QED_DCBX_SF_IEEE_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + break; + case QED_DCBX_SF_IEEE_TCP_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + break; + } + } else { + *entry &= ~DCBX_APP_SF_MASK; + if (p_params->app_entry[i].ethtype) + *entry |= ((u32)DCBX_APP_SF_ETHTYPE << + DCBX_APP_SF_SHIFT); + else + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + } + + *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; + *entry |= ((u32)p_params->app_entry[i].proto_id << + DCBX_APP_PROTOCOL_ID_SHIFT); + *entry &= ~DCBX_APP_PRI_MAP_MASK; + *entry |= ((u32)(p_params->app_entry[i].prio) << + DCBX_APP_PRI_MAP_SHIFT); + } +} + +static void +qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, + struct dcbx_local_params *local_admin, + struct qed_dcbx_set *params) +{ + bool ieee = false; + + local_admin->flags = 0; + memcpy(&local_admin->features, + &p_hwfn->p_dcbx_info->operational.features, + sizeof(local_admin->features)); + + if (params->enabled) { + local_admin->config = params->ver_num; + ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); + } else { + local_admin->config = DCBX_CONFIG_VERSION_DISABLED; + } + + if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) + qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, + &params->config.params); + + if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG) + qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets, + &params->config.params); + + if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) + qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, + &params->config.params, ieee); +} + +int qed_dcbx_config_params(struct qed_hwfn 
*p_hwfn, struct qed_ptt *p_ptt, + struct qed_dcbx_set *params, bool hw_commit) +{ + struct dcbx_local_params local_admin; + struct qed_dcbx_mib_meta_data data; + u32 resp = 0, param = 0; + int rc = 0; + + if (!hw_commit) { + memcpy(&p_hwfn->p_dcbx_info->set, params, + sizeof(struct qed_dcbx_set)); + return 0; + } + + /* clear set-params cache */ + memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set)); + + memset(&local_admin, 0, sizeof(local_admin)); + qed_dcbx_set_local_params(p_hwfn, &local_admin, params); + + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, local_admin_dcbx_mib); + data.local_admin = &local_admin; + data.size = sizeof(struct dcbx_local_params); + qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param); + if (rc) + DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n"); + + return rc; +} + +int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_set *params) +{ + struct qed_dcbx_get *dcbx_info; + int rc; + + if (p_hwfn->p_dcbx_info->set.config.valid) { + memcpy(params, &p_hwfn->p_dcbx_info->set, + sizeof(struct qed_dcbx_set)); + return 0; + } + + dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); + if (!dcbx_info) { + DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n"); + return -ENOMEM; + } + + rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); + if (rc) { + kfree(dcbx_info); + return rc; + } + + p_hwfn->p_dcbx_info->set.override_flags = 0; + p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED; + if (dcbx_info->operational.cee) + p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE; + if (dcbx_info->operational.ieee) + p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE; + + p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled; + memcpy(&p_hwfn->p_dcbx_info->set.config.params, + &dcbx_info->operational.params, + sizeof(struct qed_dcbx_admin_params)); + p_hwfn->p_dcbx_info->set.config.valid = true; + + memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set)); + + kfree(dcbx_info); + + return 0; +} + +static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn, + enum qed_mib_read_type type) +{ + struct qed_dcbx_get *dcbx_info; + + dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); + if (!dcbx_info) { + DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n"); + return NULL; + } + + if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { + kfree(dcbx_info); + return NULL; + } + + if ((type == QED_DCBX_OPERATIONAL_MIB) && + !dcbx_info->operational.enabled) { + DP_INFO(hwfn, "DCBX is not enabled/operational\n"); + kfree(dcbx_info); + return NULL; + } + + return dcbx_info; +} + +static u8 qed_dcbnl_getstate(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + bool enabled; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 0; + + enabled = dcbx_info->operational.enabled; + DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled); + kfree(dcbx_info); + + return enabled; +} + +static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, 
&dcbx_set); + if (rc) + return 1; + + dcbx_set.enabled = !!state; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc ? 1 : 0; +} + +static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc); + *prio_type = *pgid = *bw_pct = *up_map = 0; + if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid tc %d\n", tc); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc]; + kfree(dcbx_info); +} + +static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + *bw_pct = 0; + DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid); + if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid pgid %d\n", pgid); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid]; + DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct); + kfree(dcbx_info); +} + +static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio, + u8 *bwg_id, u8 *bw_pct, u8 *up_map) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); + *prio = *bwg_id = *bw_pct = *up_map = 0; +} + +static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev, + int bwg_id, u8 *bw_pct) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); + *bw_pct = 0; +} + +static void qed_dcbnl_getpfccfg(struct qed_dev *cdev, + int priority, u8 *setting) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority); + if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", priority); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *setting = dcbx_info->operational.params.pfc.prio[priority]; + DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting); + kfree(dcbx_info); +} + +static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n", + priority, setting); + if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", priority); + return; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.prio[priority] = !!setting; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int rc = 0; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 1; + + 
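/* All dcbnl setters in this file share one read-modify-cache pattern; a
 * condensed sketch with error handling elided (prio/setting stand in
 * for whatever the specific callback configures):
 *
 *	struct qed_dcbx_set dcbx_set;
 *	struct qed_ptt *ptt;
 *
 *	memset(&dcbx_set, 0, sizeof(dcbx_set));
 *	qed_dcbx_get_config_params(hwfn, &dcbx_set);	cached or queried
 *	dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
 *	dcbx_set.config.params.pfc.prio[prio] = !!setting;
 *	ptt = qed_ptt_acquire(hwfn);
 *	qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);	0: cache only
 *	qed_ptt_release(hwfn, ptt);
 *
 * With hw_commit == 0, qed_dcbx_config_params() only stores the request
 * in p_dcbx_info->set; nothing reaches the MFW until a later commit.
 */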
switch (capid) { + case DCB_CAP_ATTR_PG: + case DCB_CAP_ATTR_PFC: + case DCB_CAP_ATTR_UP2TC: + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_PG_TCS: + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_DCBX: + *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_VER_IEEE); + break; + default: + *cap = false; + rc = 1; + } + + DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap); + kfree(dcbx_info); + + return rc; +} + +static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int rc = 0; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = dcbx_info->operational.params.max_ets_tc; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = dcbx_info->operational.params.pfc.max_tc; + break; + default: + rc = -EINVAL; + } + + kfree(dcbx_info); + DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num); + + return rc; +} + +static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + bool enabled; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 0; + + enabled = dcbx_info->operational.params.pfc.enabled; + DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled); + kfree(dcbx_info); + + return enabled; +} + +static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + u8 mode = 0; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 0; + + if (dcbx_info->operational.enabled) + mode |= DCB_CAP_DCBX_LLD_MANAGED; + if (dcbx_info->operational.ieee) + mode |= DCB_CAP_DCBX_VER_IEEE; + if (dcbx_info->operational.cee) + mode |= DCB_CAP_DCBX_VER_CEE; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode); + kfree(dcbx_info); + + return mode; +} + +static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev, + int tc, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, + "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n", + tc, pri_type, pgid, bw_pct, up_map); + + if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid tc %d\n", tc); + return; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); +} + +static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct); + if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid pgid %d\n", pgid); + return; + } + 
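/* Read the current admin configuration, override only this PG's + * bandwidth allocation, and write the updated set back to the device. + */ +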
+ memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); +} + +static u8 qed_dcbnl_setall(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num); + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.max_ets_tc = num; + break; + case DCB_NUMTCS_ATTR_PFC: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.max_tc = num; + break; + default: + DP_INFO(hwfn, "Invalid tcid %d\n", tcid); + return -EINVAL; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return 0; +} + +static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.enabled = !!state; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_app_entry *entry; + bool ethtype; + u8 prio = 0; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_info->operational.params.app_entry[i]; + if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) { + prio = entry->prio; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + return prio; +} + +static int qed_dcbnl_setapp(struct qed_dev *cdev, + u8 idtype, u16 idval, u8 pri_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_app_entry *entry; + struct qed_ptt *ptt; + bool ethtype; + 
int rc, i; + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_set.config.params.app_entry[i]; + if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) + break; + /* First empty slot */ + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App table is full\n"); + return -EBUSY; + } + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; + dcbx_set.config.params.app_entry[i].ethtype = ethtype; + dcbx_set.config.params.app_entry[i].proto_id = idval; + dcbx_set.config.params.app_entry[i].prio = pri_map; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode); + + if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) { + DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n"); + return 1; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + dcbx_set.ver_num = 0; + if (mode & DCB_CAP_DCBX_VER_CEE) { + dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE; + dcbx_set.enabled = true; + } + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE; + dcbx_set.enabled = true; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return 0; +} + +static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 1; + + *flags = 0; + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + if (dcbx_info->operational.params.ets_enabled) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_PFC: + if (dcbx_info->operational.params.pfc.enabled) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_APP: + if (dcbx_info->operational.params.app_valid) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + default: + DP_INFO(hwfn, "Invalid feature-ID %d\n", featid); + kfree(dcbx_info); + return 1; + } + + DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags); + kfree(dcbx_info); + + return 0; +} + +static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + bool enabled, willing; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n", + featid, flags); + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + enabled = !!(flags & DCB_FEATCFG_ENABLE); + willing = !!(flags & DCB_FEATCFG_WILLING); + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + 
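/* The PG feature maps onto the ETS enable/willing admin bits */ +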
dcbx_set.config.params.ets_enabled = enabled; + dcbx_set.config.params.ets_willing = willing; + break; + case DCB_FEATCFG_ATTR_PFC: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.enabled = enabled; + dcbx_set.config.params.pfc.willing = willing; + break; + case DCB_FEATCFG_ATTR_APP: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; + dcbx_set.config.params.app_willing = willing; + break; + default: + DP_INFO(hwfn, "Invalid feature-ID %d\n", featid); + return 1; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return 0; +} + +static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev, + struct dcb_peer_app_info *info, + u16 *app_count) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); + if (!dcbx_info) + return -EINVAL; + + info->willing = dcbx_info->remote.params.app_willing; + info->error = dcbx_info->remote.params.app_error; + *app_count = dcbx_info->remote.params.num_app_entries; + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev, + struct dcb_app *table) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); + if (!dcbx_info) + return -EINVAL; + + for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) { + if (dcbx_info->remote.params.app_entry[i].ethtype) + table[i].selector = DCB_APP_IDTYPE_ETHTYPE; + else + table[i].selector = DCB_APP_IDTYPE_PORTNUM; + table[i].priority = dcbx_info->remote.params.app_entry[i].prio; + table[i].protocol = + dcbx_info->remote.params.app_entry[i].proto_id; + } + + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); + if (!dcbx_info) + return -EINVAL; + + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + if (dcbx_info->remote.params.pfc.prio[i]) + pfc->pfc_en |= BIT(i); + + pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc; + DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n", + pfc->pfc_en, pfc->tcs_supported); + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); + if (!dcbx_info) + return -EINVAL; + + pg->willing = dcbx_info->remote.params.ets_willing; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { + pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i]; + pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i]; + } + + DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing); + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev, + struct ieee_pfc *pfc, bool remote) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_params *params; + struct qed_dcbx_get *dcbx_info; + int rc, i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + if
(remote) { + memset(dcbx_info, 0, sizeof(*dcbx_info)); + rc = qed_dcbx_query_params(hwfn, dcbx_info, + QED_DCBX_REMOTE_MIB); + if (rc) { + kfree(dcbx_info); + return -EINVAL; + } + + params = &dcbx_info->remote.params; + } else { + params = &dcbx_info->operational.params; + } + + pfc->pfc_cap = params->pfc.max_tc; + pfc->pfc_en = 0; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + if (params->pfc.prio[i]) + pfc->pfc_en |= BIT(i); + + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + return qed_dcbnl_get_ieee_pfc(cdev, pfc, false); +} + +static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc, i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i)); + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev, + struct ieee_ets *ets, bool remote) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_params *params; + int rc; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + if (remote) { + memset(dcbx_info, 0, sizeof(*dcbx_info)); + rc = qed_dcbx_query_params(hwfn, dcbx_info, + QED_DCBX_REMOTE_MIB); + if (rc) { + kfree(dcbx_info); + return -EINVAL; + } + + params = &dcbx_info->remote.params; + } else { + params = &dcbx_info->operational.params; + } + + ets->ets_cap = params->max_ets_tc; + ets->willing = params->ets_willing; + ets->cbs = params->ets_cbs; + memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc)); + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + return qed_dcbnl_get_ieee_ets(cdev, ets, false); +} + +static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + 
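/* Mirror the requested IEEE ETS settings (cap, willing, cbs and the + * per-TC bandwidth, TSA and priority tables) into the admin config. + */ +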
dcbx_set.config.params.max_ets_tc = ets->ets_cap; + dcbx_set.config.params.ets_willing = ets->willing; + dcbx_set.config.params.ets_cbs = ets->cbs; + memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw, + sizeof(ets->tc_tx_bw)); + memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa, + sizeof(ets->tc_tsa)); + memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc, + sizeof(ets->prio_tc)); + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + return qed_dcbnl_get_ieee_ets(cdev, ets, true); +} + +int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + return qed_dcbnl_get_ieee_pfc(cdev, pfc, true); +} + +int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_app_entry *entry; + bool ethtype; + u8 prio = 0; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + /* ieee defines the selector field value for ethertype to be 1 */ + ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_info->operational.params.app_entry[i]; + if ((entry->ethtype == ethtype) && + (entry->proto_id == app->protocol)) { + prio = entry->prio; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector, + app->protocol); + kfree(dcbx_info); + return -EINVAL; + } + + app->priority = ffs(prio) - 1; + + kfree(dcbx_info); + + return 0; +} + +int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_app_entry *entry; + struct qed_ptt *ptt; + bool ethtype; + int rc, i; + + if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", app->priority); + return -EINVAL; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + /* ieee defines the selector field value for ethertype to be 1 */ + ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_set.config.params.app_entry[i]; + if ((entry->ethtype == ethtype) && + (entry->proto_id == app->protocol)) + break; + /* First empty slot */ + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App table is full\n"); + return -EBUSY; + } + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; + dcbx_set.config.params.app_entry[i].ethtype = ethtype; + dcbx_set.config.params.app_entry[i].proto_id = app->protocol; + dcbx_set.config.params.app_entry[i].prio = BIT(app->priority); + + ptt = 
qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = { + .getstate = qed_dcbnl_getstate, + .setstate = qed_dcbnl_setstate, + .getpgtccfgtx = qed_dcbnl_getpgtccfgtx, + .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx, + .getpgtccfgrx = qed_dcbnl_getpgtccfgrx, + .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx, + .getpfccfg = qed_dcbnl_getpfccfg, + .setpfccfg = qed_dcbnl_setpfccfg, + .getcap = qed_dcbnl_getcap, + .getnumtcs = qed_dcbnl_getnumtcs, + .getpfcstate = qed_dcbnl_getpfcstate, + .getdcbx = qed_dcbnl_getdcbx, + .setpgtccfgtx = qed_dcbnl_setpgtccfgtx, + .setpgtccfgrx = qed_dcbnl_setpgtccfgrx, + .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx, + .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx, + .setall = qed_dcbnl_setall, + .setnumtcs = qed_dcbnl_setnumtcs, + .setpfcstate = qed_dcbnl_setpfcstate, + .setapp = qed_dcbnl_setapp, + .setdcbx = qed_dcbnl_setdcbx, + .setfeatcfg = qed_dcbnl_setfeatcfg, + .getfeatcfg = qed_dcbnl_getfeatcfg, + .getapp = qed_dcbnl_getapp, + .peer_getappinfo = qed_dcbnl_peer_getappinfo, + .peer_getapptable = qed_dcbnl_peer_getapptable, + .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc, + .cee_peer_getpg = qed_dcbnl_cee_peer_getpg, + .ieee_getpfc = qed_dcbnl_ieee_getpfc, + .ieee_setpfc = qed_dcbnl_ieee_setpfc, + .ieee_getets = qed_dcbnl_ieee_getets, + .ieee_setets = qed_dcbnl_ieee_setets, + .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc, + .ieee_peer_getets = qed_dcbnl_ieee_peer_getets, + .ieee_getapp = qed_dcbnl_ieee_getapp, + .ieee_setapp = qed_dcbnl_ieee_setapp, +}; + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index e7f834dbd..9ba681643 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -33,6 +33,24 @@ struct qed_dcbx_app_data { u8 tc; /* Traffic Class */ }; +#ifdef CONFIG_DCB +#define QED_DCBX_VERSION_DISABLED 0 +#define QED_DCBX_VERSION_IEEE 1 +#define QED_DCBX_VERSION_CEE 2 + +struct qed_dcbx_set { +#define QED_DCBX_OVERRIDE_STATE BIT(0) +#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1) +#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2) +#define QED_DCBX_OVERRIDE_APP_CFG BIT(3) +#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4) + u32 override_flags; + bool enabled; + struct qed_dcbx_admin_params config; + u32 ver_num; +}; +#endif + struct qed_dcbx_results { bool dcbx_enabled; u8 pf_id; @@ -55,6 +73,9 @@ struct qed_dcbx_info { struct qed_dcbx_results results; struct dcbx_mib operational; struct dcbx_mib remote; +#ifdef CONFIG_DCB + struct qed_dcbx_set set; +#endif u8 dcbx_cap; }; @@ -67,6 +88,13 @@ struct qed_dcbx_mib_meta_data { u32 addr; }; +#ifdef CONFIG_DCB +int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *); + +int qed_dcbx_config_params(struct qed_hwfn *, + struct qed_ptt *, struct qed_dcbx_set *, bool); +#endif + /* QED local interface routines */ int qed_dcbx_mib_update_event(struct qed_hwfn *, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 2d89e8c16..0e4f4a930 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -160,9 +161,13 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; struct qed_qm_info *qm_info = &p_hwfn->qm_info; 
struct init_qm_port_params *p_qm_port; + bool init_rdma_offload_pq = false; + bool init_pure_ack_pq = false; + bool init_ooo_pq = false; u16 num_pqs, multi_cos_tcs = 1; u8 pf_wfq = qm_info->pf_wfq; u32 pf_rl = qm_info->pf_rl; + u16 num_pf_rls = 0; u16 num_vfs = 0; #ifdef CONFIG_QED_SRIOV @@ -174,6 +179,25 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */ num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); + if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + num_pqs++; /* for RoCE queue */ + init_rdma_offload_pq = true; + /* we subtract num_vfs because each requires a rate limiter, + * and one default rate limiter + */ + if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn) + num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1; + + num_pqs += num_pf_rls; + qm_info->num_pf_rls = (u8) num_pf_rls; + } + + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */ + init_pure_ack_pq = true; + init_ooo_pq = true; + } + /* Sanity checking that setup requires legal number of resources */ if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) { DP_ERR(p_hwfn, @@ -211,12 +235,22 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); + /* First init rate limited queues */ + for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) { + qm_info->qm_pq_params[curr_queue].vport_id = vport_id++; + qm_info->qm_pq_params[curr_queue].tc_id = + p_hwfn->hw_info.non_offload_tc; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + qm_info->qm_pq_params[curr_queue].rl_valid = 1; + } + /* Then init per-TC PQs */ for (i = 0; i < multi_cos_tcs; i++) { struct init_qm_pq_params *params = &qm_info->qm_pq_params[curr_queue++]; - if (p_hwfn->hw_info.personality == QED_PCI_ETH) { + if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || + p_hwfn->hw_info.personality == QED_PCI_ETH) { params->vport_id = vport_id; params->tc_id = p_hwfn->hw_info.non_offload_tc; params->wrr_group = 1; @@ -236,6 +270,32 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) curr_queue++; qm_info->offload_pq = 0; + if (init_rdma_offload_pq) { + qm_info->offload_pq = curr_queue; + qm_info->qm_pq_params[curr_queue].vport_id = vport_id; + qm_info->qm_pq_params[curr_queue].tc_id = + p_hwfn->hw_info.offload_tc; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; + } + + if (init_pure_ack_pq) { + qm_info->pure_ack_pq = curr_queue; + qm_info->qm_pq_params[curr_queue].vport_id = vport_id; + qm_info->qm_pq_params[curr_queue].tc_id = + p_hwfn->hw_info.offload_tc; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; + } + + if (init_ooo_pq) { + qm_info->ooo_pq = curr_queue; + qm_info->qm_pq_params[curr_queue].vport_id = vport_id; + qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; + } + /* Then init per-VF PQs */ vf_offset = curr_queue; for (i = 0; i < num_vfs; i++) { @@ -244,6 +304,7 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) qm_info->qm_pq_params[curr_queue].tc_id = p_hwfn->hw_info.non_offload_tc; qm_info->qm_pq_params[curr_queue].wrr_group = 1; + qm_info->qm_pq_params[curr_queue].rl_valid = 1; curr_queue++; } @@ -256,7 +317,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) for (i = 0; i < num_ports; i++) { p_qm_port = &qm_info->qm_port_params[i]; p_qm_port->active = 1; - 
p_qm_port->num_active_phys_tcs = 4; + if (num_ports == 4) + p_qm_port->active_phys_tcs = 0x7; + else + p_qm_port->active_phys_tcs = 0x9f; p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; } @@ -366,21 +430,20 @@ int qed_resc_alloc(struct qed_dev *cdev) if (!p_hwfn->p_tx_cids) { DP_NOTICE(p_hwfn, "Failed to allocate memory for Tx Cids\n"); - rc = -ENOMEM; - goto alloc_err; + goto alloc_no_mem; } p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL); if (!p_hwfn->p_rx_cids) { DP_NOTICE(p_hwfn, "Failed to allocate memory for Rx Cids\n"); - rc = -ENOMEM; - goto alloc_err; + goto alloc_no_mem; } } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u32 n_eqes, num_cons; /* First allocate the context manager structure */ rc = qed_cxt_mngr_alloc(p_hwfn); @@ -429,18 +492,35 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_err; /* EQ */ - p_eq = qed_eq_alloc(p_hwfn, 256); - if (!p_eq) { - rc = -ENOMEM; + n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); + if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, + PROTOCOLID_ROCE, + 0) * 2; + n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; + } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + num_cons = + qed_cxt_get_proto_cid_count(p_hwfn, + PROTOCOLID_ISCSI, 0); + n_eqes += 2 * num_cons; + } + + if (n_eqes > 0xFFFF) { + DP_ERR(p_hwfn, + "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n", + n_eqes, 0xFFFF); + rc = -EINVAL; goto alloc_err; } + + p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes); + if (!p_eq) + goto alloc_no_mem; p_hwfn->p_eq = p_eq; p_consq = qed_consq_alloc(p_hwfn); - if (!p_consq) { - rc = -ENOMEM; - goto alloc_err; - } + if (!p_consq) + goto alloc_no_mem; p_hwfn->p_consq = p_consq; /* DMA info initialization */ @@ -469,6 +549,8 @@ int qed_resc_alloc(struct qed_dev *cdev) return 0; +alloc_no_mem: + rc = -ENOMEM; alloc_err: qed_resc_free(cdev); return rc; @@ -634,6 +716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params params; struct qed_dev *cdev = p_hwfn->cdev; + u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; u8 vf_id; @@ -682,9 +765,16 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); - /* Disable relaxed ordering in the PCI config space */ - qed_wr(p_hwfn, p_ptt, 0x20b4, - qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); + if (QED_IS_BB(p_hwfn->cdev)) { + num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev); + for (pf_id = 0; pf_id < num_pfs; pf_id++) { + qed_fid_pretend(p_hwfn, p_ptt, pf_id); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + } + /* pretend to original PF */ + qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + } for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); @@ -703,8 +793,31 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn, { int rc = 0; - rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, - hw_mode); + rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode); + if (rc != 0) + return rc; + + if (hw_mode & (1 << MODE_MF_SI)) { + u8 pf_id = 0; + + if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) { + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "PF[%08x] is first eth on engine\n", pf_id); + + /* We should have configured BIT for 
ppfid, i.e., the + * relative function number in the port. But there's a + * bug in LLH in BB where the ppfid is actually engine + * based, so we need to take this into account. + */ + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id); + } + + /* Take the protocol-based hit vector if there is a hit, + * otherwise take the other vector. + */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2); + } return rc; } @@ -751,7 +864,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, } /* Protocol Configuration */ - STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, + (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); @@ -773,6 +887,21 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Pure runtime initializations - directly to the HW */ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); + if (hw_mode & (1 << MODE_MF_SI)) { + u8 pf_id = 0; + u32 val = 0; + + if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) { + if (p_hwfn->rel_pf_id == pf_id) { + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "PF[%d] is first ETH on engine\n", + pf_id); + val = 1; + } + qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val); + } + } + if (b_hw_start) { /* enable interrupts */ qed_int_igu_enable(p_hwfn, p_ptt, int_mode); @@ -1213,8 +1342,9 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) num_features); } -static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) +static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) { + u8 enabled_func_idx = p_hwfn->enabled_func_idx; u32 *resc_start = p_hwfn->hw_info.resc_start; u8 num_funcs = p_hwfn->num_funcs_on_engine; u32 *resc_num = p_hwfn->hw_info.resc_num; @@ -1238,14 +1368,22 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs; resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs; resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs; - resc_num[QED_RL] = 8; + resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]); resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs; resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) / num_funcs; - resc_num[QED_ILT] = 950; + resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs; for (i = 0; i < QED_MAX_RESC; i++) - resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id; + resc_start[i] = resc_num[i] * enabled_func_idx; + + /* Sanity for ILT */ + if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) { + DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", + RESC_START(p_hwfn, QED_ILT), + RESC_END(p_hwfn, QED_ILT) - 1); + return -EINVAL; + } qed_hw_set_feat(p_hwfn); @@ -1275,6 +1413,8 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) p_hwfn->hw_info.resc_start[QED_VLAN], p_hwfn->hw_info.resc_num[QED_ILT], p_hwfn->hw_info.resc_start[QED_ILT]); + + return 0; } static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, @@ -1304,31 +1444,31 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G: + case 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; break; default: @@ -1373,7 +1513,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: link->speed.forced_speed = 50000; break; - case NVM_CFG1_PORT_DRV_LINK_SPEED_100G: + case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: link->speed.forced_speed = 100000; break; default: @@ -1429,14 +1569,20 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) __set_bit(QED_DEV_CAP_ETH, &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) + __set_bit(QED_DEV_CAP_ISCSI, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) + __set_bit(QED_DEV_CAP_ROCE, + &p_hwfn->hw_info.device_capabilities); return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 reg_function_hide, tmp, eng_mask; - u8 num_funcs; + u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; + u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; num_funcs = MAX_NUM_PFS_BB; @@ -1466,9 +1612,19 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) num_funcs++; tmp >>= 0x1; } + + /* Get the PF index within the enabled functions */ + low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; + tmp = reg_function_hide & eng_mask & low_pfs_mask; + while (tmp) { + if (tmp & 0x1) + enabled_func_idx--; + tmp >>= 0x1; + } } p_hwfn->num_funcs_on_engine = num_funcs; + p_hwfn->enabled_func_idx = enabled_func_idx; DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, @@ -1538,9 +1694,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_get_num_funcs(p_hwfn, p_ptt); - qed_hw_get_resc(p_hwfn); - - return rc; + return qed_hw_get_resc(p_hwfn); } static int qed_get_dev_info(struct qed_dev *cdev) @@ -1737,92 +1891,285 @@ void qed_hw_remove(struct qed_dev *cdev) qed_iov_free_hw_info(cdev); } -int qed_chain_alloc(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - u16 num_elems, - size_t elem_size, - struct qed_chain *p_chain) +static void qed_chain_free_next_ptr(struct qed_dev *cdev, + struct qed_chain *p_chain) +{ + void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL; + dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; + struct qed_chain_next *p_next; + u32 size, i; + + if (!p_virt) + return; + + size = p_chain->elem_size * p_chain->usable_per_page; + + for (i = 0; i < p_chain->page_cnt; i++) { + if 
(!p_virt) + break; + + p_next = (struct qed_chain_next *)((u8 *)p_virt + size); + p_virt_next = p_next->next_virt; + p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); + + dma_free_coherent(&cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, p_virt, p_phys); + + p_virt = p_virt_next; + p_phys = p_phys_next; + } +} + +static void qed_chain_free_single(struct qed_dev *cdev, + struct qed_chain *p_chain) { - dma_addr_t p_pbl_phys = 0; - void *p_pbl_virt = NULL; + if (!p_chain->p_virt_addr) + return; + + dma_free_coherent(&cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_chain->p_virt_addr, p_chain->p_phys_addr); +} + +static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) +{ + void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; + u32 page_cnt = p_chain->page_cnt, i, pbl_size; + u8 *p_pbl_virt = p_chain->pbl.p_virt_table; + + if (!pp_virt_addr_tbl) + return; + + if (!p_chain->pbl.p_virt_table) + goto out; + + for (i = 0; i < page_cnt; i++) { + if (!pp_virt_addr_tbl[i]) + break; + + dma_free_coherent(&cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + pp_virt_addr_tbl[i], + *(dma_addr_t *)p_pbl_virt); + + p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + } + + pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; + dma_free_coherent(&cdev->pdev->dev, + pbl_size, + p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table); +out: + vfree(p_chain->pbl.pp_virt_addr_tbl); +} + +void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) +{ + switch (p_chain->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + qed_chain_free_next_ptr(cdev, p_chain); + break; + case QED_CHAIN_MODE_SINGLE: + qed_chain_free_single(cdev, p_chain); + break; + case QED_CHAIN_MODE_PBL: + qed_chain_free_pbl(cdev, p_chain); + break; + } +} + +static int +qed_chain_alloc_sanity_check(struct qed_dev *cdev, + enum qed_chain_cnt_type cnt_type, + size_t elem_size, u32 page_cnt) +{ + u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; + + /* The actual chain size can be larger than the maximal possible value + * after rounding up the requested elements number to pages, and after + * taking into account the unusable elements (next-ptr elements). + * The size of a "u16" chain can be (U16_MAX + 1) since the chain + * size/capacity fields are of a u32 type. + */ + if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 && + chain_size > 0x10000) || + (cnt_type == QED_CHAIN_CNT_TYPE_U32 && + chain_size > 0x100000000ULL)) { + DP_NOTICE(cdev, + "The actual chain size (0x%llx) is larger than the maximal possible value\n", + chain_size); + return -EINVAL; + } + + return 0; +} + +static int +qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain) +{ + void *p_virt = NULL, *p_virt_prev = NULL; dma_addr_t p_phys = 0; - void *p_virt = NULL; - u16 page_cnt = 0; - size_t size; + u32 i; - if (mode == QED_CHAIN_MODE_SINGLE) - page_cnt = 1; - else - page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); + for (i = 0; i < p_chain->page_cnt; i++) { + p_virt = dma_alloc_coherent(&cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_phys, GFP_KERNEL); + if (!p_virt) { + DP_NOTICE(cdev, "Failed to allocate chain memory\n"); + return -ENOMEM; + } + + if (i == 0) { + qed_chain_init_mem(p_chain, p_virt, p_phys); + qed_chain_reset(p_chain); + } else { + qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, + p_virt, p_phys); + } + + p_virt_prev = p_virt; + } + /* Last page's next element should point to the beginning of the + * chain. 
+ */ + qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, + p_chain->p_virt_addr, + p_chain->p_phys_addr); + + return 0; +} + +static int +qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) +{ + dma_addr_t p_phys = 0; + void *p_virt = NULL; - size = page_cnt * QED_CHAIN_PAGE_SIZE; p_virt = dma_alloc_coherent(&cdev->pdev->dev, - size, &p_phys, GFP_KERNEL); + QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL); if (!p_virt) { - DP_NOTICE(cdev, "Failed to allocate chain mem\n"); - goto nomem; + DP_NOTICE(cdev, "Failed to allocate chain memory\n"); + return -ENOMEM; } - if (mode == QED_CHAIN_MODE_PBL) { - size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, - size, &p_pbl_phys, - GFP_KERNEL); - if (!p_pbl_virt) { - DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n"); - goto nomem; - } + qed_chain_init_mem(p_chain, p_virt, p_phys); + qed_chain_reset(p_chain); - qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt, - (u8)elem_size, intended_use, - p_pbl_phys, p_pbl_virt); - } else { - qed_chain_init(p_chain, p_virt, p_phys, page_cnt, - (u8)elem_size, intended_use, mode); + return 0; +} + +static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) +{ + u32 page_cnt = p_chain->page_cnt, size, i; + dma_addr_t p_phys = 0, p_pbl_phys = 0; + void **pp_virt_addr_tbl = NULL; + u8 *p_pbl_virt = NULL; + void *p_virt = NULL; + + size = page_cnt * sizeof(*pp_virt_addr_tbl); + pp_virt_addr_tbl = vmalloc(size); + if (!pp_virt_addr_tbl) { + DP_NOTICE(cdev, + "Failed to allocate memory for the chain virtual addresses table\n"); + return -ENOMEM; } + memset(pp_virt_addr_tbl, 0, size); - return 0; + /* The allocation of the PBL table is done with its full size, since it + * is expected to be successive. + * qed_chain_init_pbl_mem() is called even in a case of an allocation + * failure, since pp_virt_addr_tbl was previously allocated, and it + * should be saved to allow its freeing during the error flow. 
+ */ + size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; + p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, + size, &p_pbl_phys, GFP_KERNEL); + qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, + pp_virt_addr_tbl); + if (!p_pbl_virt) { + DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n"); + return -ENOMEM; + } -nomem: - dma_free_coherent(&cdev->pdev->dev, - page_cnt * QED_CHAIN_PAGE_SIZE, - p_virt, p_phys); - dma_free_coherent(&cdev->pdev->dev, - page_cnt * QED_CHAIN_PBL_ENTRY_SIZE, - p_pbl_virt, p_pbl_phys); + for (i = 0; i < page_cnt; i++) { + p_virt = dma_alloc_coherent(&cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_phys, GFP_KERNEL); + if (!p_virt) { + DP_NOTICE(cdev, "Failed to allocate chain memory\n"); + return -ENOMEM; + } - return -ENOMEM; + if (i == 0) { + qed_chain_init_mem(p_chain, p_virt, p_phys); + qed_chain_reset(p_chain); + } + + /* Fill the PBL table with the physical address of the page */ + *(dma_addr_t *)p_pbl_virt = p_phys; + /* Keep the virtual address of the page */ + p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; + + p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + } + + return 0; } -void qed_chain_free(struct qed_dev *cdev, - struct qed_chain *p_chain) +int qed_chain_alloc(struct qed_dev *cdev, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type, + u32 num_elems, size_t elem_size, struct qed_chain *p_chain) { - size_t size; + u32 page_cnt; + int rc = 0; - if (!p_chain->p_virt_addr) - return; + if (mode == QED_CHAIN_MODE_SINGLE) + page_cnt = 1; + else + page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); - if (p_chain->mode == QED_CHAIN_MODE_PBL) { - size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - dma_free_coherent(&cdev->pdev->dev, size, - p_chain->pbl.p_virt_table, - p_chain->pbl.p_phys_table); + rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); + if (rc) { + DP_NOTICE(cdev, + "Cannot allocate a chain with the given arguments:\n" + "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", + intended_use, mode, cnt_type, num_elems, elem_size); + return rc; } - size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE; - dma_free_coherent(&cdev->pdev->dev, size, - p_chain->p_virt_addr, - p_chain->p_phys_addr); + qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use, + mode, cnt_type); + + switch (mode) { + case QED_CHAIN_MODE_NEXT_PTR: + rc = qed_chain_alloc_next_ptr(cdev, p_chain); + break; + case QED_CHAIN_MODE_SINGLE: + rc = qed_chain_alloc_single(cdev, p_chain); + break; + case QED_CHAIN_MODE_PBL: + rc = qed_chain_alloc_pbl(cdev, p_chain); + break; + } + if (rc) + goto nomem; + + return 0; + +nomem: + qed_chain_free(cdev, p_chain); + return rc; } -int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, - u16 src_id, u16 *dst_id) +int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { u16 min, max; - min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); + min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); DP_NOTICE(p_hwfn, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", @@ -1876,6 +2223,110 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, return 0; } +static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 hw_addr, void *p_eth_qzone, + size_t eth_qzone_size, u8 timeset) +{ + struct coalescing_timeset *p_coal_timeset; + + if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { + DP_NOTICE(p_hwfn, "Coalescing configuration not 
enabled\n"); + return -EINVAL; + } + + p_coal_timeset = p_eth_qzone; + memset(p_coal_timeset, 0, eth_qzone_size); + SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); + SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); + qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); + + return 0; +} + +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id) +{ + struct ustorm_eth_queue_zone eth_qzone; + u8 timeset, timer_res; + u16 fw_qid = 0; + u32 address; + int rc; + + /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ + if (coalesce <= 0x7F) { + timer_res = 0; + } else if (coalesce <= 0xFF) { + timer_res = 1; + } else if (coalesce <= 0x1FF) { + timer_res = 2; + } else { + DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); + return -EINVAL; + } + timeset = (u8)(coalesce >> timer_res); + + rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid); + if (rc) + return rc; + + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false); + if (rc) + goto out; + + address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + + rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, + sizeof(struct ustorm_eth_queue_zone), timeset); + if (rc) + goto out; + + p_hwfn->cdev->rx_coalesce_usecs = coalesce; +out: + return rc; +} + +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id) +{ + struct xstorm_eth_queue_zone eth_qzone; + u8 timeset, timer_res; + u16 fw_qid = 0; + u32 address; + int rc; + + /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ + if (coalesce <= 0x7F) { + timer_res = 0; + } else if (coalesce <= 0xFF) { + timer_res = 1; + } else if (coalesce <= 0x1FF) { + timer_res = 2; + } else { + DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); + return -EINVAL; + } + timeset = (u8)(coalesce >> timer_res); + + rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid); + if (rc) + return rc; + + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true); + if (rc) + goto out; + + address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + + rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, + sizeof(struct xstorm_eth_queue_zone), timeset); + if (rc) + goto out; + + p_hwfn->cdev->tx_coalesce_usecs = coalesce; +out: + return rc; +} + /* Calculate final WFQ values for all vports and configure them. 
* After this configuration each vport will have * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) @@ -2089,7 +2540,7 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); - if (!rc) { + if (rc) { qed_ptt_release(p_hwfn, p_ptt); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index dde364d6f..343bb0344 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -212,6 +212,20 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, u32 size_in_dwords, u32 flags); + /** + * @brief qed_dmae_grc2host - Read data from dmae data offset + * to the destination address using the given ptt + * + * @param p_ptt + * @param grc_addr (dmae_data_offset) + * @param dest_addr + * @param size_in_dwords + * @param flags - one of the flags defined above + */ +int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, + u32 flags); + /** * @brief qed_dmae_host2host - copy data from a source address + * to a destination address (for SRIOV) using the given ptt @@ -245,9 +259,8 @@ int qed_chain_alloc(struct qed_dev *cdev, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, - u16 num_elems, - size_t elem_size, - struct qed_chain *p_chain); + enum qed_chain_cnt_type cnt_type, + u32 num_elems, size_t elem_size, struct qed_chain *p_chain); /** * @brief qed_chain_free - Free chain DMA memory * * @param cdev * @param p_chain */ -void qed_chain_free(struct qed_dev *cdev, - struct qed_chain *p_chain); +void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain); /** * @brief qed_fw_l2_queue - Get absolute L2 queue ID @@ -310,4 +322,37 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); +/** + * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue + * Coalescing can be configured up to 511 usec, but with decreasing + * accuracy [the bigger the value the less accurate], up to an error of + * 3 usec for the highest values. + * + * @param p_hwfn + * @param p_ptt + * @param coalesce - Coalesce value in micro seconds. + * @param qid - Queue index. + * @param sb_id - SB Id + * + * @return int + */ +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id); + +/** + * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue + * While the API allows setting coalescing per-qid, all tx queues sharing a + * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] + * otherwise configuration would break. + * + * @param p_hwfn + * @param p_ptt + * @param coalesce - Coalesce value in micro seconds. + * @param qid - Queue index. 
+ * @param qid - SB Id + * + * @return int + */ +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index e29ed5a69..6f9d3b831 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -17,13 +17,15 @@ #include #include #include +#include +#include #include +#include +#include +#include struct qed_hwfn; struct qed_ptt; -/********************************/ -/* Add include to common target */ -/********************************/ /* opcodes for the event ring */ enum common_event_opcode { @@ -32,9 +34,10 @@ enum common_event_opcode { COMMON_EVENT_VF_START, COMMON_EVENT_VF_STOP, COMMON_EVENT_VF_PF_CHANNEL, - COMMON_EVENT_RESERVED4, - COMMON_EVENT_RESERVED5, - COMMON_EVENT_RESERVED6, + COMMON_EVENT_VF_FLR, + COMMON_EVENT_PF_UPDATE, + COMMON_EVENT_MALICIOUS_VF, + COMMON_EVENT_RL_UPDATE, COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE }; @@ -42,11 +45,12 @@ enum common_event_opcode { /* Common Ramrod Command IDs */ enum common_ramrod_cmd_id { COMMON_RAMROD_UNUSED, - COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, - COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, + COMMON_RAMROD_PF_START, + COMMON_RAMROD_PF_STOP, COMMON_RAMROD_VF_START, COMMON_RAMROD_VF_STOP, COMMON_RAMROD_PF_UPDATE, + COMMON_RAMROD_RL_UPDATE, COMMON_RAMROD_EMPTY, MAX_COMMON_RAMROD_CMD_ID }; @@ -63,448 +67,448 @@ struct pstorm_core_conn_st_ctx { /* Core Slowpath Connection storm context of Xstorm */ struct xstorm_core_conn_st_ctx { - __le32 spq_base_lo /* SPQ Ring Base Address low dword */; - __le32 spq_base_hi /* SPQ Ring Base Address high dword */; - struct regpair consolid_base_addr; - __le16 spq_cons /* SPQ Ring Consumer */; - __le16 consolid_cons /* Consolidation Ring Consumer */; - __le32 reserved0[55] /* Pad to 15 cycles */; + __le32 spq_base_lo; + __le32 spq_base_hi; + struct regpair consolid_base_addr; + __le16 spq_cons; + __le16 consolid_cons; + __le32 reserved0[55]; }; struct xstorm_core_conn_ag_ctx { - u8 reserved0 /* cdu_validation */; - u8 core_state /* state */; - u8 flags0; -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 reserved0; + u8 core_state; + u8 flags0; +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define 
XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ -#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define 
XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ -#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ -#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ -#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ -#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ -#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */ -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 
0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ -#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ -#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ -#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ -#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ -#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */ -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ 
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */ -#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ -#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define 
XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */ -#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 - u8 byte2 /* byte2 */; - __le16 physical_q0 /* physical_q0 */; - __le16 consolid_prod /* physical_q1 */; - __le16 reserved16 /* physical_q2 */; - __le16 tx_bd_cons /* word3 */; - __le16 tx_bd_or_spq_prod /* word4 */; - __le16 word5 /* word5 */; - __le16 conn_dpi /* conn_dpi */; - u8 byte3 /* byte3 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - u8 byte6 /* byte6 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* cf_array0 */; - __le32 reg6 /* cf_array1 */; - __le16 word7 /* word7 */; - __le16 word8 /* word8 */; - __le16 word9 /* word9 */; - __le16 word10 /* word10 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - __le32 reg9 /* reg9 */; - u8 byte7 /* byte7 */; - u8 byte8 /* byte8 */; - u8 byte9 /* byte9 */; - u8 byte10 /* byte10 */; - u8 byte11 /* byte11 */; - u8 byte12 /* byte12 */; - u8 byte13 /* byte13 */; - u8 byte14 /* byte14 */; - u8 byte15 /* byte15 */; - u8 byte16 /* byte16 */; - __le16 word11 /* word11 */; - __le32 reg10 /* reg10 */; - __le32 reg11 /* reg11 */; - __le32 reg12 /* reg12 */; - __le32 reg13 /* reg13 */; - __le32 reg14 /* reg14 */; - __le32 reg15 /* reg15 */; - __le32 reg16 /* reg16 */; - __le32 reg17 /* reg17 */; - __le32 reg18 /* reg18 */; - __le32 reg19 /* reg19 */; - __le16 word12 /* word12 */; - __le16 word13 /* word13 */; - __le16 word14 /* word14 */; - __le16 word15 /* word15 */; +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 consolid_prod; + __le16 reserved16; + __le16 tx_bd_cons; + __le16 tx_bd_or_spq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 byte16; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + 
__le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; + __le32 reg18; + __le32 reg19; + __le16 word12; + __le16 word13; + __le16 word14; + __le16 word15; }; struct tstorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 byte0; + u8 byte1; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ 
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 
/* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 word1 /* word1 */; - __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; + u8 byte4; + u8 byte5; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; }; struct ustorm_core_conn_ag_ctx { - u8 reserved /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define 
USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* conn_dpi */; - __le16 word1 /* word1 */; - __le32 rx_producers /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + 
u8 byte3; + __le16 word0; + __le16 word1; + __le32 rx_producers; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; }; /* The core storm context for the Mstorm */ @@ -519,122 +523,186 @@ struct ustorm_core_conn_st_ctx { /* core connection context */ struct core_conn_context { - struct ystorm_core_conn_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2] /* padding */; - struct pstorm_core_conn_st_ctx pstorm_st_context; - struct regpair pstorm_st_padding[2]; - struct xstorm_core_conn_st_ctx xstorm_st_context; - struct xstorm_core_conn_ag_ctx xstorm_ag_context; - struct tstorm_core_conn_ag_ctx tstorm_ag_context; - struct ustorm_core_conn_ag_ctx ustorm_ag_context; - struct mstorm_core_conn_st_ctx mstorm_st_context; - struct ustorm_core_conn_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2] /* padding */; + struct ystorm_core_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_core_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_core_conn_st_ctx xstorm_st_context; + struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; + struct mstorm_core_conn_st_ctx mstorm_st_context; + struct ustorm_core_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; +}; + +struct eth_mstorm_per_pf_stat { + struct regpair gre_discard_pkts; + struct regpair vxlan_discard_pkts; + struct regpair geneve_discard_pkts; + struct regpair lb_discard_pkts; }; struct eth_mstorm_per_queue_stat { - struct regpair ttl0_discard; - struct regpair packet_too_big_discard; - struct regpair no_buff_discard; - struct regpair not_active_discard; - struct regpair tpa_coalesced_pkts; - struct regpair tpa_coalesced_events; - struct regpair tpa_aborts_num; - struct regpair tpa_coalesced_bytes; + struct regpair ttl0_discard; + struct regpair packet_too_big_discard; + struct regpair no_buff_discard; + struct regpair not_active_discard; + struct regpair tpa_coalesced_pkts; + struct regpair tpa_coalesced_events; + struct regpair tpa_aborts_num; + struct regpair tpa_coalesced_bytes; +}; + +/* Ethernet TX Per PF */ +struct eth_pstorm_per_pf_stat { + struct regpair sent_lb_ucast_bytes; + struct regpair sent_lb_mcast_bytes; + struct regpair sent_lb_bcast_bytes; + struct regpair sent_lb_ucast_pkts; + struct regpair sent_lb_mcast_pkts; + struct regpair sent_lb_bcast_pkts; + struct regpair sent_gre_bytes; + struct regpair sent_vxlan_bytes; + struct regpair sent_geneve_bytes; + struct regpair sent_gre_pkts; + struct regpair sent_vxlan_pkts; + struct regpair sent_geneve_pkts; + struct regpair gre_drop_pkts; + struct regpair vxlan_drop_pkts; + struct regpair geneve_drop_pkts; +}; + +/* Ethernet TX Per Queue Stats */ +struct eth_pstorm_per_queue_stat { + struct regpair sent_ucast_bytes; + struct regpair sent_mcast_bytes; + struct regpair sent_bcast_bytes; + struct regpair sent_ucast_pkts; + struct regpair sent_mcast_pkts; + struct regpair sent_bcast_pkts; + struct regpair error_drop_pkts; +}; + +/* ETH Rx producers data */ +struct eth_rx_rate_limit { + __le16 mult; + __le16 cnst; + u8 add_sub_cnst; + u8 reserved0; + __le16 reserved1; }; -struct eth_pstorm_per_queue_stat { - struct regpair sent_ucast_bytes; - struct regpair sent_mcast_bytes; - struct regpair sent_bcast_bytes; - struct regpair sent_ucast_pkts; - struct regpair sent_mcast_pkts; - struct regpair sent_bcast_pkts; - struct regpair error_drop_pkts; +struct 
eth_ustorm_per_pf_stat { + struct regpair rcv_lb_ucast_bytes; + struct regpair rcv_lb_mcast_bytes; + struct regpair rcv_lb_bcast_bytes; + struct regpair rcv_lb_ucast_pkts; + struct regpair rcv_lb_mcast_pkts; + struct regpair rcv_lb_bcast_pkts; + struct regpair rcv_gre_bytes; + struct regpair rcv_vxlan_bytes; + struct regpair rcv_geneve_bytes; + struct regpair rcv_gre_pkts; + struct regpair rcv_vxlan_pkts; + struct regpair rcv_geneve_pkts; }; struct eth_ustorm_per_queue_stat { - struct regpair rcv_ucast_bytes; - struct regpair rcv_mcast_bytes; - struct regpair rcv_bcast_bytes; - struct regpair rcv_ucast_pkts; - struct regpair rcv_mcast_pkts; - struct regpair rcv_bcast_pkts; + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; }; /* Event Ring Next Page Address */ struct event_ring_next_addr { - struct regpair addr /* Next Page Address */; - __le32 reserved[2] /* Reserved */; + struct regpair addr; + __le32 reserved[2]; }; +/* Event Ring Element */ union event_ring_element { - struct event_ring_entry entry /* Event Ring Entry */; - struct event_ring_next_addr next_addr; + struct event_ring_entry entry; + struct event_ring_next_addr next_addr; }; +/* Major and Minor hsi Versions */ +struct hsi_fp_ver_struct { + u8 minor_ver_arr[2]; + u8 major_ver_arr[2]; +}; + +/* Mstorm non-triggering VF zone */ struct mstorm_non_trigger_vf_zone { struct eth_mstorm_per_queue_stat eth_queue_stat; + struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF]; }; +/* Mstorm VF zone */ struct mstorm_vf_zone { struct mstorm_non_trigger_vf_zone non_trigger; + }; +/* personality per PF */ enum personality_type { BAD_PERSONALITY_TYP, - PERSONALITY_RESERVED, + PERSONALITY_ISCSI, PERSONALITY_RESERVED2, - PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */, + PERSONALITY_RDMA_AND_ETH, PERSONALITY_RESERVED3, PERSONALITY_CORE, - PERSONALITY_ETH /* Ethernet */, + PERSONALITY_ETH, PERSONALITY_RESERVED4, MAX_PERSONALITY_TYPE }; +/* tunnel configuration */ struct pf_start_tunnel_config { - u8 set_vxlan_udp_port_flg; - u8 set_geneve_udp_port_flg; - u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */; - u8 tx_enable_l2geneve; - u8 tx_enable_ipgeneve; - u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */; - u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */; - u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */; - u8 tunnel_clss_l2geneve; - u8 tunnel_clss_ipgeneve; - u8 tunnel_clss_l2gre; - u8 tunnel_clss_ipgre; - __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */; - __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. 
*/; + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 tx_enable_vxlan; + u8 tx_enable_l2geneve; + u8 tx_enable_ipgeneve; + u8 tx_enable_l2gre; + u8 tx_enable_ipgre; + u8 tunnel_clss_vxlan; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + __le16 vxlan_udp_port; + __le16 geneve_udp_port; }; /* Ramrod data for PF start ramrod */ struct pf_start_ramrod_data { - struct regpair event_ring_pbl_addr; - struct regpair consolid_q_pbl_addr; - struct pf_start_tunnel_config tunnel_config; - __le16 event_ring_sb_id; - u8 base_vf_id; - u8 num_vfs; - u8 event_ring_num_pages; - u8 event_ring_sb_index; - u8 path_id; - u8 warning_as_error; - u8 dont_log_ramrods; - u8 personality; - __le16 log_type_mask; - u8 mf_mode /* Multi function mode */; - u8 integ_phase /* Integration phase */; - u8 allow_npar_tx_switching; - u8 inner_to_outer_pri_map[8]; - u8 pri_map_valid; - u32 outer_tag; - u8 reserved0[4]; -}; - -/* Data for port update ramrod */ + struct regpair event_ring_pbl_addr; + struct regpair consolid_q_pbl_addr; + struct pf_start_tunnel_config tunnel_config; + __le16 event_ring_sb_id; + u8 base_vf_id; + u8 num_vfs; + u8 event_ring_num_pages; + u8 event_ring_sb_index; + u8 path_id; + u8 warning_as_error; + u8 dont_log_ramrods; + u8 personality; + __le16 log_type_mask; + u8 mf_mode; + u8 integ_phase; + u8 allow_npar_tx_switching; + u8 inner_to_outer_pri_map[8]; + u8 pri_map_valid; + __le32 outer_tag; + struct hsi_fp_ver_struct hsi_fp_ver; + +}; + struct protocol_dcb_data { u8 dcb_enable_flag; u8 dcb_priority; @@ -642,25 +710,24 @@ struct protocol_dcb_data { u8 reserved; }; -/* tunnel configuration */ struct pf_update_tunnel_config { - u8 update_rx_pf_clss; - u8 update_tx_pf_clss; - u8 set_vxlan_udp_port_flg; - u8 set_geneve_udp_port_flg; - u8 tx_enable_vxlan; - u8 tx_enable_l2geneve; - u8 tx_enable_ipgeneve; - u8 tx_enable_l2gre; - u8 tx_enable_ipgre; - u8 tunnel_clss_vxlan; - u8 tunnel_clss_l2geneve; - u8 tunnel_clss_ipgeneve; - u8 tunnel_clss_l2gre; - u8 tunnel_clss_ipgre; - __le16 vxlan_udp_port; - __le16 geneve_udp_port; - __le16 reserved[3]; + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 tx_enable_vxlan; + u8 tx_enable_l2geneve; + u8 tx_enable_ipgeneve; + u8 tx_enable_l2gre; + u8 tx_enable_ipgre; + u8 tunnel_clss_vxlan; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + __le16 vxlan_udp_port; + __le16 geneve_udp_port; + __le16 reserved[3]; }; struct pf_update_ramrod_data { @@ -669,38 +736,43 @@ struct pf_update_ramrod_data { u8 update_fcoe_dcb_data_flag; u8 update_iscsi_dcb_data_flag; u8 update_roce_dcb_data_flag; + u8 update_iwarp_dcb_data_flag; u8 update_mf_vlan_flag; - __le16 mf_vlan; + u8 reserved; struct protocol_dcb_data eth_dcb_data; struct protocol_dcb_data fcoe_dcb_data; struct protocol_dcb_data iscsi_dcb_data; struct protocol_dcb_data roce_dcb_data; - struct pf_update_tunnel_config tunnel_config; -}; - -/* Tunnel classification scheme */ -enum tunnel_clss { - TUNNEL_CLSS_MAC_VLAN = 0, - TUNNEL_CLSS_MAC_VNI, - TUNNEL_CLSS_INNER_MAC_VLAN, - TUNNEL_CLSS_INNER_MAC_VNI, - MAX_TUNNEL_CLSS + struct protocol_dcb_data iwarp_dcb_data; + __le16 mf_vlan; + __le16 reserved2; + struct pf_update_tunnel_config tunnel_config; }; +/* Ports mode */ enum ports_mode { - ENGX2_PORTX1 /* 2 engines x 1 port */, - ENGX2_PORTX2 /* 2 engines x 2 ports */, - ENGX1_PORTX1 /* 1 engine x 1 port */, - ENGX1_PORTX2 /* 1 engine x 2 ports */, - 
ENGX1_PORTX4 /* 1 engine x 4 ports */,
+	ENGX2_PORTX1,
+	ENGX2_PORTX2,
+	ENGX1_PORTX1,
+	ENGX1_PORTX2,
+	ENGX1_PORTX4,
 	MAX_PORTS_MODE
 };
 
+/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */
+enum protocol_version_array_key {
+	ETH_VER_KEY = 0,
+	ROCE_VER_KEY,
+	MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
+
+/* Pstorm non-triggering VF zone */
 struct pstorm_non_trigger_vf_zone {
 	struct eth_pstorm_per_queue_stat eth_queue_stat;
 	struct regpair reserved[2];
 };
 
+/* Pstorm VF zone */
 struct pstorm_vf_zone {
 	struct pstorm_non_trigger_vf_zone non_trigger;
 	struct regpair reserved[7];
@@ -708,56 +780,89 @@ struct pstorm_vf_zone {
 
 /* Ramrod Header of SPQE */
 struct ramrod_header {
-	__le32 cid /* Slowpath Connection CID */;
-	u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
-	u8 protocol_id /* Ramrod Protocol ID */;
-	__le16 echo /* Ramrod echo */;
+	__le32 cid;
+	u8 cmd_id;
+	u8 protocol_id;
+	__le16 echo;
 };
 
 /* Slowpath Element (SPQE) */
 struct slow_path_element {
-	struct ramrod_header hdr /* Ramrod Header */;
-	struct regpair data_ptr;
+	struct ramrod_header hdr;
+	struct regpair data_ptr;
+};
+
+/* Tstorm non-triggering VF zone */
+struct tstorm_non_trigger_vf_zone {
+	struct regpair reserved[2];
 };
 
 struct tstorm_per_port_stat {
-	struct regpair trunc_error_discard;
-	struct regpair mac_error_discard;
-	struct regpair mftag_filter_discard;
-	struct regpair eth_mac_filter_discard;
-	struct regpair ll2_mac_filter_discard;
-	struct regpair ll2_conn_disabled_discard;
-	struct regpair iscsi_irregular_pkt;
-	struct regpair fcoe_irregular_pkt;
-	struct regpair roce_irregular_pkt;
-	struct regpair eth_irregular_pkt;
-	struct regpair toe_irregular_pkt;
-	struct regpair preroce_irregular_pkt;
+	struct regpair trunc_error_discard;
+	struct regpair mac_error_discard;
+	struct regpair mftag_filter_discard;
+	struct regpair eth_mac_filter_discard;
+	struct regpair reserved[5];
+	struct regpair eth_irregular_pkt;
+	struct regpair reserved1[2];
+	struct regpair eth_gre_tunn_filter_discard;
+	struct regpair eth_vxlan_tunn_filter_discard;
+	struct regpair eth_geneve_tunn_filter_discard;
+};
+
+/* Tstorm VF zone */
+struct tstorm_vf_zone {
+	struct tstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+	TUNNEL_CLSS_MAC_VLAN = 0,
+	TUNNEL_CLSS_MAC_VNI,
+	TUNNEL_CLSS_INNER_MAC_VLAN,
+	TUNNEL_CLSS_INNER_MAC_VNI,
+	TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
+	MAX_TUNNEL_CLSS
 };
 
+/* Ustorm non-triggering VF zone */
 struct ustorm_non_trigger_vf_zone {
 	struct eth_ustorm_per_queue_stat eth_queue_stat;
 	struct regpair vf_pf_msg_addr;
 };
 
+/* Ustorm triggering VF zone */
 struct ustorm_trigger_vf_zone {
 	u8 vf_pf_msg_valid;
 	u8 reserved[7];
 };
 
+/* Ustorm VF zone */
 struct ustorm_vf_zone {
 	struct ustorm_non_trigger_vf_zone non_trigger;
 	struct ustorm_trigger_vf_zone trigger;
 };
 
+/* VF-PF channel data */
+struct vf_pf_channel_data {
+	__le32 ready;
+	u8 valid;
+	u8 reserved0;
+	__le16 reserved1;
+};
+
+/* Ramrod data for VF start ramrod */
 struct vf_start_ramrod_data {
 	u8 vf_id;
 	u8 enable_flr_ack;
 	__le16 opaque_fid;
 	u8 personality;
-	u8 reserved[3];
+	u8 reserved[7];
+	struct hsi_fp_ver_struct hsi_fp_ver;
+
 };
 
+/* Ramrod data for VF stop ramrod */
 struct vf_stop_ramrod_data {
 	u8 vf_id;
 	u8 reserved0;
@@ -765,94 +870,474 @@ struct vf_stop_ramrod_data {
 	__le32 reserved2;
 };
 
+/* Attentions status block */
 struct atten_status_block {
-	__le32 atten_bits;
-	__le32 atten_ack;
-	__le16 reserved0;
-	__le16 sb_index /* status block running index */;
-	__le32 reserved1;
+	__le32 atten_bits;
+	__le32
atten_ack; + __le16 reserved0; + __le16 sb_index; + __le32 reserved1; +}; + +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT +}; + +/* DMAE command */ +struct dmae_cmd { + __le32 opcode; +#define DMAE_CMD_SRC_MASK 0x1 +#define DMAE_CMD_SRC_SHIFT 0 +#define DMAE_CMD_DST_MASK 0x3 +#define DMAE_CMD_DST_SHIFT 1 +#define DMAE_CMD_C_DST_MASK 0x1 +#define DMAE_CMD_C_DST_SHIFT 3 +#define DMAE_CMD_CRC_RESET_MASK 0x1 +#define DMAE_CMD_CRC_RESET_SHIFT 4 +#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5 +#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6 +#define DMAE_CMD_COMP_FUNC_MASK 0x1 +#define DMAE_CMD_COMP_FUNC_SHIFT 7 +#define DMAE_CMD_COMP_WORD_EN_MASK 0x1 +#define DMAE_CMD_COMP_WORD_EN_SHIFT 8 +#define DMAE_CMD_COMP_CRC_EN_MASK 0x1 +#define DMAE_CMD_COMP_CRC_EN_SHIFT 9 +#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7 +#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10 +#define DMAE_CMD_RESERVED1_MASK 0x1 +#define DMAE_CMD_RESERVED1_SHIFT 13 +#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3 +#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14 +#define DMAE_CMD_ERR_HANDLING_MASK 0x3 +#define DMAE_CMD_ERR_HANDLING_SHIFT 16 +#define DMAE_CMD_PORT_ID_MASK 0x3 +#define DMAE_CMD_PORT_ID_SHIFT 18 +#define DMAE_CMD_SRC_PF_ID_MASK 0xF +#define DMAE_CMD_SRC_PF_ID_SHIFT 20 +#define DMAE_CMD_DST_PF_ID_MASK 0xF +#define DMAE_CMD_DST_PF_ID_SHIFT 24 +#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28 +#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29 +#define DMAE_CMD_RESERVED2_MASK 0x3 +#define DMAE_CMD_RESERVED2_SHIFT 30 + __le32 src_addr_lo; + __le32 src_addr_hi; + __le32 dst_addr_lo; + __le32 dst_addr_hi; + __le16 length_dw; + __le16 opcode_b; +#define DMAE_CMD_SRC_VF_ID_MASK 0xFF +#define DMAE_CMD_SRC_VF_ID_SHIFT 0 +#define DMAE_CMD_DST_VF_ID_MASK 0xFF +#define DMAE_CMD_DST_VF_ID_SHIFT 8 + __le32 comp_addr_lo; + __le32 comp_addr_hi; + __le32 comp_val; + __le32 crc32; + __le32 crc_32_c; + __le16 crc16; + __le16 crc16_c; + __le16 crc10; + __le16 reserved; + __le16 xsum16; + __le16 xsum8; +}; + +enum dmae_cmd_comp_crc_en_enum { + dmae_cmd_comp_crc_disabled, + dmae_cmd_comp_crc_enabled, + MAX_DMAE_CMD_COMP_CRC_EN_ENUM +}; + +enum dmae_cmd_comp_func_enum { + dmae_cmd_comp_func_to_src, + dmae_cmd_comp_func_to_dst, + MAX_DMAE_CMD_COMP_FUNC_ENUM +}; + +enum dmae_cmd_comp_word_en_enum { + dmae_cmd_comp_word_disabled, + dmae_cmd_comp_word_enabled, + MAX_DMAE_CMD_COMP_WORD_EN_ENUM +}; + +enum dmae_cmd_c_dst_enum { + dmae_cmd_c_dst_pcie, + dmae_cmd_c_dst_grc, + MAX_DMAE_CMD_C_DST_ENUM +}; + +enum dmae_cmd_dst_enum { + dmae_cmd_dst_none_0, + dmae_cmd_dst_pcie, + dmae_cmd_dst_grc, + dmae_cmd_dst_none_3, + MAX_DMAE_CMD_DST_ENUM +}; + +enum dmae_cmd_error_handling_enum { + dmae_cmd_error_handling_send_regular_comp, + dmae_cmd_error_handling_send_comp_with_err, + dmae_cmd_error_handling_dont_send_comp, + MAX_DMAE_CMD_ERROR_HANDLING_ENUM +}; + +enum dmae_cmd_src_enum { + dmae_cmd_src_pcie, + dmae_cmd_src_grc, + MAX_DMAE_CMD_SRC_ENUM +}; + +/* IGU cleanup command */ +struct igu_cleanup { + __le32 sb_id_and_flags; +#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF +#define IGU_CLEANUP_RESERVED0_SHIFT 0 +#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 +#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27 +#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7 +#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28 +#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1 +#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; 
+}; + +/* IGU firmware driver command */ +union igu_command { + struct igu_prod_cons_update prod_cons_update; + struct igu_cleanup cleanup; +}; + +/* IGU firmware driver command */ +struct igu_command_reg_ctrl { + __le16 opaque_fid; + __le16 igu_command_reg_ctrl_fields; +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0 +#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7 +#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15 +}; + +/* IGU mapping line structure */ +struct igu_mapping_line { + __le32 igu_mapping_line_fields; +#define IGU_MAPPING_LINE_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_VALID_SHIFT 0 +#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1 +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9 +#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17 +#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F +#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18 +#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF +#define IGU_MAPPING_LINE_RESERVED_SHIFT 24 +}; + +/* IGU MSIX line structure */ +struct igu_msix_vector { + struct regpair address; + __le32 data; + __le32 msix_vector_fields; +#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1 +#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0 +#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF +#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1 +#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF +#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16 +#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF +#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 +}; + +struct mstorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +/* per encapsulation type enabling flags */ +struct prs_reg_encapsulation_type_en { + u8 flags; +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1 +#define 
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6 +}; + +enum pxp_tph_st_hint { + TPH_ST_HINT_BIDIR, + TPH_ST_HINT_REQUESTER, + TPH_ST_HINT_TARGET, + TPH_ST_HINT_TARGET_PRIO, + MAX_PXP_TPH_ST_HINT +}; + +/* QM hardware structure of enable bypass credit mask */ +struct qm_rf_bypass_mask { + u8 flags; +#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1 +#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2 +#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3 +#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4 +#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7 +}; + +/* QM hardware structure of opportunistic credit mask */ +struct qm_rf_opportunistic_mask { + __le16 flags; +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9 +}; + +/* QM hardware structure of QM map memory */ +struct qm_rf_pq_map { + __le32 reg; +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 +}; + +/* Completion params for aggregated interrupt completion */ +struct sdm_agg_int_comp_params { + __le16 params; +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 +#define 
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
 };
 
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
 enum block_addr {
- GRCBASE_GRC = 0x50000,
- GRCBASE_MISCS = 0x9000,
- GRCBASE_MISC = 0x8000,
- GRCBASE_DBU = 0xa000,
- GRCBASE_PGLUE_B = 0x2a8000,
- GRCBASE_CNIG = 0x218000,
- GRCBASE_CPMU = 0x30000,
- GRCBASE_NCSI = 0x40000,
- GRCBASE_OPTE = 0x53000,
- GRCBASE_BMB = 0x540000,
- GRCBASE_PCIE = 0x54000,
- GRCBASE_MCP = 0xe00000,
- GRCBASE_MCP2 = 0x52000,
- GRCBASE_PSWHST = 0x2a0000,
- GRCBASE_PSWHST2 = 0x29e000,
- GRCBASE_PSWRD = 0x29c000,
- GRCBASE_PSWRD2 = 0x29d000,
- GRCBASE_PSWWR = 0x29a000,
- GRCBASE_PSWWR2 = 0x29b000,
- GRCBASE_PSWRQ = 0x280000,
- GRCBASE_PSWRQ2 = 0x240000,
- GRCBASE_PGLCS = 0x0,
- GRCBASE_PTU = 0x560000,
- GRCBASE_DMAE = 0xc000,
- GRCBASE_TCM = 0x1180000,
- GRCBASE_MCM = 0x1200000,
- GRCBASE_UCM = 0x1280000,
- GRCBASE_XCM = 0x1000000,
- GRCBASE_YCM = 0x1080000,
- GRCBASE_PCM = 0x1100000,
- GRCBASE_QM = 0x2f0000,
- GRCBASE_TM = 0x2c0000,
- GRCBASE_DORQ = 0x100000,
- GRCBASE_BRB = 0x340000,
- GRCBASE_SRC = 0x238000,
- GRCBASE_PRS = 0x1f0000,
- GRCBASE_TSDM = 0xfb0000,
- GRCBASE_MSDM = 0xfc0000,
- GRCBASE_USDM = 0xfd0000,
- GRCBASE_XSDM = 0xf80000,
- GRCBASE_YSDM = 0xf90000,
- GRCBASE_PSDM = 0xfa0000,
- GRCBASE_TSEM = 0x1700000,
- GRCBASE_MSEM = 0x1800000,
- GRCBASE_USEM = 0x1900000,
- GRCBASE_XSEM = 0x1400000,
- GRCBASE_YSEM = 0x1500000,
- GRCBASE_PSEM = 0x1600000,
- GRCBASE_RSS = 0x238800,
- GRCBASE_TMLD = 0x4d0000,
- GRCBASE_MULD = 0x4e0000,
- GRCBASE_YULD = 0x4c8000,
- GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PRM = 0x230000,
- GRCBASE_PBF_PB1 = 0xda0000,
- GRCBASE_PBF_PB2 = 0xda4000,
- GRCBASE_RPB = 0x23c000,
- GRCBASE_BTB = 0xdb0000,
- GRCBASE_PBF = 0xd80000,
- GRCBASE_RDIF = 0x300000,
- GRCBASE_TDIF = 0x310000,
- GRCBASE_CDU = 0x580000,
- GRCBASE_CCFC = 0x2e0000,
- GRCBASE_TCFC = 0x2d0000,
- GRCBASE_IGU = 0x180000,
- GRCBASE_CAU = 0x1c0000,
- GRCBASE_UMAC = 0x51000,
- GRCBASE_XMAC = 0x210000,
- GRCBASE_DBG = 0x10000,
- GRCBASE_NIG = 0x500000,
- GRCBASE_WOL = 0x600000,
- GRCBASE_BMBN = 0x610000,
- GRCBASE_IPC = 0x20000,
- GRCBASE_NWM = 0x800000,
- GRCBASE_NWS = 0x700000,
- GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x620000,
- GRCBASE_MISC_AEU = 0x8000,
- GRCBASE_BAR0_MAP = 0x1c00000,
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x620000,
+ GRCBASE_LED = 0x6b8000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
 MAX_BLOCK_ADDR
 };
 
@@ -879,8 +1364,8 @@ enum block_id {
 BLOCK_PSWRQ,
 BLOCK_PSWRQ2,
 BLOCK_PGLCS,
- BLOCK_PTU,
 BLOCK_DMAE,
+ BLOCK_PTU,
 BLOCK_TCM,
 BLOCK_MCM,
 BLOCK_UCM,
@@ -934,141 +1419,216 @@ enum block_id {
 BLOCK_NWS,
 BLOCK_MS,
 BLOCK_PHY_PCIE,
+ BLOCK_LED,
 BLOCK_MISC_AEU,
 BLOCK_BAR0_MAP,
 MAX_BLOCK_ID
 };
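Each GRCBASE_* value is the byte offset of one hardware block inside the device's GRC address space, so a register's absolute GRC address is its block base plus the register's offset within the block. A hedged sketch with a hypothetical register offset:

/* Sketch: absolute GRC address of a hypothetical IGU register. */
#define IGU_REG_EXAMPLE_OFFSET 0x40	/* hypothetical offset inside the IGU block */
u32 grc_addr = GRCBASE_IGU + IGU_REG_EXAMPLE_OFFSET;	/* 0x180000 + 0x40 */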
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
 };
 
-struct dmae_cmd {
- __le32 opcode;
-#define DMAE_CMD_SRC_MASK 0x1
-#define DMAE_CMD_SRC_SHIFT 0
-#define DMAE_CMD_DST_MASK 0x3
-#define DMAE_CMD_DST_SHIFT 1
-#define DMAE_CMD_C_DST_MASK 0x1
-#define DMAE_CMD_C_DST_SHIFT 3
-#define DMAE_CMD_CRC_RESET_MASK 0x1
-#define DMAE_CMD_CRC_RESET_SHIFT 4
-#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
-#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
-#define DMAE_CMD_COMP_FUNC_MASK 0x1
-#define DMAE_CMD_COMP_FUNC_SHIFT 7
-#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
-#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
-#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
-#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
-#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
-#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
-#define DMAE_CMD_RESERVED1_MASK 0x1
-#define DMAE_CMD_RESERVED1_SHIFT 13
-#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
-#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
-#define DMAE_CMD_ERR_HANDLING_MASK 0x3
-#define DMAE_CMD_ERR_HANDLING_SHIFT 16
-#define DMAE_CMD_PORT_ID_MASK 0x3
-#define DMAE_CMD_PORT_ID_SHIFT 18
-#define DMAE_CMD_SRC_PF_ID_MASK 0xF
-#define DMAE_CMD_SRC_PF_ID_SHIFT 20
-#define DMAE_CMD_DST_PF_ID_MASK 0xF
-#define DMAE_CMD_DST_PF_ID_SHIFT 24
-#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
-#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
-#define DMAE_CMD_RESERVED2_MASK 0x3
-#define DMAE_CMD_RESERVED2_SHIFT 30
- __le32 src_addr_lo;
- __le32 src_addr_hi;
- __le32 dst_addr_lo;
- __le32 dst_addr_hi;
- __le16 length /* Length in DW */;
- __le16 opcode_b;
-#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
-#define DMAE_CMD_SRC_VF_ID_SHIFT 0
-#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
-#define DMAE_CMD_DST_VF_ID_SHIFT 8
- __le32 comp_addr_lo /* PCIe completion address low or grc address */;
- __le32 comp_addr_hi;
- __le32 comp_val /* Value to write to copmletion address */;
- __le32 crc32 /* crc16 result */;
- __le32 crc_32_c /* crc32_c result */;
- __le16 crc16 /* crc16 result */;
- __le16 crc16_c /* crc16_c result */;
- __le16 crc10 /* crc_t10 result */;
- __le16 reserved;
- __le16 xsum16 /* checksum16 result */;
- __le16 xsum8 /* checksum8 result */;
+/* Chip IDs */
+enum chip_ids {
+ CHIP_RESERVED,
+ CHIP_BB_B0,
+ CHIP_RESERVED2,
+ MAX_CHIP_IDS
 };
 
-struct igu_cleanup {
- __le32 sb_id_and_flags;
-#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
-#define IGU_CLEANUP_RESERVED0_SHIFT 0
-#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
-#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
-#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
-#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
-#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
-#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
- __le32 reserved1;
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ __le16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
 };
 
-union igu_command {
- struct igu_prod_cons_update prod_cons_update;
- struct igu_cleanup cleanup;
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ __le16 names_offset;
+ __le16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ __le16 regs_offset;
 };
 
-struct igu_command_reg_ctrl {
- __le16 opaque_fid;
- __le16 igu_command_reg_ctrl_fields;
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
-#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
-#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
 };
 
-struct igu_mapping_line {
- __le32 igu_mapping_line_fields;
-#define IGU_MAPPING_LINE_VALID_MASK 0x1
-#define IGU_MAPPING_LINE_VALID_SHIFT 0
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
-#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
-#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
-#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
-#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
-#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
-#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+/* Attention register result */
+struct dbg_attn_reg_result {
+ __le32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
+ __le16 attn_idx_offset;
+ __le16 reserved;
+ __le32 sts_val;
+ __le32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ __le16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* mode header */
+struct dbg_mode_hdr {
+ __le16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ __le16 attn_idx_offset;
+ __le32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+ __le32 sts_clr_address;
+ __le32 mask_address;
+};
+
+/* attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
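A dump parser walking one dbg_attn_block_result would pull the register count out of the packed data byte before touching reg_results. A hedged sketch, reusing the GET_FIELD helper sketched earlier and assuming a kernel context for pr_info/le32_to_cpu:

/* Sketch: print the status/mask pairs attached to one block result. */
void print_block_attn(const struct dbg_attn_block_result *res)
{
	u8 num_regs = GET_FIELD(res->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
	u8 i;

	for (i = 0; i < num_regs && i < 15; i++) {	/* reg_results[] holds 15 */
		const struct dbg_attn_reg_result *reg = &res->reg_results[i];

		/* sts_val/mask_val are little-endian on the wire */
		pr_info("sts=0x%08x mask=0x%08x\n",
			le32_to_cpu(reg->sts_val), le32_to_cpu(reg->mask_val));
	}
}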
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_TOO_MANY_INPUTS,
+ DBG_STATUS_INPUT_OVERLAP,
+ DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ MAX_DBG_STATUS
 };
 
-struct igu_msix_vector {
- struct regpair address;
- __le32 data;
- __le32 msix_vector_fields;
-#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
-#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
-#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
-#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
-#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
-#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
-#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
-#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u8 active;
+ u8 active_phys_tcs;
+ __le16 num_pbf_cmd_lines;
+ __le16 num_btb_blocks;
+ __le16 reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u8 vport_id;
+ u8 tc_id;
+ u8 wrr_group;
+ u8 rl_valid;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ __le32 vport_rl;
+ __le16 vport_wfq;
+ __le16 first_tx_pq_id[NUM_OF_TCS];
 };
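These three init_qm_*_params structures are what a driver fills in before the QM runtime init calls declared later in this header. A hedged sketch with hypothetical queue numbers (first_tx_pq_id is filled by qed_qm_pf_rt_init itself, per its documentation below):

/* Sketch: one PF PQ on TC 0 feeding vport 0; all values hypothetical. */
struct init_qm_pq_params pq_params[1] = {
	{ .vport_id = 0, .tc_id = 0, .wrr_group = 1, .rl_valid = 0 },
};

struct init_qm_vport_params vport_params[1] = {
	{ .vport_rl = 0, .vport_wfq = 0 },	/* no rate limit, no WFQ weight */
};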
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
 enum init_modes {
- MODE_BB_A0,
+ MODE_RESERVED,
 MODE_BB_B0,
 MODE_RESERVED2,
 MODE_ASIC,
@@ -1083,7 +1643,8 @@ enum init_modes {
 MODE_PORTS_PER_ENG_2,
 MODE_PORTS_PER_ENG_4,
 MODE_100G,
- MODE_EAGLE_ENG1_WORKAROUND,
+ MODE_40G,
+ MODE_RESERVED7,
 MAX_INIT_MODES
 };
 
@@ -1096,484 +1657,302 @@ enum init_phases {
 MAX_INIT_PHASES
 };
 
-/* per encapsulation type enabling flags */
-struct prs_reg_encapsulation_type_en {
- u8 flags;
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
-};
-
-enum pxp_tph_st_hint {
- TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
- TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
- TPH_ST_HINT_TARGET,
- TPH_ST_HINT_TARGET_PRIO,
- MAX_PXP_TPH_ST_HINT
-};
-
-/* QM hardware structure of enable bypass credit mask */
-struct qm_rf_bypass_mask {
- u8 flags;
-#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
-#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
-#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
-#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
-#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
-};
-
-/* QM hardware structure of opportunistic credit mask */
-struct qm_rf_opportunistic_mask {
- __le16 flags;
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
-};
-
-/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
- u32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
-};
-
-/* Completion params for aggregated interrupt completion */
-struct sdm_agg_int_comp_params {
- __le16 params;
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
-};
-
-/* SDM operation gen command (generate aggregative interrupt) */
-struct sdm_op_gen {
- __le32 command;
-#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
-#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
-#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
-#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
-#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
-#define SDM_OP_GEN_RESERVED_SHIFT 20
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
 };
 
-/*********************************** Init ************************************/
-
-/* Width of GRC address in bits (addresses are specified in dwords) */
-#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
-
-/* indicates an init that should be applied to any phase ID */
-#define ANY_PHASE_ID 0xffff
-
-/* init pattern size in bytes */
-#define INIT_PATTERN_SIZE_BITS 4
-#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
-
-/* Max size in dwords of a zipped array */
-#define MAX_ZIPPED_SIZE 8192
-
-/* Global PXP window */
-#define NUM_OF_PXP_WIN 19
-#define PXP_WIN_DWORD_SIZE_BITS 10
-#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
-#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
-#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
-
-/********************************* GRC Dump **********************************/
-
-/* width of GRC dump register sequence length in bits */
-#define DUMP_SEQ_LEN_BITS 8
-#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
-
-/* width of GRC dump memory length in bits */
-#define DUMP_MEM_LEN_BITS 18
-#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
-
-/* width of register type ID in bits */
-#define REG_TYPE_ID_BITS 6
-#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
-
-/* width of block ID in bits */
-#define BLOCK_ID_BITS 8
-#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
-
-/******************************** Idle Check *********************************/
-
-/* max number of idle check predicate immediates */
-#define MAX_IDLE_CHK_PRED_IMM 3
-
-/* max number of idle check argument registers */
-#define MAX_IDLE_CHK_READ_REGS 3
-
-/* max number of idle check loops */
-#define MAX_IDLE_CHK_LOOPS 0x10000
-
-/* max idle check address increment */
-#define MAX_IDLE_CHK_INCREMENT 0x10000
-
-/* inicates an undefined idle check line index */
-#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
-
-/* max number of register values following the idle check header */
-#define IDLE_CHK_MAX_DUMP_REGS 2
-
-/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
-#define IDLE_CHK_QM_RD_WR_PTR 0
-#define IDLE_CHK_QM_RD_WR_BANK 1
-
-/**************************************/
-/* HSI Functions constants and macros */
-/**************************************/
-
-/* Number of VLAN priorities */
-#define NUM_OF_VLAN_PRIORITIES 8
-
-/* the MCP Trace meta data signautre is duplicated in the perl script that
- * generats the NVRAM images.
- */
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
-
 /* Binary buffer header */
 struct bin_buffer_hdr {
- u32 offset;
- u32 length /* buffer length in bytes */;
-};
-
-/* binary buffer types */
-enum bin_buffer_type {
- BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
- BIN_BUF_INIT_CMD /* init commands */,
- BIN_BUF_INIT_VAL /* init data */,
- BIN_BUF_INIT_MODE_TREE /* init modes tree */,
- BIN_BUF_IRO /* internal RAM offsets array */,
- MAX_BIN_BUFFER_TYPE
+ __le32 offset;
+ __le32 length;
 };
 
-/* Chip IDs */
-enum chip_ids {
- CHIP_BB_A0 /* BB A0 chip ID */,
- CHIP_BB_B0 /* BB B0 chip ID */,
- CHIP_K2 /* AH chip ID */,
- MAX_CHIP_IDS
+/* binary init buffer types */
+enum bin_init_buffer_type {
+ BIN_BUF_FW_VER_INFO,
+ BIN_BUF_INIT_CMD,
+ BIN_BUF_INIT_VAL,
+ BIN_BUF_INIT_MODE_TREE,
+ BIN_BUF_IRO,
+ MAX_BIN_INIT_BUFFER_TYPE
 };
 
+/* init array header: raw */
 struct init_array_raw_hdr {
 __le32 data;
-#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
-#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
 };
 
+/* init array header: standard */
 struct init_array_standard_hdr {
 __le32 data;
-#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
 };
 
+/* init array header: zipped */
 struct init_array_zipped_hdr {
 __le32 data;
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
 };
 
+/* init array header: pattern */
 struct init_array_pattern_hdr {
 __le32 data;
-#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
 };
 
+/* init array header union */
 union init_array_hdr {
- struct init_array_raw_hdr raw /* raw init array header */;
- struct init_array_standard_hdr standard;
- struct init_array_zipped_hdr zipped /* zipped init array header */;
- struct init_array_pattern_hdr pattern /* pattern init array header */;
+ struct init_array_raw_hdr raw;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped;
+ struct init_array_pattern_hdr pattern;
 };
 
+/* init array types */
 enum init_array_types {
- INIT_ARR_STANDARD /* standard init array */,
- INIT_ARR_ZIPPED /* zipped init array */,
- INIT_ARR_PATTERN /* a repeated pattern */,
+ INIT_ARR_STANDARD,
+ INIT_ARR_ZIPPED,
+ INIT_ARR_PATTERN,
 MAX_INIT_ARRAY_TYPES
 };
 
 /* init operation: callback */
 struct init_callback_op {
- __le32 op_data;
-#define INIT_CALLBACK_OP_OP_MASK 0xF
-#define INIT_CALLBACK_OP_OP_SHIFT 0
-#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
- __le16 callback_id /* Callback ID */;
- __le16 block_id /* Blocks ID */;
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id;
+ __le16 block_id;
 };
 
 /* init operation: delay */
 struct init_delay_op {
- __le32 op_data;
-#define INIT_DELAY_OP_OP_MASK 0xF
-#define INIT_DELAY_OP_OP_SHIFT 0
-#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_DELAY_OP_RESERVED_SHIFT 4
- __le32 delay /* delay in us */;
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay;
 };
 
 /* init operation: if_mode */
 struct init_if_mode_op {
 __le32 op_data;
-#define INIT_IF_MODE_OP_OP_MASK 0xF
-#define INIT_IF_MODE_OP_OP_SHIFT 0
-#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
-#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
-#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
- __le16 reserved2;
- __le16 modes_buf_offset;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
 };
 
-/* init operation: if_phase */
+/* init operation: if_phase */
 struct init_if_phase_op {
 __le32 op_data;
-#define INIT_IF_PHASE_OP_OP_MASK 0xF
-#define INIT_IF_PHASE_OP_OP_SHIFT 0
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
-#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
 __le32 phase_data;
-#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
-#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
-#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
-#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
-#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
-#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
 };
 
 /* init mode operators */
 enum init_mode_ops {
- INIT_MODE_OP_NOT /* init mode not operator */,
- INIT_MODE_OP_OR /* init mode or operator */,
- INIT_MODE_OP_AND /* init mode and operator */,
+ INIT_MODE_OP_NOT,
+ INIT_MODE_OP_OR,
+ INIT_MODE_OP_AND,
 MAX_INIT_MODE_OPS
 };
 
 /* init operation: raw */
 struct init_raw_op {
- __le32 op_data;
-#define INIT_RAW_OP_OP_MASK 0xF
-#define INIT_RAW_OP_OP_SHIFT 0
-#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
-#define INIT_RAW_OP_PARAM1_SHIFT 4
- __le32 param2 /* Init param 2 */;
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2;
 };
 
 /* init array params */
 struct init_op_array_params {
- __le16 size /* array size in dwords */;
- __le16 offset /* array start offset in dwords */;
+ __le16 size;
+ __le16 offset;
 };
 
 /* Write init operation arguments */
 union init_write_args {
- __le32 inline_val;
- __le32 zeros_count;
- __le32 array_offset;
- struct init_op_array_params runtime;
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
 };
 
 /* init operation: write */
 struct init_write_op {
 __le32 data;
-#define INIT_WRITE_OP_OP_MASK 0xF
-#define INIT_WRITE_OP_OP_SHIFT 0
-#define INIT_WRITE_OP_SOURCE_MASK 0x7
-#define INIT_WRITE_OP_SOURCE_SHIFT 4
-#define INIT_WRITE_OP_RESERVED_MASK 0x1
-#define INIT_WRITE_OP_RESERVED_SHIFT 7
-#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
-#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
-#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_WRITE_OP_ADDRESS_SHIFT 9
- union init_write_args args /* Write init operation arguments */;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args;
 };
 
 /* init operation: read */
 struct init_read_op {
 __le32 op_data;
-#define INIT_READ_OP_OP_MASK 0xF
-#define INIT_READ_OP_OP_SHIFT 0
-#define INIT_READ_OP_POLL_TYPE_MASK 0xF
-#define INIT_READ_OP_POLL_TYPE_SHIFT 4
-#define INIT_READ_OP_RESERVED_MASK 0x1
-#define INIT_READ_OP_RESERVED_SHIFT 8
-#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_READ_OP_ADDRESS_SHIFT 9
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
 __le32 expected_val;
+
 };
 
 /* Init operations union */
 union init_op {
- struct init_raw_op raw /* raw init operation */;
- struct init_write_op write /* write init operation */;
- struct init_read_op read /* read init operation */;
- struct init_if_mode_op if_mode /* if_mode init operation */;
- struct init_if_phase_op if_phase /* if_phase init operation */;
- struct init_callback_op callback /* callback init operation */;
- struct init_delay_op delay /* delay init operation */;
+ struct init_raw_op raw;
+ struct init_write_op write;
+ struct init_read_op read;
+ struct init_if_mode_op if_mode;
+ struct init_if_phase_op if_phase;
+ struct init_callback_op callback;
+ struct init_delay_op delay;
 };
 
 /* Init command operation types */
 enum init_op_types {
- INIT_OP_READ /* GRC read init command */,
- INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_READ,
+ INIT_OP_WRITE,
 INIT_OP_IF_MODE,
 INIT_OP_IF_PHASE,
- INIT_OP_DELAY /* delay init command */,
- INIT_OP_CALLBACK /* callback init command */,
+ INIT_OP_DELAY,
+ INIT_OP_CALLBACK,
 MAX_INIT_OP_TYPES
 };
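An init-tool interpreter dispatches on the op field of each init_op and, for writes, on the source field, which indexes the init_source_types enum declared just below. A hedged sketch of decoding one write op, reusing the GET_FIELD helper sketched earlier ('op' is a hypothetical union init_op pointer):

/* Sketch: unpack the packed dword of an init write operation. */
u32 data = le32_to_cpu(op->write.data);
u32 opcode = GET_FIELD(data, INIT_WRITE_OP_OP);	/* enum init_op_types */
u32 source = GET_FIELD(data, INIT_WRITE_OP_SOURCE);	/* enum init_source_types */
u32 addr_dw = GET_FIELD(data, INIT_WRITE_OP_ADDRESS);	/* GRC address, in dwords */
u32 addr_bytes = addr_dw << 2;	/* GRC addresses are dword-granular */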
+/* init polling types */
 enum init_poll_types {
- INIT_POLL_NONE /* No polling */,
- INIT_POLL_EQ /* init value is included in the init command */,
- INIT_POLL_OR /* init value is all zeros */,
- INIT_POLL_AND /* init value is an array of values */,
+ INIT_POLL_NONE,
+ INIT_POLL_EQ,
+ INIT_POLL_OR,
+ INIT_POLL_AND,
 MAX_INIT_POLL_TYPES
 };
 
 /* init source types */
 enum init_source_types {
- INIT_SRC_INLINE /* init value is included in the init command */,
- INIT_SRC_ZEROS /* init value is all zeros */,
- INIT_SRC_ARRAY /* init value is an array of values */,
- INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ INIT_SRC_INLINE,
+ INIT_SRC_ZEROS,
+ INIT_SRC_ARRAY,
+ INIT_SRC_RUNTIME,
 MAX_INIT_SOURCE_TYPES
 };
 
 /* Internal RAM Offsets macro data */
 struct iro {
- u32 base /* RAM field offset */;
- u16 m1 /* multiplier 1 */;
- u16 m2 /* multiplier 2 */;
- u16 m3 /* multiplier 3 */;
- u16 size /* RAM field size */;
-};
-
-/* QM per-port init parameters */
-struct init_qm_port_params {
- u8 active /* Indicates if this port is active */;
- u8 num_active_phys_tcs;
- u16 num_pbf_cmd_lines;
- u16 num_btb_blocks;
- __le16 reserved;
+ __le32 base;
+ __le16 m1;
+ __le16 m2;
+ __le16 m3;
+ __le16 size;
 };
 
-/* QM per-PQ init parameters */
-struct init_qm_pq_params {
- u8 vport_id /* VPORT ID */;
- u8 tc_id /* TC ID */;
- u8 wrr_group /* WRR group */;
- u8 reserved;
-};
+/**
+ * @brief qed_dbg_print_attn - Prints attention registers values in the specified results struct.
+ *
+ * @param p_hwfn
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
 
-/* QM per-vport init parameters */
-struct init_qm_vport_params {
- u32 vport_rl;
- u16 vport_wfq;
- u16 first_tx_pq_id[NUM_OF_TCS];
-};
+#define MAX_NAME_LEN 16
 
 /* Win 2 */
 #define GTT_BAR0_MAP_REG_IGU_CMD \
 0x00f000UL
+
 /* Win 3 */
 #define GTT_BAR0_MAP_REG_TSDM_RAM \
 0x010000UL
+
 /* Win 4 */
 #define GTT_BAR0_MAP_REG_MSDM_RAM \
 0x011000UL
+
 /* Win 5 */
 #define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
 0x012000UL
+
 /* Win 6 */
 #define GTT_BAR0_MAP_REG_USDM_RAM \
 0x013000UL
+
 /* Win 7 */
 #define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
 0x014000UL
+
 /* Win 8 */
 #define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
 0x015000UL
+
 /* Win 9 */
 #define GTT_BAR0_MAP_REG_XSDM_RAM \
 0x016000UL
+
 /* Win 10 */
 #define GTT_BAR0_MAP_REG_YSDM_RAM \
 0x017000UL
+
 /* Win 11 */
 #define GTT_BAR0_MAP_REG_PSDM_RAM \
 0x018000UL
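Each "Win N" constant is the fixed BAR0 byte offset of a 4KB GTT window onto one storm RAM, so software reaches, say, USDM RAM through the window rather than through a raw GRC address. A hedged sketch, where bar0_base is a hypothetical ioremapped BAR0 pointer:

/* Sketch: write a dword inside the USDM RAM window. */
u32 ram_byte_off = 0x100;	/* hypothetical offset; must stay within the 4KB window */
void __iomem *addr = bar0_base + GTT_BAR0_MAP_REG_USDM_RAM + ram_byte_off;

writel(0xdeadbeef, addr);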
@@ -1584,785 +1963,718 @@ struct init_qm_vport_params {
 * Returns the required host memory size in 4KB units.
 * Must be called before all QM init HSI functions.
 *
- * @param pf_id - physical function ID
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
 *
 * @return The required host memory size in 4KB units.
 */
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs);
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
 
 struct qed_qm_common_rt_init_params {
- u8 max_ports_per_engine;
- u8 max_phys_tcs_per_port;
- bool pf_rl_en;
- bool pf_wfq_en;
- bool vport_rl_en;
- bool vport_wfq_en;
- struct init_qm_port_params *port_params;
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
 };
 
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
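Taken together with the per-PQ and per-vport arrays sketched earlier, a PF bring-up path fills qed_qm_pf_rt_init_params and hands it to qed_qm_pf_rt_init. A hedged sketch with hypothetical sizing; p_hwfn/p_ptt come from the surrounding driver context:

/* Sketch only; all counts hypothetical, error handling elided. */
struct qed_qm_pf_rt_init_params qm_params = {
	.port_id	= 0,
	.pf_id		= 0,
	.is_first_pf	= true,
	.num_pf_cids	= 64,		/* hypothetical connection count */
	.num_pf_pqs	= 1,
	.num_vports	= 1,
	.pq_params	= pq_params,	/* arrays from the earlier sketch */
	.vport_params	= vport_params,
};

if (qed_qm_pf_rt_init(p_hwfn, p_ptt, &qm_params))
	;	/* returns -1 on error per the conventions in this header */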
 /**
- * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
- * engine phase.
+ * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
 *
 * @param p_hwfn
- * @param max_ports_per_engine - max number of ports per engine in HW
- * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
- * @param port_params - array of size MAX_NUM_PORTS with
- * arameters for each port
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
 *
 * @return 0 on success, -1 on error.
 */
-int qed_qm_common_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_qm_common_rt_init_params *p_params);
-
-struct qed_qm_pf_rt_init_params {
- u8 port_id;
- u8 pf_id;
- u8 max_phys_tcs_per_port;
- bool is_first_pf;
- u32 num_pf_cids;
- u32 num_vf_cids;
- u32 num_tids;
- u16 start_pq;
- u16 num_pf_pqs;
- u16 num_vf_pqs;
- u8 start_vport;
- u8 num_vports;
- u8 pf_wfq;
- u32 pf_rl;
- struct init_qm_pq_params *pq_params;
- struct init_qm_vport_params *vport_params;
-};
-
-int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
 
 /**
- * @brief qed_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
 *
 * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
 *
 * @return 0 on success, -1 on error.
 */
-int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl);
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
 
 /**
- * @brief qed_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT
 *
 * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * qed_qm_pf_rt_init
+ * @param vport_wfq - WFQ weight. Must be non-zero.
 *
 * @return 0 on success, -1 on error.
 */
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
 
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl);
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
 
 /**
 * @brief qed_send_qm_stop_cmd Sends a stop command to the QM
 *
 * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt
 * @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
 *
- * @return bool, true if successful, false if timeout occurred while waiting
- * for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for QM command done.
 */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs);
 
-bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs);
-
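During teardown a driver first stops the PQs and later releases them, checking the boolean return for a timeout each time. A hedged sketch with a hypothetical PQ range:

/* Sketch: stop, then release, 16 Tx PQs starting at PQ 0 (values hypothetical). */
if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 16))
	pr_err("QM stop command timed out\n");	/* false indicates timeout */
if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 16))
	pr_err("QM release command timed out\n");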
+/**
+ * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 dest_port);
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 struct qed_ptt *p_ptt, bool vxlan_enable);
+
+/**
+ * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
 void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, bool eth_gre_enable,
- bool ip_gre_enable);
+ struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable);
+
+/**
+ * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, bool eth_geneve_enable,
- bool ip_geneve_enable);
-
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
- (IRO[6].base + ((global_queue_id) * IRO[6].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size)
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
-/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
-/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[17].base + ((stat_counter_id) * IRO[17].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
-/* Mstorm producers */
-#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1))
-#define MSTORM_PRODS_SIZE (IRO[18].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
-/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[20].base + ((stat_counter_id) * IRO[20].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
-/* Ustorm queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[21].base + ((queue_id) * IRO[21].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
-/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
-/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
-/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
-/* Ystorm queue zone */
-#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[25].base + ((queue_id) * IRO[25].m1))
-#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
-/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[26].base + ((rss_id) * IRO[26].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[27].base + ((rss_id) * IRO[27].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
-/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
-/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
-/* Mstorm rq-cons of given queue-id */
-#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
- (IRO[30].base + ((rq_queue_id) * IRO[30].m1))
-#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size)
-/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size)
-/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
-/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size)
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[34].base + ((pf_id) * IRO[34].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size)
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[35].base + ((pf_id) * IRO[35].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size)
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size)
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size)
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size)
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size)
-/* Mstorm FCoE RX stats */
-#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size)
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size)
-/* Pstorm RoCE statistics */
-#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
- (IRO[42].base + ((stat_counter_id) * IRO[42].m1))
-#define PSTORM_ROCE_STAT_SIZE (IRO[42].size)
-/* Tstorm RoCE statistics */
-#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
- (IRO[43].base + ((stat_counter_id) * IRO[43].m1))
-#define TSTORM_ROCE_STAT_SIZE (IRO[43].size)
-
-static const struct iro iro_arr[44] = {
- { 0x10, 0x0, 0x0, 0x0, 0x8 },
- { 0x47c8, 0x60, 0x0, 0x0, 0x60 },
- { 0x5e30, 0x20, 0x0, 0x0, 0x20 },
- { 0x510, 0x8, 0x0, 0x0, 0x4 },
- { 0x490, 0x8, 0x0, 0x0, 0x4 },
- { 0x10, 0x8, 0x0, 0x0, 0x2 },
- { 0x90, 0x8, 0x0, 0x0, 0x2 },
- { 0x4940, 0x0, 0x0, 0x0, 0x78 },
- { 0x3de0, 0x0, 0x0, 0x0, 0x78 },
- { 0x2998, 0x0, 0x0, 0x0, 0x78 },
- { 0x4750, 0x0, 0x0, 0x0, 0x78 },
- { 0x56d0, 0x0, 0x0, 0x0, 0x78 },
- { 0x7e50, 0x0, 0x0, 0x0, 0x78 },
- { 0x100, 0x8, 0x0, 0x0, 0x8 },
- { 0x5c10, 0x10, 0x0, 0x0, 0x10 },
- { 0xb508, 0x30, 0x0, 0x0, 0x30 },
- { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
- { 0x58a0, 0x40, 0x0, 0x0, 0x40 },
- { 0x200, 0x10, 0x0, 0x0, 0x8 },
- { 0xa230, 0x0, 0x0, 0x0, 0x4 },
- { 0x8058, 0x40, 0x0, 0x0, 0x30 },
- { 0xd00, 0x8, 0x0, 0x0, 0x8 },
- { 0x2b30, 0x80, 0x0, 0x0, 0x38 },
- { 0xa808, 0x0, 0x0, 0x0, 0xf0 },
- { 0xa8f8, 0x8, 0x0, 0x0, 0x8 },
- { 0x80, 0x8, 0x0, 0x0, 0x8 },
- { 0xac0, 0x8, 0x0, 0x0, 0x8 },
- { 0x2580, 0x8, 0x0, 0x0, 0x8 },
- { 0x2500, 0x8, 0x0, 0x0, 0x8 },
- { 0x440, 0x8, 0x0, 0x0, 0x2 },
- { 0x1800, 0x8, 0x0, 0x0, 0x2 },
- { 0x1a00, 0x10, 0x8, 0x0, 0x2 },
- { 0x640, 0x10, 0x8, 0x0, 0x2 },
- { 0xd9b8, 0x38, 0x0, 0x0, 0x24 },
- { 0x11048, 0x10, 0x0, 0x0, 0x8 },
- { 0x11678, 0x38, 0x0, 0x0, 0x18 },
- { 0xaec0, 0x30, 0x0, 0x0, 0x10 },
- { 0x8700, 0x28, 0x0, 0x0, 0x18 },
- { 0xec00, 0x10, 0x0, 0x0, 0x10 },
- { 0xde38, 0x40, 0x0, 0x0, 0x30 },
- { 0x121a8, 0x38, 0x0, 0x0, 0x8 },
- { 0xf068, 0x20, 0x0, 0x0, 0x20 },
- { 0x2b68, 0x80, 0x0, 0x0, 0x10 },
- { 0x4ab8, 0x10, 0x0, 0x0, 0x10 },
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable);
+
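With the tunnel prototypes above, enabling VXLAN offload is a two-step sequence: program the UDP destination port, then flip the enable flag. A hedged sketch (4789 is the IANA-assigned VXLAN port, used here purely as an example value):

/* Sketch: point HW at the VXLAN UDP port, then enable the tunnel type. */
qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
qed_set_vxlan_enable(p_hwfn, p_ptt, true);

/* GRE and GENEVE follow the same pattern, e.g.: */
qed_set_gre_enable(p_hwfn, p_ptt, true, true);	/* eth-over-GRE + IP-over-GRE */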
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+ (IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+ (IRO[4].base + (pf_id) * IRO[4].m1)
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+ (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+ (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+ (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+ (IRO[19].base + ((queue_id) * IRO[19].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[21].base + ((pf_id) * IRO[21].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[23].base + ((pf_id) * IRO[23].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[25].base + ((pf_id) * IRO[25].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
+ (IRO[26].base + ((ethtype) * IRO[26].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[28].base + ((pf_id) * IRO[28].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[29].base + ((queue_id) * IRO[29].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
+
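Each *_OFFSET macro computes a base-plus-stride internal-RAM address from the iro_arr table below: base is the field's RAM offset, m1 (and m2/m3 where used) the per-instance stride, and size the byte length of one instance. A hedged illustrative expansion, assuming the driver aliases its runtime copy of iro_arr as IRO:

/* Sketch: what TSTORM_PORT_STAT_OFFSET(port_id) evaluates to. */
u32 off = IRO[1].base + (port_id * IRO[1].m1);	/* e.g. 0x4cb0 + port_id * 0x78 */
/* IRO[1].size (0x78) is the byte length of one per-port stats block. */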
0x0, 0x0, 0x18}, + {0xeff8, 0x10, 0x0, 0x0, 0x10}, + {0xdd08, 0x48, 0x0, 0x0, 0x38}, + {0xf460, 0x20, 0x0, 0x0, 0x20}, + {0x2b80, 0x80, 0x0, 0x0, 0x10}, + {0x5000, 0x10, 0x0, 0x0, 0x10}, }; /* Runtime array offsets */ -#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 -#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 -#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 -#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 -#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 -#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 -#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 -#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 -#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 -#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 -#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 -#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 -#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 -#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 -#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 -#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 -#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 -#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 -#define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 -#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 -#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 -#define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6667 -#define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6669 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 
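For orientation: the *_OFFSET/*_SIZE macros above locate per-instance blocks in storm RAM through the firmware-generated iro_arr table, where base is the block start, m1 the stride applied to the index (pf_id, queue_id, ...), and size the block length. A minimal sketch of the addressing pattern, assuming the five initializer fields map to {base, m1, m2, m3, size} as in the qed driver's struct iro:

	#include <linux/types.h>

	/* Sketch only: mirrors the expansion of e.g.
	 * MSTORM_QUEUE_STAT_OFFSET(id), which resolves to
	 * IRO[18].base + (id) * IRO[18].m1. Field layout assumed. */
	struct iro {
		u32 base;	/* block start within storm RAM */
		u16 m1;		/* per-index stride */
		u16 m2;
		u16 m3;
		u16 size;	/* length of one block */
	};

	static inline u32 iro_block_offset(const struct iro *iro, u32 index)
	{
		return iro->base + index * iro->m1;
	}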
 
 /* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29836
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29966
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30507
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30764
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30798
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30814
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30848
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31008
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31009
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31522
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32546
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33058
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922
-
-#define RUNTIME_ARRAY_SIZE 33923
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30799
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30815
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30849
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31523
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32547
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33059
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
+
+#define RUNTIME_ARRAY_SIZE 33925
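The *_RT_OFFSET values above are not register addresses but u32 slot indices into a runtime-init array of RUNTIME_ARRAY_SIZE entries that the init code later flushes to the corresponding registers; *_RT_SIZE gives the slot count of multi-entry regions, which is why SRC_REG_LASTFREE_RT_OFFSET is SRC_REG_FIRSTFREE_RT_OFFSET plus its size of 2. A minimal sketch of how such a slot is typically filled (helper and array names hypothetical; the qed driver keeps the array per hwfn and wraps stores in a STORE_RT_REG-style macro):

	#include <linux/types.h>

	/* Hypothetical helper: record a value in the runtime-array slot named
	 * by a *_RT_OFFSET constant; init code later writes the array out. */
	static u32 rt_data[RUNTIME_ARRAY_SIZE];

	static inline void rt_store(u32 rt_offset, u32 val)
	{
		rt_data[rt_offset] = val;
	}

	/* e.g. rt_store(DORQ_REG_PF_WAKE_ALL_RT_OFFSET, 1); */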
 
 /* The eth storm context for the Tstorm */
 struct tstorm_eth_conn_st_ctx {
@@ -2380,266 +2692,266 @@ struct xstorm_eth_conn_st_ctx {
 };
 
 struct xstorm_eth_conn_ag_ctx {
-	u8 reserved0 /* cdu_validation */;
-	u8 eth_state /* state */;
-	u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
-	u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+	u8 reserved0;
+	u8 eth_state;
+	u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+	u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
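The MASK/SHIFT pairs above pack multi-bit fields into the flagsN bytes; a field is read by shifting right and masking, and written by the inverse. A minimal sketch of the accessor pattern (the qed driver wraps this in GET_FIELD/SET_FIELD-style macros; the helper names here are illustrative):

	/* Read a packed field out of a flags byte. */
	static inline u8 ctx_get_field(u8 flags, u8 mask, u8 shift)
	{
		return (flags >> shift) & mask;
	}

	/* Return the flags byte with the packed field replaced by val. */
	static inline u8 ctx_set_field(u8 flags, u8 mask, u8 shift, u8 val)
	{
		return (flags & ~(mask << shift)) | ((val & mask) << shift);
	}

	/* e.g. ctx_get_field(flags1, XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK,
	 *		      XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT) */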
 	u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
 	u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
-	u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+	u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
 	u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
 	u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
 	u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
 	u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
 	u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
 	u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
 #define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
 	u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
 	u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
 	u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
 	u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
-	u8 edpm_event_id /* byte2 */;
-	__le16 physical_q0 /* physical_q0 */;
-	__le16 word1 /* physical_q1 */;
-	__le16 edpm_num_bds /* physical_q2 */;
-	__le16 tx_bd_cons /* word3 */;
-	__le16 tx_bd_prod /* word4 */;
-	__le16 go_to_bd_cons /* word5 */;
-	__le16 conn_dpi /* conn_dpi */;
-	u8 byte3 /* byte3 */;
-	u8 byte4 /* byte4 */;
-	u8 byte5 /* byte5 */;
-	u8 byte6 /* byte6 */;
-	__le32 reg0 /* reg0 */;
-	__le32 reg1 /* reg1 */;
-	__le32 reg2 /* reg2 */;
-	__le32 reg3 /* reg3 */;
-	__le32 reg4 /* reg4 */;
-	__le32 reg5 /* cf_array0 */;
-	__le32 reg6 /* cf_array1 */;
-	__le16 word7 /* word7 */;
-	__le16 word8 /* word8 */;
-	__le16 word9 /* word9 */;
-	__le16 word10 /* word10 */;
-	__le32 reg7 /* reg7 */;
-	__le32 reg8 /* reg8 */;
-	__le32 reg9 /* reg9 */;
-	u8 byte7 /* byte7 */;
-	u8 byte8 /* byte8 */;
-	u8 byte9 /* byte9 */;
-	u8 byte10 /* byte10 */;
-	u8 byte11 /* byte11 */;
-	u8 byte12 /* byte12 */;
-	u8 byte13 /* byte13 */;
-	u8 byte14 /* byte14 */;
-	u8 byte15 /* byte15 */;
-	u8 byte16 /* byte16 */;
-	__le16 word11 /* word11 */;
-	__le32 reg10 /* reg10 */;
-	__le32 reg11 /* reg11 */;
-	__le32 reg12 /* reg12 */;
-	__le32 reg13 /* reg13 */;
-	__le32 reg14 /* reg14 */;
-	__le32 reg15 /* reg15 */;
-	__le32 reg16 /* reg16 */;
-	__le32 reg17 /* reg17 */;
-	__le32 reg18 /* reg18 */;
-	__le32 reg19 /* reg19 */;
-	__le16 word12 /* word12 */;
-	__le16 word13 /* word13 */;
-	__le16 word14 /* word14 */;
-	__le16 word15 /* word15 */;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+	u8 edpm_event_id;
+	__le16 physical_q0;
+	__le16 quota;
+	__le16 edpm_num_bds;
+	__le16 tx_bd_cons;
+	__le16 tx_bd_prod;
+	__le16 tx_class;
+	__le16 conn_dpi;
+	u8 byte3;
+	u8 byte4;
+	u8 byte5;
+	u8 byte6;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le32 reg4;
+	__le32 reg5;
+	__le32 reg6;
+	__le16 word7;
+	__le16 word8;
+	__le16 word9;
+	__le16 word10;
+	__le32 reg7;
+	__le32 reg8;
+	__le32 reg9;
+	u8 byte7;
+	u8 byte8;
+	u8 byte9;
+	u8 byte10;
+	u8 byte11;
+	u8 byte12;
+	u8 byte13;
+	u8 byte14;
+	u8 byte15;
+	u8 byte16;
+	__le16 word11;
+	__le32 reg10;
+	__le32 reg11;
+	__le32 reg12;
+	__le32 reg13;
+	__le32 reg14;
+	__le32 reg15;
+	__le32 reg16;
+	__le32 reg17;
+	__le32 reg18;
+	__le32 reg19;
+	__le16 word12;
+	__le16 word13;
+	__le16 word14;
+	__le16 word15;
 };
 
 /* The eth storm context for the Ystorm */
@@ -2648,220 +2960,220 @@ struct ystorm_eth_conn_st_ctx {
 };
 
 struct ystorm_eth_conn_ag_ctx {
-	u8 byte0 /* cdu_validation */;
-	u8 byte1 /* state */;
-	u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+	u8 byte0;
+	u8 state;
+	u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
 	u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define
YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - __le32 terminate_spqe /* reg0 */; - __le32 reg1 /* reg1 */; - __le16 tx_bd_cons_upd /* word1 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; - __le16 word4 /* word4 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 tx_q0_int_coallecing_timeset; + u8 byte3; + __le16 word0; + __le32 terminate_spqe; + __le32 reg1; + __le16 tx_bd_cons_upd; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; }; struct tstorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 + u8 byte0; + u8 byte1; + u8 flags0; +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* 
timer_stop_all */ -#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 
0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 rx_bd_cons /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 rx_bd_prod /* word1 */; - __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 rx_bd_cons; + u8 byte4; + u8 byte5; + __le16 rx_bd_prod; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; }; struct ustorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */ -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */ -#define 
USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 u8 flags2; -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */ -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define 
USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 tx_bd_cons /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 tx_int_coallecing_timeset /* reg3 */;
- __le16 tx_drv_bd_cons /* word2 */;
- __le16 rx_drv_cqe_cons /* word3 */;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 tx_bd_cons;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 tx_int_coallecing_timeset;
+ __le16 tx_drv_bd_cons;
+ __le16 rx_drv_cqe_cons;
 };
 /* The eth storm context for the Ustorm */
@@ -2876,47 +3188,75 @@ struct mstorm_eth_conn_st_ctx {
 /* eth connection context */
 struct eth_conn_context {
- struct tstorm_eth_conn_st_ctx tstorm_st_context;
- struct regpair tstorm_st_padding[2];
- struct pstorm_eth_conn_st_ctx pstorm_st_context;
- struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
- struct ustorm_eth_conn_st_ctx ustorm_st_context;
- struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
 };
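A note on how the *_MASK/*_SHIFT pairs threaded through these storm context structures are consumed: each flags byte packs several sub-fields, and the driver reads and writes them through generic field helpers rather than open-coded shifts. The sketch below shows the idiom; the qed sources define equivalent GET_FIELD/SET_FIELD macros, so treat these exact definitions as illustrative rather than authoritative.

/* Illustrative field helpers for the MASK/SHIFT pairs above. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~(name##_MASK << name##_SHIFT); \
		(value) |= ((flag) & name##_MASK) << name##_SHIFT; \
	} while (0)

/* Hypothetical use: arm the TX BD-consumer-update completion flow
 * in the Ustorm aggregative context (flags2, bit 6 per the defines). */
static inline void example_arm_tx_bd_upd(struct ustorm_eth_conn_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags2, USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN, 1);
}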
+/* opcodes for the event ring */
+enum eth_event_opcode {
+ ETH_EVENT_UNUSED,
+ ETH_EVENT_VPORT_START,
+ ETH_EVENT_VPORT_UPDATE,
+ ETH_EVENT_VPORT_STOP,
+ ETH_EVENT_TX_QUEUE_START,
+ ETH_EVENT_TX_QUEUE_STOP,
+ ETH_EVENT_RX_QUEUE_START,
+ ETH_EVENT_RX_QUEUE_UPDATE,
+ ETH_EVENT_RX_QUEUE_STOP,
+ ETH_EVENT_FILTERS_UPDATE,
+ ETH_EVENT_RESERVED,
+ ETH_EVENT_RESERVED2,
+ ETH_EVENT_RESERVED3,
+ ETH_EVENT_RX_ADD_UDP_FILTER,
+ ETH_EVENT_RX_DELETE_UDP_FILTER,
+ ETH_EVENT_RESERVED4,
+ ETH_EVENT_RESERVED5,
+ MAX_ETH_EVENT_OPCODE
+};
+
+/* Classify rule types in E2/E3 */
 enum eth_filter_action {
+ ETH_FILTER_ACTION_UNUSED,
 ETH_FILTER_ACTION_REMOVE,
 ETH_FILTER_ACTION_ADD,
 ETH_FILTER_ACTION_REMOVE_ALL,
 MAX_ETH_FILTER_ACTION
 };
+/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */
 struct eth_filter_cmd {
- u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
- u8 vport_id /* the vport id */;
- u8 action /* filter command action: add/remove/replace */;
- u8 reserved0;
- __le32 vni;
- __le16 mac_lsb;
- __le16 mac_mid;
- __le16 mac_msb;
- __le16 vlan_id;
+ u8 type;
+ u8 vport_id;
+ u8 action;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
 };
+/* $$KEEP_ENDIANNESS$$ */
 struct eth_filter_cmd_header {
- u8 rx;
- u8 tx;
- u8 cmd_cnt;
- u8 assert_on_error;
- u8 reserved1[4];
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
 };
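The filter command pair above is what the vport filter-update ramrod carries: a header saying how many commands follow and on which direction they apply, then an array of eth_filter_cmd entries. A minimal sketch of composing an "add unicast MAC" command follows; the byte packing of the three 16-bit MAC words mirrors what the driver's MAC helper does, and the example_* name is hypothetical:

/* Hypothetical: build a one-command RX filter update adding a MAC. */
static void example_mac_add(struct eth_filter_cmd_header *hdr,
			    struct eth_filter_cmd *cmd,
			    u8 vport_id, const u8 *mac)
{
	hdr->rx = 1;		/* apply on the RX path */
	hdr->cmd_cnt = 1;	/* one command follows */

	cmd->type = ETH_FILTER_TYPE_MAC;
	cmd->vport_id = vport_id;
	cmd->action = ETH_FILTER_ACTION_ADD;
	cmd->mac_msb = cpu_to_le16(((u16)mac[0] << 8) | mac[1]);
	cmd->mac_mid = cpu_to_le16(((u16)mac[2] << 8) | mac[3]);
	cmd->mac_lsb = cpu_to_le16(((u16)mac[4] << 8) | mac[5]);
}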
+/* Ethernet filter types: mac/vlan/pair */
 enum eth_filter_type {
+ ETH_FILTER_TYPE_UNUSED,
 ETH_FILTER_TYPE_MAC,
 ETH_FILTER_TYPE_VLAN,
 ETH_FILTER_TYPE_PAIR,
@@ -2929,463 +3269,3515 @@ enum eth_filter_type {
 MAX_ETH_FILTER_TYPE
 };
+/* Ethernet Ramrod Command IDs */
 enum eth_ramrod_cmd_id {
 ETH_RAMROD_UNUSED,
- ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
- ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
- ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
- ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
- ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
- ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
- ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
- ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
- ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
- ETH_RAMROD_RESERVED,
- ETH_RAMROD_RESERVED2,
- ETH_RAMROD_RESERVED3,
- ETH_RAMROD_RESERVED4,
- ETH_RAMROD_RESERVED5,
- ETH_RAMROD_RESERVED6,
- ETH_RAMROD_RESERVED7,
- ETH_RAMROD_RESERVED8,
+ ETH_RAMROD_VPORT_START,
+ ETH_RAMROD_VPORT_UPDATE,
+ ETH_RAMROD_VPORT_STOP,
+ ETH_RAMROD_RX_QUEUE_START,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ ETH_RAMROD_TX_QUEUE_START,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ ETH_RAMROD_FILTERS_UPDATE,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_ADD_UDP_FILTER,
+ ETH_RAMROD_RX_DELETE_UDP_FILTER,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
 MAX_ETH_RAMROD_CMD_ID
 };
+/* return code from eth sp ramrods */
+struct eth_return_code {
+ u8 value;
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
+
+/* What to do in case an error occurs */
 enum eth_tx_err {
- ETH_TX_ERR_DROP /* Drop erronous packet. */,
+ ETH_TX_ERR_DROP,
 ETH_TX_ERR_ASSERT_MALICIOUS,
 MAX_ETH_TX_ERR
 };
+/* Array of the different error type behaviors */
 struct eth_tx_err_vals {
 __le16 values;
-#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
-#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
-#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
-#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
-#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
-#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
-#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
-#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
-#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
-#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
-#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
-#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
-#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
-};
-
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
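eth_tx_err_vals packs one policy bit per TX error class, and the values of enum eth_tx_err (ETH_TX_ERR_DROP = 0, ETH_TX_ERR_ASSERT_MALICIOUS = 1) fit those single-bit fields. A sketch of building the little-endian policy word with the SET_FIELD helper shown earlier; the pairing of enum values to bits is inferred from the layout, so verify against the driver before relying on it:

/* Hypothetical TX error policy: drop on MTU violation, treat an
 * anti-spoofing hit as malicious. */
static __le16 example_tx_err_policy(void)
{
	u16 val = 0;

	SET_FIELD(val, ETH_TX_ERR_VALS_MTU_VIOLATION, ETH_TX_ERR_DROP);
	SET_FIELD(val, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
		  ETH_TX_ERR_ASSERT_MALICIOUS);
	return cpu_to_le16(val);
}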
+/* vport rss configuration data */
 struct eth_vport_rss_config {
 __le16 capabilities;
-#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
-#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
-#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
-#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
-#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
-#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
- u8 rss_id;
- u8 rss_mode;
- u8 update_rss_key;
- u8 update_rss_ind_table;
- u8 update_rss_capabilities;
- u8 tbl_size;
- __le32 reserved2[2];
- __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
- __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
- __le32 reserved3[2];
-};
-
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
+/* eth vport RSS mode */
 enum eth_vport_rss_mode {
 ETH_VPORT_RSS_MODE_DISABLED,
 ETH_VPORT_RSS_MODE_REGULAR,
 MAX_ETH_VPORT_RSS_MODE
 };
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
 struct eth_vport_rx_mode {
 __le16 state;
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
-#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
 __le16 reserved2[3];
 };
+/* Command for setting tpa parameters */
 struct eth_vport_tpa_param {
- u8 tpa_ipv4_en_flg;
- u8 tpa_ipv6_en_flg;
- u8 tpa_ipv4_tunn_en_flg;
- u8 tpa_ipv6_tunn_en_flg;
- u8 tpa_pkt_split_flg;
- u8 tpa_hdr_data_split_flg;
- u8 tpa_gro_consistent_flg;
- u8 tpa_max_aggs_num;
- u16 tpa_max_size;
- u16 tpa_min_size_to_start;
- u16 tpa_min_size_to_cont;
- u8 max_buff_num;
- u8 reserved;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+
+ u8 tpa_max_aggs_num;
+
+ __le16 tpa_max_size;
+ __le16 tpa_min_size_to_start;
+
+ __le16 tpa_min_size_to_cont;
+ u8 max_buff_num;
+ u8 reserved;
 };
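Worth noting in the tpa hunk above: tpa_max_size, tpa_min_size_to_start and tpa_min_size_to_cont change from plain u16 to __le16, so callers now owe the firmware an explicit little-endian conversion. A sketch with made-up threshold values (the real driver derives its thresholds elsewhere; the example_* name is hypothetical):

/* Hypothetical TPA setup; numbers are illustrative only. */
static void example_tpa_config(struct eth_vport_tpa_param *p, u16 mtu)
{
	p->tpa_ipv4_en_flg = 1;
	p->tpa_ipv6_en_flg = 1;
	p->tpa_max_aggs_num = 4;
	p->tpa_max_size = cpu_to_le16(U16_MAX);
	p->tpa_min_size_to_start = cpu_to_le16(mtu / 2);
	p->tpa_min_size_to_cont = cpu_to_le16(mtu / 2);
}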
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
 struct eth_vport_tx_mode {
 __le16 state;
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
-#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
 __le16 reserved2[3];
 };
+/* Ramrod data for rx queue start ramrod */
 struct rx_queue_start_ramrod_data {
- __le16 rx_queue_id;
- __le16 num_of_pbl_pages;
- __le16 bd_max_bytes;
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 default_rss_queue_flg;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 stats_counter_id;
- u8 pin_context;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- u8 pxp_st_hint;
- __le16 pxp_st_index;
- u8 pmd_mode;
- u8 notify_en;
- u8 toggle_val;
- u8 reserved[7];
- __le16 reserved1;
- struct regpair cqe_pbl_addr;
- struct regpair bd_base;
- struct regpair reserved2;
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+
+ __le16 pxp_st_index;
+ u8 pmd_mode;
+
+ u8 notify_en;
+ u8 toggle_val;
+
+ u8 vf_rx_prod_index;
+
+ u8 reserved[6];
+ __le16 reserved1;
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair reserved2;
 };
+/* Ramrod data for rx queue stop ramrod */
 struct rx_queue_stop_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[3];
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
 };
+/* Ramrod data for rx queue update ramrod */
 struct rx_queue_update_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[4];
- u8 reserved1;
- u8 reserved2;
- u8 reserved3;
- __le16 reserved4;
- __le16 reserved5;
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[4];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
 struct regpair reserved6;
 };
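The rx-queue-start ramrod data above gains a vf_rx_prod_index byte (shrinking the reserved block from 7 to 6 bytes) so a VF can tell the firmware which producer slot it uses. A sketch of populating the fixed fields; regpair is lo/hi __le32 halves, and the example_* helper name is hypothetical:

/* Hypothetical RX queue start fill; assumes kernel DMA helpers. */
static void example_rxq_start(struct rx_queue_start_ramrod_data *p,
			      u16 queue_id, u16 sb_id, u8 vport_id,
			      u16 bd_max_bytes,
			      dma_addr_t cqe_pbl, dma_addr_t bd_chain)
{
	p->rx_queue_id = cpu_to_le16(queue_id);
	p->sb_id = cpu_to_le16(sb_id);
	p->vport_id = vport_id;
	p->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	p->complete_event_flg = 1;	/* complete via the event ring */
	p->cqe_pbl_addr.lo = cpu_to_le32(lower_32_bits(cqe_pbl));
	p->cqe_pbl_addr.hi = cpu_to_le32(upper_32_bits(cqe_pbl));
	p->bd_base.lo = cpu_to_le32(lower_32_bits(bd_chain));
	p->bd_base.hi = cpu_to_le32(upper_32_bits(bd_chain));
}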
-struct tx_queue_start_ramrod_data {
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 reserved0;
- u8 stats_counter_id;
- __le16 qm_pq_id;
- u8 flags;
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
- u8 pxp_st_hint;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- __le16 pxp_st_index;
- __le16 comp_agg_size;
- __le16 queue_zone_id;
- __le16 test_dup_count;
- __le16 pbl_size;
- __le16 tx_queue_id;
- struct regpair pbl_base_addr;
- struct regpair bd_cons_address;
+/* Ramrod data for rx Add UDP Filter */
+struct rx_udp_filter_data {
+ __le16 action_icid;
+ __le16 vlan_id;
+ u8 ip_type;
+ u8 tenant_id_exists;
+ __le16 reserved1;
+ __le32 ip_dst_addr[4];
+ __le32 ip_src_addr[4];
+ __le16 udp_dst_port;
+ __le16 udp_src_port;
+ __le32 tenant_id;
+};
+
+/* Ramrod data for tx queue start ramrod */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 reserved0;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ __le16 comp_agg_size;
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ __le16 tx_queue_id;
+
+ struct regpair pbl_base_addr;
+ struct regpair bd_cons_address;
+};
+
+/* Ramrod data for tx queue stop ramrod */
 struct tx_queue_stop_ramrod_data {
 __le16 reserved[4];
 };
+/* Ramrod data for vport update ramrod */
 struct vport_filter_update_ramrod_data {
- struct eth_filter_cmd_header filter_cmd_hdr;
- struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
 };
+/* Ramrod data for vport start ramrod */
 struct vport_start_ramrod_data {
- u8 vport_id;
- u8 sw_fid;
- __le16 mtu;
- u8 drop_ttl0_en;
- u8 inner_vlan_removal_en;
- struct eth_vport_rx_mode rx_mode;
- struct eth_vport_tx_mode tx_mode;
- struct eth_vport_tpa_param tpa_param;
- __le16 default_vlan;
- u8 tx_switching_en;
- u8 anti_spoofing_en;
- u8 default_vlan_en;
- u8 handle_ptp_pkts;
- u8 silent_vlan_removal_en;
- u8 untagged;
- struct eth_tx_err_vals tx_err_behav;
- u8 zero_placement_offset;
- u8 reserved[7];
-};
-
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 default_vlan;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+
+ u8 default_vlan_en;
+
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav;
+
+ u8 zero_placement_offset;
+ u8
ctl_frame_mac_check_en; + u8 ctl_frame_ethtype_check_en; + u8 reserved[5]; +}; + +/* Ramrod data for vport stop ramrod */ struct vport_stop_ramrod_data { - u8 vport_id; - u8 reserved[7]; + u8 vport_id; + u8 reserved[7]; }; +/* Ramrod data for vport update ramrod */ struct vport_update_ramrod_data_cmn { - u8 vport_id; - u8 update_rx_active_flg; - u8 rx_active_flg; - u8 update_tx_active_flg; - u8 tx_active_flg; - u8 update_rx_mode_flg; - u8 update_tx_mode_flg; - u8 update_approx_mcast_flg; - u8 update_rss_flg; - u8 update_inner_vlan_removal_en_flg; - u8 inner_vlan_removal_en; - u8 update_tpa_param_flg; - u8 update_tpa_en_flg; - u8 update_tx_switching_en_flg; - u8 tx_switching_en; - u8 update_anti_spoofing_en_flg; - u8 anti_spoofing_en; - u8 update_handle_ptp_pkts; - u8 handle_ptp_pkts; - u8 update_default_vlan_en_flg; - u8 default_vlan_en; - u8 update_default_vlan_flg; - __le16 default_vlan; - u8 update_accept_any_vlan_flg; - u8 accept_any_vlan; - u8 silent_vlan_removal_en; - u8 update_mtu_flg; - __le16 mtu; - u8 reserved[2]; + u8 vport_id; + u8 update_rx_active_flg; + u8 rx_active_flg; + u8 update_tx_active_flg; + u8 tx_active_flg; + u8 update_rx_mode_flg; + u8 update_tx_mode_flg; + u8 update_approx_mcast_flg; + + u8 update_rss_flg; + u8 update_inner_vlan_removal_en_flg; + + u8 inner_vlan_removal_en; + u8 update_tpa_param_flg; + u8 update_tpa_en_flg; + u8 update_tx_switching_en_flg; + + u8 tx_switching_en; + u8 update_anti_spoofing_en_flg; + + u8 anti_spoofing_en; + u8 update_handle_ptp_pkts; + + u8 handle_ptp_pkts; + u8 update_default_vlan_en_flg; + + u8 default_vlan_en; + + u8 update_default_vlan_flg; + + __le16 default_vlan; + u8 update_accept_any_vlan_flg; + + u8 accept_any_vlan; + u8 silent_vlan_removal_en; + u8 update_mtu_flg; + + __le16 mtu; + u8 reserved[2]; }; struct vport_update_ramrod_mcast { __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; }; +/* Ramrod data for vport update ramrod */ struct vport_update_ramrod_data { - struct vport_update_ramrod_data_cmn common; - struct eth_vport_rx_mode rx_mode; - struct eth_vport_tx_mode tx_mode; - struct eth_vport_tpa_param tpa_param; - struct vport_update_ramrod_mcast approx_mcast; - struct eth_vport_rss_config rss_config; + struct vport_update_ramrod_data_cmn common; + + struct eth_vport_rx_mode rx_mode; + struct eth_vport_tx_mode tx_mode; + struct eth_vport_tpa_param tpa_param; + struct vport_update_ramrod_mcast approx_mcast; + struct eth_vport_rss_config rss_config; +}; + +struct mstorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +struct rdma_close_func_ramrod_data { + u8 cnq_start_offset; + u8 num_cnqs; + u8 vf_id; + u8 vf_valid; + u8 reserved[4]; +}; + +struct rdma_cnq_params { + __le16 sb_num; + u8 sb_index; + u8 num_pbl_pages; + __le32 reserved; + struct regpair pbl_base_addr; + __le16 queue_zone_num; + u8 reserved1[6]; +}; + +struct rdma_create_cq_ramrod_data { + struct regpair cq_handle; + struct regpair pbl_addr; + __le32 max_cqes; + __le16 pbl_num_pages; + __le16 dpi; + u8 is_two_level_pbl; + u8 cnq_id; + u8 pbl_log_page_size; + u8 toggle_bit; + __le16 int_timeout; + __le16 reserved1; +}; + +struct rdma_deregister_tid_ramrod_data { + __le32 itid; + __le32 reserved; +}; + +struct rdma_destroy_cq_output_params { + __le16 cnq_num; + __le16 reserved0; + __le32 reserved1; +}; + +struct rdma_destroy_cq_ramrod_data { + struct regpair output_params_addr; +}; + +enum rdma_event_opcode { + RDMA_EVENT_UNUSED, + RDMA_EVENT_FUNC_INIT, + RDMA_EVENT_FUNC_CLOSE, + RDMA_EVENT_REGISTER_MR, + RDMA_EVENT_DEREGISTER_MR, + RDMA_EVENT_CREATE_CQ, + 
RDMA_EVENT_RESIZE_CQ, + RDMA_EVENT_DESTROY_CQ, + RDMA_EVENT_CREATE_SRQ, + RDMA_EVENT_MODIFY_SRQ, + RDMA_EVENT_DESTROY_SRQ, + MAX_RDMA_EVENT_OPCODE +}; + +enum rdma_fw_return_code { + RDMA_RETURN_OK = 0, + RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR, + RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR, + RDMA_RETURN_RESIZE_CQ_ERR, + RDMA_RETURN_NIG_DRAIN_REQ, + MAX_RDMA_FW_RETURN_CODE +}; + +struct rdma_init_func_hdr { + u8 cnq_start_offset; + u8 num_cnqs; + u8 cq_ring_mode; + u8 cnp_vlan_priority; + __le32 cnp_send_timeout; + u8 cnp_dscp; + u8 vf_id; + u8 vf_valid; + u8 reserved[5]; +}; + +struct rdma_init_func_ramrod_data { + struct rdma_init_func_hdr params_header; + struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; +}; + +enum rdma_ramrod_cmd_id { + RDMA_RAMROD_UNUSED, + RDMA_RAMROD_FUNC_INIT, + RDMA_RAMROD_FUNC_CLOSE, + RDMA_RAMROD_REGISTER_MR, + RDMA_RAMROD_DEREGISTER_MR, + RDMA_RAMROD_CREATE_CQ, + RDMA_RAMROD_RESIZE_CQ, + RDMA_RAMROD_DESTROY_CQ, + RDMA_RAMROD_CREATE_SRQ, + RDMA_RAMROD_MODIFY_SRQ, + RDMA_RAMROD_DESTROY_SRQ, + MAX_RDMA_RAMROD_CMD_ID +}; + +struct rdma_register_tid_ramrod_data { + __le32 flags; +#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF +#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31 + u8 flags1; +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 + u8 flags2; +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 + u8 key; + u8 length_hi; + u8 vf_id; + u8 vf_valid; + __le16 pd; + __le32 length_lo; + __le32 itid; + __le32 reserved2; + struct regpair va; + struct regpair pbl_base; + struct regpair dif_error_addr; + struct regpair dif_runt_addr; + __le32 reserved3[2]; +}; + +struct rdma_resize_cq_output_params { + __le32 old_cq_cons; + __le32 old_cq_prod; +}; + +struct rdma_resize_cq_ramrod_data { + u8 flags; +#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1 +#define 
RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0 +#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2 + u8 pbl_log_page_size; + __le16 pbl_num_pages; + __le32 max_cqes; + struct regpair pbl_addr; + struct regpair output_params_addr; +}; + +struct rdma_srq_context { + struct regpair temp[8]; +}; + +struct rdma_srq_create_ramrod_data { + struct regpair pbl_base_addr; + __le16 pages_in_srq_pbl; + __le16 pd_id; + struct rdma_srq_id srq_id; + __le16 page_size; + __le16 reserved1; + __le32 reserved2; + struct regpair producers_addr; +}; + +struct rdma_srq_destroy_ramrod_data { + struct rdma_srq_id srq_id; + __le32 reserved; +}; + +struct rdma_srq_modify_ramrod_data { + struct rdma_srq_id srq_id; + __le32 wqe_limit; +}; + +struct ystorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +struct ystorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 msem_ctx_upd_seq; + u8 flags0; +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +struct mstorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define 
MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +struct ustorm_rdma_task_st_ctx { + struct regpair temp[2]; +}; + +struct ustorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 + u8 flags1; +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 
flags3; +#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 reg2; + __le32 dif_runt_value; + __le32 reg4; + __le32 reg5; +}; + +struct rdma_task_context { + struct ystorm_rdma_task_st_ctx ystorm_st_context; + struct ystorm_rdma_task_ag_ctx ystorm_ag_context; + struct tdif_task_context tdif_context; + struct mstorm_rdma_task_ag_ctx mstorm_ag_context; + struct mstorm_rdma_task_st_ctx mstorm_st_context; + struct rdif_task_context rdif_context; + struct ustorm_rdma_task_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct ustorm_rdma_task_ag_ctx ustorm_ag_context; +}; + +enum rdma_tid_type { + RDMA_TID_REGISTERED_MR, + RDMA_TID_FMR, + RDMA_TID_MW_TYPE1, + RDMA_TID_MW_TYPE2A, + MAX_RDMA_TID_TYPE +}; + +struct mstorm_rdma_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct tstorm_rdma_conn_ag_ctx { + u8 reserved0; + u8 byte1; + u8 flags0; +#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define 
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+	u8 flags2;
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
+	u8 flags3;
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+	u8 flags4;
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags5;
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le32 reg4;
+	__le32 reg5;
+	__le32 reg6;
+	__le32 reg7;
+	__le32 reg8;
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	u8 byte4;
+	u8 byte5;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le32 reg9;
+	__le32 reg10;
+};
+
+struct tstorm_rdma_task_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	__le16 word0;
+	u8 flags0;
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+	u8 flags1;
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+	u8 flags2;
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+	u8 flags3;
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+	u8 flags4;
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+	u8 byte2;
+	__le16 word1;
+	__le32 reg0;
+	u8 byte3;
+	u8 byte4;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le32 reg1;
+	__le32 reg2;
+};
+
+struct ustorm_rdma_conn_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	u8 flags0;
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+	u8 flags2;
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+	u8 flags3;
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+	u8 byte2;
+	u8 byte3;
+	__le16 conn_dpi;
+	__le16 word1;
+	__le32 cq_cons;
+	__le32 cq_se_prod;
+	__le32 cq_prod;
+	__le32 reg3;
+	__le16 int_timeout;
+	__le16 word3;
+};
+
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+	u8 reserved0;
+	u8 state;
+	u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+	u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+	u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+	u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+	u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+	u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+	u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+	u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+	u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+	u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+	u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+	u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+	u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+	u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+	u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+	u8 byte2;
+	__le16 physical_q0;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le16 word5;
+	__le16 conn_dpi;
+	u8 byte3;
+	u8 byte4;
+	u8 byte5;
+	u8 byte6;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 snd_nxt_psn;
+	__le32 reg4;
+};
+
+struct xstorm_rdma_conn_ag_ctx {
+	u8 reserved0;
+	u8 state;
+	u8 flags0;
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+	u8 flags1;
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+	u8 flags2;
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+	u8 flags3;
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+	u8 flags4;
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+	u8 flags5;
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+	u8 flags6;
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+	u8 flags7;
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+	u8 flags8;
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+	u8 flags9;
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+	u8 flags10;
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
+	u8 flags11;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
+	u8 flags12;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
+	u8 flags13;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+	u8 flags14;
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
+	u8 byte2;
+	__le16 physical_q0;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le16 word5;
+	__le16 conn_dpi;
+	u8 byte3;
+	u8 byte4;
+	u8 byte5;
+	u8 byte6;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 snd_nxt_psn;
+	__le32 reg4;
+	__le32 reg5;
+	__le32 reg6;
+};
+
+struct ystorm_rdma_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	__le32 reg0;
+	__le32 reg1;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le32 reg2;
+	__le32 reg3;
+};
+
+struct mstorm_roce_conn_st_ctx {
+	struct regpair temp[6];
+};
+
+struct pstorm_roce_conn_st_ctx {
+	struct regpair temp[16];
+};
+
+struct ystorm_roce_conn_st_ctx {
+	struct regpair temp[2];
+};
+
+struct xstorm_roce_conn_st_ctx {
+	struct regpair temp[22];
+};
+
+struct tstorm_roce_conn_st_ctx {
+	struct regpair temp[30];
+};
+
+struct ustorm_roce_conn_st_ctx {
+	struct regpair temp[12];
+};
+
+struct roce_conn_context {
+	struct ystorm_roce_conn_st_ctx ystorm_st_context;
+	struct regpair ystorm_st_padding[2];
+	struct pstorm_roce_conn_st_ctx pstorm_st_context;
+	struct xstorm_roce_conn_st_ctx xstorm_st_context;
+	struct regpair xstorm_st_padding[2];
+	struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+	struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+	struct timers_context timer_context;
+	struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+	struct tstorm_roce_conn_st_ctx tstorm_st_context;
+	struct mstorm_roce_conn_st_ctx mstorm_st_context;
+	struct ustorm_roce_conn_st_ctx ustorm_st_context;
+	struct regpair ustorm_st_padding[2];
+};
+
+struct roce_create_qp_req_ramrod_data {
+	__le16 flags;
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
+	u8 max_ord;
+	u8 traffic_class;
+	u8 hop_limit;
+	u8 orq_num_pages;
+	__le16 p_key;
+	__le32 flow_label;
+	__le32 dst_qp_id;
+	__le32 ack_timeout_val;
+	__le32 initial_psn;
+	__le16 mtu;
+	__le16 pd;
+	__le16 sq_num_pages;
+	__le16 reseved2;
+	struct regpair sq_pbl_addr;
+	struct regpair orq_pbl_addr;
+	__le16 local_mac_addr[3];
+	__le16 remote_mac_addr[3];
+	__le16 vlan_id;
+	__le16 udp_src_port;
+	__le32 src_gid[4];
+	__le32 dst_gid[4];
+	struct regpair qp_handle_for_cqe;
+	struct regpair qp_handle_for_async;
+	u8 stats_counter_id;
+	u8 reserved3[7];
+	__le32 cq_cid;
+	__le16 physical_queue0;
+	__le16 dpi;
+};
+
+struct roce_create_qp_resp_ramrod_data {
+	__le16 flags;
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+	u8 max_ird;
+	u8 traffic_class;
+	u8 hop_limit;
+	u8 irq_num_pages;
+	__le16 p_key;
+	__le32 flow_label;
+	__le32 dst_qp_id;
+	u8 stats_counter_id;
+	u8 reserved1;
+	__le16 mtu;
+	__le32 initial_psn;
+	__le16 pd;
+	__le16 rq_num_pages;
+	struct rdma_srq_id srq_id;
+	struct regpair rq_pbl_addr;
+	struct regpair irq_pbl_addr;
+	__le16 local_mac_addr[3];
+	__le16 remote_mac_addr[3];
+	__le16 vlan_id;
+	__le16 udp_src_port;
+	__le32 src_gid[4];
+	__le32 dst_gid[4];
+	struct regpair qp_handle_for_cqe;
+	struct regpair qp_handle_for_async;
+	__le32 reserved2[2];
+	__le32 cq_cid;
+	__le16 physical_queue0;
+	__le16 dpi;
+};
+
+struct roce_destroy_qp_req_output_params {
+	__le32 num_bound_mw;
+	__le32 reserved;
+};
+
+struct roce_destroy_qp_req_ramrod_data {
+	struct regpair output_params_addr;
+};
+
+struct roce_destroy_qp_resp_output_params {
+	__le32 num_invalidated_mw;
+	__le32 reserved;
+};
+
+struct roce_destroy_qp_resp_ramrod_data {
+	struct regpair output_params_addr;
+};
+
+enum roce_event_opcode {
+	ROCE_EVENT_CREATE_QP = 11,
+	ROCE_EVENT_MODIFY_QP,
+	ROCE_EVENT_QUERY_QP,
+	ROCE_EVENT_DESTROY_QP,
+	MAX_ROCE_EVENT_OPCODE
+};
+
+struct roce_modify_qp_req_ramrod_data {
+	__le16 flags;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
+	u8 fields;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
+	u8 max_ord;
+	u8 traffic_class;
+	u8 hop_limit;
+	__le16 p_key;
+	__le32 flow_label;
+	__le32 ack_timeout_val;
+	__le16 mtu;
+	__le16 reserved2;
+	__le32 reserved3[3];
+	__le32 src_gid[4];
+	__le32 dst_gid[4];
+};
+
+struct roce_modify_qp_resp_ramrod_data {
+	__le16 flags;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
+	u8 fields;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
+	u8 max_ird;
+	u8 traffic_class;
+	u8 hop_limit;
+	__le16 p_key;
+	__le32 flow_label;
+	__le16 mtu;
+	__le16 reserved2;
+	__le32 src_gid[4];
+	__le32 dst_gid[4];
+};
+
+struct roce_query_qp_req_output_params {
+	__le32 psn;
+	__le32 flags;
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
+};
+
+struct roce_query_qp_req_ramrod_data {
+	struct regpair output_params_addr;
+};
+
+struct roce_query_qp_resp_output_params {
+	__le32 psn;
+	__le32 err_flag;
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+};
+
+struct roce_query_qp_resp_ramrod_data {
+	struct regpair output_params_addr;
+};
+
+enum roce_ramrod_cmd_id {
+	ROCE_RAMROD_CREATE_QP = 11,
+	ROCE_RAMROD_MODIFY_QP,
+	ROCE_RAMROD_QUERY_QP,
+	ROCE_RAMROD_DESTROY_QP,
+	MAX_ROCE_RAMROD_CMD_ID
+};
+
+struct mstorm_roce_req_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+	__le16 word0;
+	__le16 word1;
+	__le32 reg0;
+	__le32 reg1;
+};
+
+struct mstorm_roce_resp_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+	__le16 word0;
+	__le16 word1;
+	__le32 reg0;
+	__le32 reg1;
+};
+
+enum roce_flavor {
+	PLAIN_ROCE /* RoCE v1 */ ,
+	RROCE_IPV4 /* RoCE v2 (Routable RoCE) over ipv4 */ ,
+	RROCE_IPV6 /* RoCE v2 (Routable RoCE) over ipv6 */ ,
+	MAX_ROCE_FLAVOR
+};
+
+struct tstorm_roce_req_conn_ag_ctx {
+	u8 reserved0;
+	u8 state;
+	u8 flags0;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+	u8 flags1;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+	u8 flags2;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+	u8 flags3;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+	u8 flags4;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags5;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+	__le32 reg0;
+	__le32 snd_nxt_psn;
+	__le32 snd_max_psn;
+	__le32 orq_prod;
+	__le32 reg4;
+	__le32 reg5;
+	__le32 reg6;
+	__le32 reg7;
+	__le32 reg8;
+	u8 tx_cqe_error_type;
+	u8 orq_cache_idx;
+	__le16 snd_sq_cons_th;
+	u8 byte4;
+	u8 byte5;
+	__le16 snd_sq_cons;
+	__le16 word2;
+	__le16 word3;
+	__le32 reg9;
+	__le32 reg10;
+};
+
+struct tstorm_roce_resp_conn_ag_ctx {
+	u8 byte0;
+	u8 state;
+	u8 flags0;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+	u8 flags1;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+	u8 flags2;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+	u8 flags3;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+	u8 flags4;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags5;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+	__le32 psn_and_rxmit_id_echo;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le32 reg4;
+	__le32 reg5;
+	__le32 reg6;
+	__le32 reg7;
+	__le32 reg8;
+	u8 tx_async_error_type;
+	u8 byte3;
+	__le16 rq_cons;
+	u8 byte4;
+	u8 byte5;
+	__le16 rq_prod;
+	__le16 conn_dpi;
+	__le16 irq_cons;
+	__le32 num_invlidated_mw;
+	__le32 reg10;
+};
+
+struct ustorm_roce_req_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+	u8 flags2;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags3;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	__le16 word1;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le16 word2;
+	__le16 word3;
+};
+
+struct ustorm_roce_resp_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+	u8 flags2;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags3;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	__le16 word1;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le16 word2;
+	__le16 word3;
+};
+
+struct xstorm_roce_req_conn_ag_ctx {
+	u8 reserved0;
+	u8 state;
+	u8 flags0;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 + u8 flags7; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 
0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 + u8 flags13; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 sq_cmp_cons; + __le16 sq_cons; + __le16 sq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 lsn; + __le32 ssn; + __le32 snd_una_psn; + __le32 snd_nxt_psn; + __le32 reg4; + __le32 orq_cons_th; + __le32 orq_cons; +}; + +struct 
xstorm_roce_resp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 + u8 flags7; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 irq_prod; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 irq_cons; + u8 rxmit_opcode; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 rxmit_psn_and_id; + __le32 rxmit_bytes_length; + __le32 psn; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 msn_and_syndrome; +}; + +struct ystorm_roce_req_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct ystorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct ystorm_iscsi_conn_st_ctx { + __le32 reserved[4]; }; -#define VF_MAX_STATIC 192 /* In case of K2 */ +struct pstorm_iscsi_tcp_conn_st_ctx { + __le32 tcp[32]; + __le32 iscsi[4]; +}; + +struct xstorm_iscsi_tcp_conn_st_ctx { + __le32 reserved_iscsi[40]; + __le32 reserved_tcp[4]; +}; + +struct xstorm_iscsi_conn_ag_ctx { + u8 cdu_validation; + u8 state; + u8 flags0; +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 + u8 flags2; +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 + u8 flags6; +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 +#define 
XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 +#define 
XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 dummy_dorq_var; + __le16 sq_cons; + __le16 sq_prod; + __le16 word5; + __le16 slow_io_total_data_tx_update; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 reg4; + __le32 reg5; + __le32 hq_scan_next_relevant_ack; + __le16 r2tq_prod; + __le16 r2tq_cons; + __le16 hq_prod; + __le16 hq_cons; + __le32 remain_seq; + __le32 bytes_to_next_pdu; + __le32 hq_tcp_seq; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 byte16; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 exp_stat_sn; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_iscsi_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 +#define 
TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; +}; + +struct ustorm_iscsi_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define 
USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct tstorm_iscsi_conn_st_ctx { + __le32 reserved[40]; +}; -#define MCP_GLOB_PATH_MAX 2 -#define MCP_PORT_MAX 2 /* Global */ -#define MCP_GLOB_PORT_MAX 4 /* Global */ -#define MCP_GLOB_FUNC_MAX 16 /* Global */ +struct mstorm_iscsi_conn_ag_ctx { + u8 reserved; + u8 state; + u8 flags0; +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct mstorm_iscsi_tcp_conn_st_ctx { + __le32 reserved_tcp[20]; + __le32 reserved_iscsi[8]; +}; + +struct ustorm_iscsi_conn_st_ctx { + __le32 reserved[52]; +}; + +struct iscsi_conn_context { + struct ystorm_iscsi_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct pb_context xpb2_context; + struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context; + struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context; + struct pb_context upb_context; + struct tstorm_iscsi_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context; + struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context; + struct ustorm_iscsi_conn_st_ctx ustorm_st_context; +}; + +struct iscsi_init_ramrod_params { + struct iscsi_spe_func_init iscsi_init_spe; + struct tcp_init_params tcp_init; +}; + +struct ystorm_iscsi_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK	0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT	4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK	0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT	6
+	u8 flags1;
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK	0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT	0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK	0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT	1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK	0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT	2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	7
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	__le32 reg0;
+	__le32 reg1;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le32 reg2;
+	__le32 reg3;
+};
+#define VF_MAX_STATIC	192
+
+#define MCP_GLOB_PATH_MAX	2
+#define MCP_PORT_MAX	2
+#define MCP_GLOB_PORT_MAX	4
+#define MCP_GLOB_FUNC_MAX	16
-typedef u32 offsize_t;	/* In DWORDS !!! */
 /* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT	0
-#define OFFSIZE_OFFSET_MASK	0x0000ffff
+#define OFFSIZE_OFFSET_SHIFT	0
+#define OFFSIZE_OFFSET_MASK	0x0000ffff
 /* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT	16
-#define OFFSIZE_SIZE_MASK	0xffff0000
+#define OFFSIZE_SIZE_SHIFT	16
+#define OFFSIZE_SIZE_MASK	0xffff0000
-/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
-#define SECTION_OFFSET(_offsize)	((((_offsize &			\
-					    OFFSIZE_OFFSET_MASK) >>	\
-					   OFFSIZE_OFFSET_SHIFT) << 2))
+#define SECTION_OFFSET(_offsize)	((((_offsize &			\
+					    OFFSIZE_OFFSET_MASK) >>	\
+					   OFFSIZE_OFFSET_SHIFT) << 2))
-/* QED_SECTION_SIZE is calculating the size in bytes out of offsize */
-#define QED_SECTION_SIZE(_offsize)	(((_offsize &			\
-					   OFFSIZE_SIZE_MASK) >>	\
-					  OFFSIZE_SIZE_SHIFT) << 2)
+#define QED_SECTION_SIZE(_offsize)	(((_offsize &			\
+					   OFFSIZE_SIZE_MASK) >>	\
+					  OFFSIZE_SIZE_SHIFT) << 2)
-/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
- * within section.
- */
-#define SECTION_ADDR(_offsize, idx)	(MCP_REG_SCRATCH +		\
-					 SECTION_OFFSET(_offsize) +	\
-					 (QED_SECTION_SIZE(_offsize) * idx))
+#define SECTION_ADDR(_offsize, idx)	(MCP_REG_SCRATCH +		\
+					 SECTION_OFFSET(_offsize) +	\
+					 (QED_SECTION_SIZE(_offsize) * idx))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+	(_pub_base + offsetof(struct mcp_public_data, sections[_section]))
-/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address.
- * Use offsetof, since the OFFSETUP collide with the firmware definition - */ -#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \ - offsetof(struct \ - mcp_public_data, \ - sections[_section])) /* PHY configuration */ -struct pmm_phy_cfg { - u32 speed; -#define PMM_SPEED_AUTONEG 0 - - u32 pause; /* bitmask */ -#define PMM_PAUSE_NONE 0x0 -#define PMM_PAUSE_AUTONEG 0x1 -#define PMM_PAUSE_RX 0x2 -#define PMM_PAUSE_TX 0x4 - - u32 adv_speed; /* Default should be the speed_cap_mask */ - u32 loopback_mode; -#define PMM_LOOPBACK_NONE 0 -#define PMM_LOOPBACK_INT_PHY 1 -#define PMM_LOOPBACK_EXT_PHY 2 -#define PMM_LOOPBACK_EXT 3 -#define PMM_LOOPBACK_MAC 4 - - /* features */ +struct eth_phy_cfg { + u32 speed; +#define ETH_SPEED_AUTONEG 0 +#define ETH_SPEED_SMARTLINQ 0x8 + + u32 pause; +#define ETH_PAUSE_NONE 0x0 +#define ETH_PAUSE_AUTONEG 0x1 +#define ETH_PAUSE_RX 0x2 +#define ETH_PAUSE_TX 0x4 + + u32 adv_speed; + u32 loopback_mode; +#define ETH_LOOPBACK_NONE (0) +#define ETH_LOOPBACK_INT_PHY (1) +#define ETH_LOOPBACK_EXT_PHY (2) +#define ETH_LOOPBACK_EXT (3) +#define ETH_LOOPBACK_MAC (4) + u32 feature_config_flags; +#define ETH_EEE_MODE_ADV_LPI (1 << 0) }; struct port_mf_cfg { - u32 dynamic_cfg; /* device control channel */ -#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff -#define PORT_MF_CFG_OV_TAG_SHIFT 0 -#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK - - u32 reserved[1]; -}; - -/* DO NOT add new fields in the middle - * MUST be synced with struct pmm_stats_map - */ -struct pmm_stats { - u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/ - u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/ - u64 r255; - u64 r511; - u64 r1023; - u64 r1518; - u64 r1522; - u64 r2047; - u64 r4095; - u64 r9216; - u64 r16383; - u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/ - u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/ - u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/ - u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/ - u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/ - u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */ - u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/ - u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */ - u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */ - u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */ - u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */ - u64 t127; - u64 t255; - u64 t511; - u64 t1023; - u64 t1518; - u64 t2047; - u64 t4095; - u64 t9216; - u64 t16383; - u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */ - u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */ - u64 tlpiec; - u64 tncl; - u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */ - u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */ - u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */ - u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */ - u64 rxpok; - u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */ - u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */ - u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */ - u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */ - u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */ + u32 dynamic_cfg; +#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff +#define PORT_MF_CFG_OV_TAG_SHIFT 0 +#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK + + u32 reserved[1]; +}; + +struct eth_stats { + u64 r64; + u64 r127; + u64 r255; + u64 r511; + u64 r1023; + u64 r1518; + u64 r1522; + u64 
r2047; + u64 r4095; + u64 r9216; + u64 r16383; + u64 rfcs; + u64 rxcf; + u64 rxpf; + u64 rxpp; + u64 raln; + u64 rfcr; + u64 rovr; + u64 rjbr; + u64 rund; + u64 rfrg; + u64 t64; + u64 t127; + u64 t255; + u64 t511; + u64 t1023; + u64 t1518; + u64 t2047; + u64 t4095; + u64 t9216; + u64 t16383; + u64 txpf; + u64 txpp; + u64 tlpiec; + u64 tncl; + u64 rbyte; + u64 rxuca; + u64 rxmca; + u64 rxbca; + u64 rxpok; + u64 tbyte; + u64 txuca; + u64 txmca; + u64 txbca; + u64 txcf; }; struct brb_stats { - u64 brb_truncate[8]; - u64 brb_discard[8]; + u64 brb_truncate[8]; + u64 brb_discard[8]; }; struct port_stats { - struct brb_stats brb; - struct pmm_stats pmm; + struct brb_stats brb; + struct eth_stats eth; }; -#define CMT_TEAM0 0 -#define CMT_TEAM1 1 -#define CMT_TEAM_MAX 2 - struct couple_mode_teaming { u8 port_cmt[MCP_GLOB_PORT_MAX]; -#define PORT_CMT_IN_TEAM BIT(0) +#define PORT_CMT_IN_TEAM (1 << 0) -#define PORT_CMT_PORT_ROLE BIT(1) -#define PORT_CMT_PORT_INACTIVE (0 << 1) -#define PORT_CMT_PORT_ACTIVE BIT(1) +#define PORT_CMT_PORT_ROLE (1 << 1) +#define PORT_CMT_PORT_INACTIVE (0 << 1) +#define PORT_CMT_PORT_ACTIVE (1 << 1) -#define PORT_CMT_TEAM_MASK BIT(2) -#define PORT_CMT_TEAM0 (0 << 2) -#define PORT_CMT_TEAM1 BIT(2) +#define PORT_CMT_TEAM_MASK (1 << 2) +#define PORT_CMT_TEAM0 (0 << 2) +#define PORT_CMT_TEAM1 (1 << 2) }; -/************************************** -* LLDP and DCBX HSI structures -**************************************/ -#define LLDP_CHASSIS_ID_STAT_LEN 4 -#define LLDP_PORT_ID_STAT_LEN 4 -#define DCBX_MAX_APP_PROTOCOL 32 -#define MAX_SYSTEM_LLDP_TLV_DATA 32 +#define LLDP_CHASSIS_ID_STAT_LEN 4 +#define LLDP_PORT_ID_STAT_LEN 4 +#define DCBX_MAX_APP_PROTOCOL 32 +#define MAX_SYSTEM_LLDP_TLV_DATA 32 -enum lldp_agent_e { +enum _lldp_agent { LLDP_NEAREST_BRIDGE = 0, LLDP_NEAREST_NON_TPMR_BRIDGE, LLDP_NEAREST_CUSTOMER_BRIDGE, @@ -3394,690 +6786,525 @@ enum lldp_agent_e { struct lldp_config_params_s { u32 config; -#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff -#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 -#define LLDP_CONFIG_HOLD_MASK 0x00000f00 -#define LLDP_CONFIG_HOLD_SHIFT 8 -#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 -#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 -#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 -#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 -#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 -#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 - u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; - u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff +#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 +#define LLDP_CONFIG_HOLD_MASK 0x00000f00 +#define LLDP_CONFIG_HOLD_SHIFT 8 +#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 +#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 +#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 +#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 +#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 +#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; }; struct lldp_status_params_s { - u32 prefix_seq_num; - u32 status; /* TBD */ - - /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ - u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; - - /* Holds remote Port ID TLV header, subtype and 9B of payload. 
*/ - u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; - u32 suffix_seq_num; + u32 prefix_seq_num; + u32 status; + u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 suffix_seq_num; }; struct dcbx_ets_feature { u32 flags; -#define DCBX_ETS_ENABLED_MASK 0x00000001 -#define DCBX_ETS_ENABLED_SHIFT 0 -#define DCBX_ETS_WILLING_MASK 0x00000002 -#define DCBX_ETS_WILLING_SHIFT 1 -#define DCBX_ETS_ERROR_MASK 0x00000004 -#define DCBX_ETS_ERROR_SHIFT 2 -#define DCBX_ETS_CBS_MASK 0x00000008 -#define DCBX_ETS_CBS_SHIFT 3 -#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 -#define DCBX_ETS_MAX_TCS_SHIFT 4 - u32 pri_tc_tbl[1]; -#define DCBX_ISCSI_OOO_TC 4 -#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1) - u32 tc_bw_tbl[2]; - u32 tc_tsa_tbl[2]; -#define DCBX_ETS_TSA_STRICT 0 -#define DCBX_ETS_TSA_CBS 1 -#define DCBX_ETS_TSA_ETS 2 +#define DCBX_ETS_ENABLED_MASK 0x00000001 +#define DCBX_ETS_ENABLED_SHIFT 0 +#define DCBX_ETS_WILLING_MASK 0x00000002 +#define DCBX_ETS_WILLING_SHIFT 1 +#define DCBX_ETS_ERROR_MASK 0x00000004 +#define DCBX_ETS_ERROR_SHIFT 2 +#define DCBX_ETS_CBS_MASK 0x00000008 +#define DCBX_ETS_CBS_SHIFT 3 +#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 +#define DCBX_ETS_MAX_TCS_SHIFT 4 +#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00 +#define DCBX_ISCSI_OOO_TC_SHIFT 8 + u32 pri_tc_tbl[1]; +#define DCBX_ISCSI_OOO_TC (4) + +#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1) +#define DCBX_CEE_STRICT_PRIORITY 0xf + u32 tc_bw_tbl[2]; + u32 tc_tsa_tbl[2]; +#define DCBX_ETS_TSA_STRICT 0 +#define DCBX_ETS_TSA_CBS 1 +#define DCBX_ETS_TSA_ETS 2 }; struct dcbx_app_priority_entry { u32 entry; -#define DCBX_APP_PRI_MAP_MASK 0x000000ff -#define DCBX_APP_PRI_MAP_SHIFT 0 -#define DCBX_APP_PRI_0 0x01 -#define DCBX_APP_PRI_1 0x02 -#define DCBX_APP_PRI_2 0x04 -#define DCBX_APP_PRI_3 0x08 -#define DCBX_APP_PRI_4 0x10 -#define DCBX_APP_PRI_5 0x20 -#define DCBX_APP_PRI_6 0x40 -#define DCBX_APP_PRI_7 0x80 -#define DCBX_APP_SF_MASK 0x00000300 -#define DCBX_APP_SF_SHIFT 8 -#define DCBX_APP_SF_ETHTYPE 0 -#define DCBX_APP_SF_PORT 1 -#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 -#define DCBX_APP_PROTOCOL_ID_SHIFT 16 -}; - -/* FW structure in BE */ +#define DCBX_APP_PRI_MAP_MASK 0x000000ff +#define DCBX_APP_PRI_MAP_SHIFT 0 +#define DCBX_APP_PRI_0 0x01 +#define DCBX_APP_PRI_1 0x02 +#define DCBX_APP_PRI_2 0x04 +#define DCBX_APP_PRI_3 0x08 +#define DCBX_APP_PRI_4 0x10 +#define DCBX_APP_PRI_5 0x20 +#define DCBX_APP_PRI_6 0x40 +#define DCBX_APP_PRI_7 0x80 +#define DCBX_APP_SF_MASK 0x00000300 +#define DCBX_APP_SF_SHIFT 8 +#define DCBX_APP_SF_ETHTYPE 0 +#define DCBX_APP_SF_PORT 1 +#define DCBX_APP_SF_IEEE_MASK 0x0000f000 +#define DCBX_APP_SF_IEEE_SHIFT 12 +#define DCBX_APP_SF_IEEE_RESERVED 0 +#define DCBX_APP_SF_IEEE_ETHTYPE 1 +#define DCBX_APP_SF_IEEE_TCP_PORT 2 +#define DCBX_APP_SF_IEEE_UDP_PORT 3 +#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 + +#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 +#define DCBX_APP_PROTOCOL_ID_SHIFT 16 +}; + struct dcbx_app_priority_feature { u32 flags; -#define DCBX_APP_ENABLED_MASK 0x00000001 -#define DCBX_APP_ENABLED_SHIFT 0 -#define DCBX_APP_WILLING_MASK 0x00000002 -#define DCBX_APP_WILLING_SHIFT 1 -#define DCBX_APP_ERROR_MASK 0x00000004 -#define DCBX_APP_ERROR_SHIFT 2 -/* Not in use - * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00 - * #define DCBX_APP_DEFAULT_PRI_SHIFT 8 - */ -#define DCBX_APP_MAX_TCS_MASK 0x0000f000 -#define DCBX_APP_MAX_TCS_SHIFT 12 -#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 -#define DCBX_APP_NUM_ENTRIES_SHIFT 16 +#define 
DCBX_APP_ENABLED_MASK 0x00000001 +#define DCBX_APP_ENABLED_SHIFT 0 +#define DCBX_APP_WILLING_MASK 0x00000002 +#define DCBX_APP_WILLING_SHIFT 1 +#define DCBX_APP_ERROR_MASK 0x00000004 +#define DCBX_APP_ERROR_SHIFT 2 +#define DCBX_APP_MAX_TCS_MASK 0x0000f000 +#define DCBX_APP_MAX_TCS_SHIFT 12 +#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 +#define DCBX_APP_NUM_ENTRIES_SHIFT 16 struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; }; -/* FW structure in BE */ struct dcbx_features { - /* PG feature */ struct dcbx_ets_feature ets; + u32 pfc; +#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff +#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 + +#define DCBX_PFC_FLAGS_MASK 0x0000ff00 +#define DCBX_PFC_FLAGS_SHIFT 8 +#define DCBX_PFC_CAPS_MASK 0x00000f00 +#define DCBX_PFC_CAPS_SHIFT 8 +#define DCBX_PFC_MBC_MASK 0x00004000 +#define DCBX_PFC_MBC_SHIFT 14 +#define DCBX_PFC_WILLING_MASK 0x00008000 +#define DCBX_PFC_WILLING_SHIFT 15 +#define DCBX_PFC_ENABLED_MASK 0x00010000 +#define DCBX_PFC_ENABLED_SHIFT 16 +#define DCBX_PFC_ERROR_MASK 0x00020000 +#define DCBX_PFC_ERROR_SHIFT 17 - /* PFC feature */ - u32 pfc; -#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff -#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 - -#define DCBX_PFC_FLAGS_MASK 0x0000ff00 -#define DCBX_PFC_FLAGS_SHIFT 8 -#define DCBX_PFC_CAPS_MASK 0x00000f00 -#define DCBX_PFC_CAPS_SHIFT 8 -#define DCBX_PFC_MBC_MASK 0x00004000 -#define DCBX_PFC_MBC_SHIFT 14 -#define DCBX_PFC_WILLING_MASK 0x00008000 -#define DCBX_PFC_WILLING_SHIFT 15 -#define DCBX_PFC_ENABLED_MASK 0x00010000 -#define DCBX_PFC_ENABLED_SHIFT 16 -#define DCBX_PFC_ERROR_MASK 0x00020000 -#define DCBX_PFC_ERROR_SHIFT 17 - - /* APP feature */ struct dcbx_app_priority_feature app; }; struct dcbx_local_params { u32 config; -#define DCBX_CONFIG_VERSION_MASK 0x00000003 -#define DCBX_CONFIG_VERSION_SHIFT 0 -#define DCBX_CONFIG_VERSION_DISABLED 0 -#define DCBX_CONFIG_VERSION_IEEE 1 -#define DCBX_CONFIG_VERSION_CEE 2 +#define DCBX_CONFIG_VERSION_MASK 0x00000007 +#define DCBX_CONFIG_VERSION_SHIFT 0 +#define DCBX_CONFIG_VERSION_DISABLED 0 +#define DCBX_CONFIG_VERSION_IEEE 1 +#define DCBX_CONFIG_VERSION_CEE 2 +#define DCBX_CONFIG_VERSION_STATIC 4 - u32 flags; - struct dcbx_features features; + u32 flags; + struct dcbx_features features; }; struct dcbx_mib { - u32 prefix_seq_num; - u32 flags; - struct dcbx_features features; - u32 suffix_seq_num; + u32 prefix_seq_num; + u32 flags; + struct dcbx_features features; + u32 suffix_seq_num; }; struct lldp_system_tlvs_buffer_s { - u16 valid; - u16 length; - u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; + u16 valid; + u16 length; + u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; }; -/**************************************/ -/* */ -/* P U B L I C G L O B A L */ -/* */ -/**************************************/ -struct public_global { - u32 max_path; -#define MAX_PATH_BIG_BEAR 2 -#define MAX_PATH_K2 1 - u32 
max_ports; -#define MODE_1P 1 -#define MODE_2P 2 -#define MODE_3P 3 -#define MODE_4P 4 - u32 debug_mb_offset; - u32 phymod_dbg_mb_offset; - struct couple_mode_teaming cmt; - s32 internal_temperature; - u32 mfw_ver; - u32 running_bundle_id; +struct dcb_dscp_map { + u32 flags; +#define DCB_DSCP_ENABLE_MASK 0x1 +#define DCB_DSCP_ENABLE_SHIFT 0 +#define DCB_DSCP_ENABLE 1 + u32 dscp_pri_map[8]; }; -/**************************************/ -/* */ -/* P U B L I C P A T H */ -/* */ -/**************************************/ +struct public_global { + u32 max_path; + u32 max_ports; + u32 debug_mb_offset; + u32 phymod_dbg_mb_offset; + struct couple_mode_teaming cmt; + s32 internal_temperature; + u32 mfw_ver; + u32 running_bundle_id; + s32 external_temperature; + u32 mdump_reason; +}; -/**************************************************************************** -* Shared Memory 2 Region * -****************************************************************************/ -/* The fw_flr_ack is actually built in the following way: */ -/* 8 bit: PF ack */ -/* 128 bit: VF ack */ -/* 8 bit: ios_dis_ack */ -/* In order to maintain endianity in the mailbox hsi, we want to keep using */ -/* u32. The fw must have the VF right after the PF since this is how it */ -/* access arrays(it expects always the VF to reside after the PF, and that */ -/* makes the calculation much easier for it. ) */ -/* In order to answer both limitations, and keep the struct small, the code */ -/* will abuse the structure defined here to achieve the actual partition */ -/* above */ -/****************************************************************************/ struct fw_flr_mb { - u32 aggint; - u32 opgen_addr; - u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */ -#define ACCUM_ACK_PF_BASE 0 -#define ACCUM_ACK_PF_SHIFT 0 - -#define ACCUM_ACK_VF_BASE 8 -#define ACCUM_ACK_VF_SHIFT 3 - -#define ACCUM_ACK_IOV_DIS_BASE 256 -#define ACCUM_ACK_IOV_DIS_SHIFT 8 + u32 aggint; + u32 opgen_addr; + u32 accum_ack; }; struct public_path { - struct fw_flr_mb flr_mb; - u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; - - u32 process_kill; -#define PROCESS_KILL_COUNTER_MASK 0x0000ffff -#define PROCESS_KILL_COUNTER_SHIFT 0 -#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 -#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 + struct fw_flr_mb flr_mb; + u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; + + u32 process_kill; +#define PROCESS_KILL_COUNTER_MASK 0x0000ffff +#define PROCESS_KILL_COUNTER_SHIFT 0 +#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 +#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 #define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit) }; -/**************************************/ -/* */ -/* P U B L I C P O R T */ -/* */ -/**************************************/ - -/**************************************************************************** -* Driver <-> FW Mailbox * -****************************************************************************/ - struct public_port { - u32 validity_map; /* 0x0 (4*2 = 0x8) */ - - /* validity bits */ -#define MCP_VALIDITY_PCI_CFG 0x00100000 -#define MCP_VALIDITY_MB 0x00200000 -#define MCP_VALIDITY_DEV_INFO 0x00400000 -#define MCP_VALIDITY_RESERVED 0x00000007 - - /* One licensing bit should be set */ -#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 -#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 -#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 -#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 - - /* Active MFW */ -#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 -#define 
MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 -#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040 -#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 + u32 validity_map; u32 link_status; -#define LINK_STATUS_LINK_UP \ - 0x00000001 -#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e -#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) - -#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 - -#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 -#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 - -#define LINK_STATUS_PFC_ENABLED \ - 0x00000100 -#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 -#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 -#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 -#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 -#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 -#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 -#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 -#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 - -#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 -#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) -#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18) -#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) -#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) - -#define LINK_STATUS_SFP_TX_FAULT \ - 0x00100000 -#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 -#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 - - u32 link_status1; - u32 ext_phy_fw_version; - u32 drv_phy_cfg_addr; - - u32 port_stx; - - u32 stat_nig_timer; - - struct port_mf_cfg port_mf_config; - struct port_stats stats; - - u32 media_type; -#define MEDIA_UNSPECIFIED 0x0 -#define MEDIA_SFPP_10G_FIBER 0x1 -#define MEDIA_XFP_FIBER 0x2 -#define MEDIA_DA_TWINAX 0x3 -#define MEDIA_BASE_T 0x4 -#define MEDIA_SFP_1G_FIBER 0x5 -#define MEDIA_MODULE_FIBER 0x6 -#define MEDIA_KR 0xf0 -#define MEDIA_NOT_PRESENT 0xff +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) + +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + +#define LINK_STATUS_PFC_ENABLED 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 +#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 + +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 
0x000C0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) + +#define LINK_STATUS_SFP_TX_FAULT 0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 +#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 +#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 +#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 +#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 + + u32 link_status1; + u32 ext_phy_fw_version; + u32 drv_phy_cfg_addr; + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 +#define MEDIA_XFP_FIBER 0x2 +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 +#define MEDIA_MODULE_FIBER 0x6 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff u32 lfa_status; -#define LFA_LINK_FLAP_REASON_OFFSET 0 -#define LFA_LINK_FLAP_REASON_MASK 0x000000ff -#define LFA_NO_REASON (0 << 0) -#define LFA_LINK_DOWN BIT(0) -#define LFA_FORCE_INIT BIT(1) -#define LFA_LOOPBACK_MISMATCH BIT(2) -#define LFA_SPEED_MISMATCH BIT(3) -#define LFA_FLOW_CTRL_MISMATCH BIT(4) -#define LFA_ADV_SPEED_MISMATCH BIT(5) -#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 -#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 -#define LINK_FLAP_COUNT_OFFSET 16 -#define LINK_FLAP_COUNT_MASK 0x00ff0000 - - u32 link_change_count; - - /* LLDP params */ - struct lldp_config_params_s lldp_config_params[ - LLDP_MAX_LLDP_AGENTS]; - struct lldp_status_params_s lldp_status_params[ - LLDP_MAX_LLDP_AGENTS]; - struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + u32 link_change_count; + + struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; /* DCBX related MIB */ - struct dcbx_local_params local_admin_dcbx_mib; - struct dcbx_mib remote_dcbx_mib; - struct dcbx_mib operational_dcbx_mib; + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; - u32 fc_npiv_nvram_tbl_addr; - u32 fc_npiv_nvram_tbl_size; - u32 transceiver_data; -#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF -#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000 -#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001 -}; + u32 reserved[2]; + u32 transceiver_data; +#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF +#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 +#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 -/**************************************/ -/* */ -/* P U B L I C F U N C */ -/* */ -/**************************************/ + u32 wol_info; + u32 wol_pkt_len; + u32 wol_pkt_details; + struct dcb_dscp_map dcb_dscp_map; +}; struct public_func { - u32 iscsi_boot_signature; - u32 iscsi_boot_block_offset; - - u32 mtu_size; - u32 c2s_pcp_map_lower; - u32 c2s_pcp_map_upper; - u32 c2s_pcp_map_default; - u32 reserved[4]; - - u32 config; - - /* E/R/I/D */ - /* function 0 of each port cannot be hidden */ -#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 -#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 -#define 
FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 - -#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 -#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 -#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 + u32 reserved0[2]; + + u32 mtu_size; + + u32 reserved[7]; + + u32 config; +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 -#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 #define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 -#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 - /* MINBW, MAXBW */ - /* value range - 0..100, increments in 1 % */ -#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 -#define FUNC_MF_CFG_MIN_BW_SHIFT 8 -#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 -#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 -#define FUNC_MF_CFG_MAX_BW_SHIFT 16 -#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_SHIFT 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_SHIFT 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 - u32 status; -#define FUNC_STATUS_VLINK_DOWN 0x00000001 + u32 status; +#define FUNC_STATUS_VLINK_DOWN 0x00000001 - u32 mac_upper; /* MAC */ -#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff -#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 -#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK - u32 mac_lower; -#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + u32 mac_upper; +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff - u32 fcoe_wwn_port_name_upper; - u32 fcoe_wwn_port_name_lower; + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; - u32 fcoe_wwn_node_name_upper; - u32 fcoe_wwn_node_name_lower; + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; - u32 ovlan_stag; /* tags */ -#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff -#define FUNC_MF_CFG_OV_STAG_SHIFT 0 -#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + u32 ovlan_stag; +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_SHIFT 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK - u32 pf_allocation; /* vf per pf */ + u32 pf_allocation; - u32 preserve_data; /* Will be used bt CCM */ + u32 preserve_data; - u32 driver_last_activity_ts; + u32 driver_last_activity_ts; - u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */ + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; - u32 drv_id; -#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff -#define DRV_ID_PDA_COMP_VER_SHIFT 0 + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_SHIFT 0 -#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 -#define DRV_ID_MCP_HSI_VER_SHIFT 16 -#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT) +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define DRV_ID_MCP_HSI_VER_SHIFT 16 +#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT) -#define DRV_ID_DRV_TYPE_MASK 0x7f000000 -#define DRV_ID_DRV_TYPE_SHIFT 24 -#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_WINDOWS (2 << 
DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 +#define DRV_ID_DRV_TYPE_SHIFT 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 -#define DRV_ID_DRV_INIT_HW_SHIFT 31 -#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT) +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) }; -/**************************************/ -/* */ -/* P U B L I C M B */ -/* */ -/**************************************/ -/* This is the only section that the driver can write to, and each */ -/* Basically each driver request to set feature parameters, - * will be done using a different command, which will be linked - * to a specific data structure from the union below. - * For huge strucuture, the common blank structure should be used. - */ - struct mcp_mac { - u32 mac_upper; /* Upper 16 bits are always zeroes */ - u32 mac_lower; + u32 mac_upper; + u32 mac_lower; }; struct mcp_val64 { - u32 lo; - u32 hi; + u32 lo; + u32 hi; }; struct mcp_file_att { - u32 nvm_start_addr; - u32 len; + u32 nvm_start_addr; + u32 len; +}; + +struct bist_nvm_image_att { + u32 return_code; + u32 image_type; + u32 nvm_start_addr; + u32 len; }; #define MCP_DRV_VER_STR_SIZE 16 #define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) #define MCP_DRV_NVM_BUF_LEN 32 struct drv_version_stc { - u32 version; - u8 name[MCP_DRV_VER_STR_SIZE - 4]; + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct lan_stats_stc { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; + u32 rserved; +}; + +struct ocbb_data_stc { + u32 ocbb_host_addr; + u32 ocsd_host_addr; + u32 ocsd_req_update_interval; +}; + +#define MAX_NUM_OF_SENSORS 7 +struct temperature_status_stc { + u32 num_of_sensors; + u32 sensor[MAX_NUM_OF_SENSORS]; +}; + +/* crash dump configuration header */ +struct mdump_config_stc { + u32 version; + u32 config; + u32 epoc; + u32 num_of_logs; + u32 valid_logs; }; union drv_union_data { - u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; - struct mcp_mac wol_mac; + u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; + struct mcp_mac wol_mac; - struct pmm_phy_cfg drv_phy_cfg; + struct eth_phy_cfg drv_phy_cfg; - struct mcp_val64 val64; /* For PHY / AVS commands */ + struct mcp_val64 val64; - u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; - struct mcp_file_att file_att; + struct mcp_file_att file_att; - u32 ack_vf_disabled[VF_MAX_STATIC / 32]; + u32 ack_vf_disabled[VF_MAX_STATIC / 32]; - struct drv_version_stc drv_version; + struct drv_version_stc drv_version; + + struct lan_stats_stc lan_stats; + u64 reserved_stats[11]; + struct ocbb_data_stc ocbb_info; + struct temperature_status_stc temp_info; + struct bist_nvm_image_att nvm_image_att; + struct mdump_config_stc mdump_config; }; struct public_drv_mb { u32 drv_mb_header; -#define DRV_MSG_CODE_MASK 0xffff0000 -#define DRV_MSG_CODE_LOAD_REQ 0x10000000 -#define DRV_MSG_CODE_LOAD_DONE 0x11000000 -#define DRV_MSG_CODE_INIT_HW 0x12000000 -#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 -#define 
DRV_MSG_CODE_UNLOAD_DONE 0x21000000 -#define DRV_MSG_CODE_INIT_PHY 0x22000000 - /* Params - FORCE - Reinitialize the link regardless of LFA */ - /* - DONT_CARE - Don't flap the link if up */ -#define DRV_MSG_CODE_LINK_RESET 0x23000000 - -#define DRV_MSG_CODE_SET_LLDP 0x24000000 -#define DRV_MSG_CODE_SET_DCBX 0x25000000 +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_LOAD_REQ 0x10000000 +#define DRV_MSG_CODE_LOAD_DONE 0x11000000 +#define DRV_MSG_CODE_INIT_HW 0x12000000 +#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 +#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 +#define DRV_MSG_CODE_INIT_PHY 0x22000000 +#define DRV_MSG_CODE_LINK_RESET 0x23000000 +#define DRV_MSG_CODE_SET_DCBX 0x25000000 + #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 -#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 - -#define DRV_MSG_CODE_INITIATE_FLR 0x02000000 -#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 -#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 -#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 -#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 -#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 -#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 -#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 -#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000 -#define DRV_MSG_CODE_MCP_RESET 0x00090000 -#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000 -#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000 -#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000 -#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000 -#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 -#define DRV_MSG_CODE_SET_VERSION 0x000f0000 - -#define DRV_MSG_CODE_BIST_TEST 0x001e0000 -#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 - -#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 +#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 +#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 +#define DRV_MSG_CODE_MCP_RESET 0x00090000 +#define DRV_MSG_CODE_SET_VERSION 0x000f0000 + +#define DRV_MSG_CODE_BIST_TEST 0x001e0000 +#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 + +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF +#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 + - /* UNLOAD_REQ params */ -#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 -#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 -#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 -#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 - - /* UNLOAD_DONE_params */ -#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 - - /* INIT_PHY params */ -#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 -#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 - - /* LLDP / DCBX params*/ -#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 -#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 -#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 -#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1 -#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 -#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 - -#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF -#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0 - -#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 -#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 - -#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0 -#define DRV_MB_PARAM_NVM_OFFSET_MASK 
0x00FFFFFF -#define DRV_MB_PARAM_NVM_LEN_SHIFT 24 -#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 - -#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0 -#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF -#define DRV_MB_PARAM_PHY_LANE_SHIFT 16 -#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000 -#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29 -#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000 -#define DRV_MB_PARAM_PHY_PORT_SHIFT 30 -#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000 - -/* configure vf MSIX params*/ -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 - -#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 -#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 -#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 - -#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 -#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 -#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 - -#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 -#define DRV_MB_PARAM_BIST_RC_PASSED 1 -#define DRV_MB_PARAM_BIST_RC_FAILED 2 -#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 - -#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 -#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 + +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF u32 fw_mb_header; -#define FW_MSG_CODE_MASK 0xffff0000 -#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 -#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 -#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 -#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 -#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 -#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 -#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 -#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 -#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000 -#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000 -#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000 -#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000 -#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000 -#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000 -#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000 -#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 -#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 -#define FW_MSG_CODE_FLR_ACK 0x02000000 -#define FW_MSG_CODE_FLR_NACK 0x02100000 - -#define FW_MSG_CODE_NVM_OK 0x00010000 -#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000 -#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000 -#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000 -#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000 -#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000 -#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000 -#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000 -#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000 -#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000 -#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000 -#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000 -#define 
FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000 -#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000 -#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000 -#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000 -#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000 -#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000 -#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 -#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000 -#define FW_MSG_CODE_PHY_OK 0x00110000 -#define FW_MSG_CODE_PHY_ERROR 0x00120000 -#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000 -#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000 -#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000 -#define FW_MSG_CODE_OK 0x00160000 - -#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff - - u32 fw_mb_param; - - u32 drv_pulse_mb; -#define DRV_PULSE_SEQ_MASK 0x00007fff -#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 -#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 +#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 +#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 +#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 +#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 +#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 +#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 +#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 +#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 +#define FW_MSG_CODE_OK 0x00160000 + +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 fw_mb_param; + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + u32 mcp_pulse_mb; -#define MCP_PULSE_SEQ_MASK 0x00007fff -#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 -#define MCP_EVENT_MASK 0xffff0000 -#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 union drv_union_data union_data; }; -/* MFW - DRV MB */ -/********************************************************************** -* Description -* Incremental Aggregative -* 8-bit MFW counter per message -* 8-bit ack-counter per message -* Capabilities -* Provides up to 256 aggregative message per type -* Provides 4 message types in dword -* Message type pointers to byte offset -* Backward Compatibility by using sizeof for the counters. -* No lock requires for 32bit messages -* Limitations: -* In case of messages greater than 32bit, a dedicated mechanism(e.g lock) -* is required to prevent data corruption. 
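For reference, the mailbox scheme the removed comment above describes works byte-wise: each message type in the enum that follows owns one 8-bit counter inside msg[], with a matching acknowledgment counter in ack[], and the MFW_DRV_MSG_* macros below locate that byte. A minimal sketch of how a driver could test for a pending event under this scheme; the helper name is illustrative, not part of the patch:

/* Assumes struct public_mfw_mb and the MFW_DRV_MSG_* macros defined
 * just below; 'mb' is a snapshot of the shared-memory mailbox.
 */
static bool mfw_msg_pending(const struct public_mfw_mb *mb, int msg_id)
{
	u32 msg = mb->msg[MFW_DRV_MSG_DWORD(msg_id)];
	u32 ack = mb->ack[MFW_DRV_MSG_DWORD(msg_id)];

	/* The MFW bumps its 8-bit counter for every event; the driver
	 * copies that byte into ack[] once handled, so any difference
	 * in the message's byte means at least one unhandled event.
	 */
	return (msg & MFW_DRV_MSG_MASK(msg_id)) !=
	       (ack & MFW_DRV_MSG_MASK(msg_id));
}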
-**********************************************************************/ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LINK_CHANGE, MFW_DRV_MSG_FLR_FW_ACK_FAILED, @@ -4085,37 +7312,33 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LLDP_DATA_UPDATED, MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, - MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_RESERVED4, MFW_DRV_MSG_BW_UPDATE, - MFW_DRV_MSG_S_TAG_UPDATE, - MFW_DRV_MSG_GET_LAN_STATS, - MFW_DRV_MSG_GET_FCOE_STATS, - MFW_DRV_MSG_GET_ISCSI_STATS, - MFW_DRV_MSG_GET_RDMA_STATS, - MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_BW_UPDATE5, + MFW_DRV_MSG_BW_UPDATE6, + MFW_DRV_MSG_BW_UPDATE7, + MFW_DRV_MSG_BW_UPDATE8, + MFW_DRV_MSG_BW_UPDATE9, + MFW_DRV_MSG_BW_UPDATE10, MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, + MFW_DRV_MSG_BW_UPDATE11, MFW_DRV_MSG_MAX }; -#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) -#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) -#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) -#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) +#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) struct public_mfw_mb { - u32 sup_msgs; - u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; - u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 sup_msgs; + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; }; -/**************************************/ -/* */ -/* P U B L I C D A T A */ -/* */ -/**************************************/ enum public_sections { - PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */ - PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */ + PUBLIC_DRV_MB, + PUBLIC_MFW_MB, PUBLIC_GLOBAL, PUBLIC_PATH, PUBLIC_PORT, @@ -4123,1080 +7346,179 @@ enum public_sections { PUBLIC_MAX_SECTIONS }; -struct drv_ver_info_stc { - u32 ver; - u8 name[32]; -}; - struct mcp_public_data { - /* The sections fields is an array */ - u32 num_sections; - offsize_t sections[PUBLIC_MAX_SECTIONS]; - struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; - struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; - struct public_global global; - struct public_path path[MCP_GLOB_PATH_MAX]; - struct public_port port[MCP_GLOB_PORT_MAX]; - struct public_func func[MCP_GLOB_FUNC_MAX]; - struct drv_ver_info_stc drv_info; + u32 num_sections; + u32 sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; }; struct nvm_cfg_mac_address { - u32 mac_addr_hi; -#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF -#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 - - u32 mac_addr_lo; + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + u32 mac_addr_lo; }; -/****************************************** -* nvm_cfg1 structs -******************************************/ - struct nvm_cfg1_glob { - u32 generic_cont0; /* 0x0 */ -#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F -#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0 -#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0 -#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1 -#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2 -#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3 -#define NVM_CFG1_GLOB_MF_MODE_MASK 
0x00000FF0 -#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 -#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 -#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 -#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 -#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 -#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 -#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 -#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000 -#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12 -#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0 -#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1 -#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000 -#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13 -#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000 -#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21 -#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000 -#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29 -#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0 -#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1 -#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000 -#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30 -#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0 -#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1 -#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000 -#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31 -#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0 -#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1 - - u32 engineering_change[3]; /* 0x4 */ - - u32 manufacturing_id; /* 0x10 */ - - u32 serial_number[4]; /* 0x14 */ - - u32 pcie_cfg; /* 0x24 */ -#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003 -#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0 -#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0 -#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1 -#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2 -#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004 -#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2 -#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0 -#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3 -#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020 -#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5 -#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0 -#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1 -#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0 -#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3 -#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000 -#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13 -#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000 -#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21 -#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000 -#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29 - - u32 mgmt_traffic; /* 0x28 */ -#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001 -#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0 -#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0 -#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1 -#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE 
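Every option in these nvm_cfg blocks follows the same _MASK/_OFFSET pairing, so a single helper covers all of them. A sketch (the helper is illustrative, not taken from the patch) using the multi-function mode option, whose mask and offset are kept in the new layout:

static inline u32 nvm_cfg_get(u32 dword, u32 mask, u32 offset)
{
	/* Isolate the option's bits, then right-justify them. */
	return (dword & mask) >> offset;
}

/* Example:
 *	u32 mf = nvm_cfg_get(glob->generic_cont0,
 *			     NVM_CFG1_GLOB_MF_MODE_MASK,
 *			     NVM_CFG1_GLOB_MF_MODE_OFFSET);
 * yields one of NVM_CFG1_GLOB_MF_MODE_DEFAULT, _NPAR1_0, _UFP, etc.
 */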
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1 -#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00 -#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9 -#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000 -#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2 - - u32 core_cfg; /* 0x2C */ -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD -#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100 -#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8 -#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0 -#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1 -#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200 -#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9 -#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0 -#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1 -#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00 -#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10 -#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000 -#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18 -#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000 -#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26 -#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0 -#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1 -#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3 -#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000 -#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29 -#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0 -#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1 - - u32 e_lane_cfg1; /* 0x30 */ -#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F -#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 -#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 -#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 -#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 -#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 -#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 -#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 -#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 -#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 -#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 -#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 -#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 -#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 -#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 -#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 - - u32 e_lane_cfg2; /* 0x34 */ -#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 -#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 -#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 -#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 -#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 -#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 -#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 -#define 
NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 -#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 -#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 -#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 -#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 -#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 -#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 -#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 -#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 -#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00 -#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8 -#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0 -#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1 -#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2 -#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000 -#define NVM_CFG1_GLOB_NCSI_OFFSET 12 -#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0 -#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1 - - u32 f_lane_cfg1; /* 0x38 */ -#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F -#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 -#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 -#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 -#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 -#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 -#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 -#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 -#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 -#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 -#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 -#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 -#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 -#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 -#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 -#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 - - u32 f_lane_cfg2; /* 0x3C */ -#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 -#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 -#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 -#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 -#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 -#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 -#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 -#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 -#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 -#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 -#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 -#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 -#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 -#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 -#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 -#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 - - u32 eagle_preemphasis; /* 0x40 */ -#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 - - u32 eagle_driver_current; /* 0x44 */ -#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 - - u32 falcon_preemphasis; /* 0x48 */ -#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 
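The serdes tuning dwords in this block (eagle/falcon preemphasis, driver current, and the txfir main/post words further down) all pack one byte per lane, so lane n occupies bits 8n..8n+7; that is what the LANE0..LANE3 mask/offset pairs spell out long-hand. A sketch of reading all four lanes at once; the function name and log text are illustrative only:

static void dump_lane_bytes(u32 dword, const char *what)
{
	int lane;

	/* Lane n's 8-bit value sits at bit offset 8 * n. */
	for (lane = 0; lane < 4; lane++)
		pr_info("lane%d %s: 0x%02x\n",
			lane, what, (dword >> (8 * lane)) & 0xff);
}

/* e.g. dump_lane_bytes(glob->falcon_preemphasis, "preemphasis"); */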
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 - - u32 falcon_driver_current; /* 0x4C */ -#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 - - u32 pci_id; /* 0x50 */ -#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF -#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0 - - u32 pci_subsys_id; /* 0x54 */ -#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF -#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0 -#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000 -#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16 - - u32 bar; /* 0x58 */ -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF -#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00 -#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8 -#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0 -#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1 -#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2 -#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3 -#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4 -#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5 -#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6 -#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7 -#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8 -#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9 -#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA -#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB -#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC -#define NVM_CFG1_GLOB_BAR2_SIZE_256M 
0xD -#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE -#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF - - u32 eagle_txfir_main; /* 0x5C */ -#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 - - u32 eagle_txfir_post; /* 0x60 */ -#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 - - u32 falcon_txfir_main; /* 0x64 */ -#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 - - u32 falcon_txfir_post; /* 0x68 */ -#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 - - u32 manufacture_ver; /* 0x6C */ -#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F -#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0 -#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0 -#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6 -#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000 -#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12 -#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000 -#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18 -#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000 -#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24 - - u32 manufacture_time; /* 0x70 */ -#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F -#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0 -#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0 -#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6 -#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000 -#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12 - - u32 led_global_settings; /* 0x74 */ -#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F -#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0 -#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0 -#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4 -#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00 -#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8 -#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000 -#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12 - - u32 generic_cont1; /* 0x78 */ -#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF -#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0 - - u32 mbi_version; /* 0x7C */ -#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF -#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 -#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 -#define 
NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 - - u32 mbi_date; /* 0x80 */ - - u32 misc_sig; /* 0x84 */ - - /* Define the GPIO mapping to switch i2c mux */ -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20 - u32 device_capabilities; /* 0x88 */ -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 - u32 power_dissipated; /* 0x8C */ - u32 power_consumed; /* 0x90 */ - u32 efi_version; /* 0x94 */ - u32 reserved[42]; /* 0x98 */ + u32 generic_cont0; +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 + u32 engineering_change[3]; + u32 manufacturing_id; + u32 serial_number[4]; + u32 pcie_cfg; + u32 mgmt_traffic; + u32 core_cfg; +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD +#define 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE + u32 e_lane_cfg1; + u32 e_lane_cfg2; + u32 f_lane_cfg1; + u32 f_lane_cfg2; + u32 mps10_preemphasis; + u32 mps10_driver_current; + u32 mps25_preemphasis; + u32 mps25_driver_current; + u32 pci_id; + u32 pci_subsys_id; + u32 bar; + u32 mps10_txfir_main; + u32 mps10_txfir_post; + u32 mps25_txfir_main; + u32 mps25_txfir_post; + u32 manufacture_ver; + u32 manufacture_time; + u32 led_global_settings; + u32 generic_cont1; + u32 mbi_version; + u32 mbi_date; + u32 misc_sig; + u32 device_capabilities; +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 + u32 power_dissipated; + u32 power_consumed; + u32 efi_version; + u32 multi_network_modes_capability; + u32 reserved[41]; }; struct nvm_cfg1_path { - u32 reserved[30]; /* 0x0 */ + u32 reserved[30]; }; struct nvm_cfg1_port { - u32 reserved__m_relocated_to_option_123; /* 0x0 */ - u32 reserved__m_relocated_to_option_124; /* 0x4 */ - u32 generic_cont0; /* 0x8 */ -#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF -#define NVM_CFG1_PORT_LED_MODE_OFFSET 0 -#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0 -#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1 -#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2 -#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3 -#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4 -#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5 -#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6 -#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7 -#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8 -#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9 -#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA -#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB -#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC -#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD -#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE -#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF -#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00 -#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8 -#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 -#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 -#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 -#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 -#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 -#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 - u32 pcie_cfg; /* 0xC */ -#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007 -#define NVM_CFG1_PORT_RESERVED15_OFFSET 0 - - u32 features; /* 0x10 */ -#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001 -#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0 -#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0 -#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1 -#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002 -#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1 -#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0 -#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1 - - u32 speed_cap_mask; /* 0x14 */ -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000 -#define 
NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40 - - u32 link_settings; /* 0x18 */ -#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F -#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4 -#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000 -#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14 -#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0 -#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1 - - u32 phy_cfg; /* 0x1C */ -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31 -#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000 -#define NVM_CFG1_PORT_AN_MODE_OFFSET 24 -#define NVM_CFG1_PORT_AN_MODE_NONE 0x0 -#define NVM_CFG1_PORT_AN_MODE_CL73 0x1 -#define NVM_CFG1_PORT_AN_MODE_CL37 0x2 -#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3 
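/* Every field in these nvm_cfg dwords follows the same _MASK/_OFFSET
 * convention: AND with the mask, then shift right by the offset. Below is
 * a minimal stand-alone sketch of decoding link_settings that way; it is
 * an illustration, not part of this patch, and the sample dword value is
 * made up. The short local define names mirror the
 * NVM_CFG1_PORT_DRV_LINK_SPEED_* and NVM_CFG1_PORT_DRV_FLOW_CONTROL_*
 * values above.
 */
#include <stdint.h>
#include <stdio.h>

#define DRV_LINK_SPEED_MASK	0x0000000F
#define DRV_LINK_SPEED_OFFSET	0
#define DRV_FLOW_CONTROL_MASK	0x00000070
#define DRV_FLOW_CONTROL_OFFSET	4

static uint32_t nvm_field(uint32_t dword, uint32_t mask, uint32_t offset)
{
	return (dword & mask) >> offset;
}

int main(void)
{
	uint32_t link_settings = 0x25;	/* sample raw dword */

	/* prints "speed 5 fc 2": 0x5 selects 40G, 0x2 selects RX pause */
	printf("speed %u fc %u\n",
	       (unsigned)nvm_field(link_settings, DRV_LINK_SPEED_MASK,
				   DRV_LINK_SPEED_OFFSET),
	       (unsigned)nvm_field(link_settings, DRV_FLOW_CONTROL_MASK,
				   DRV_FLOW_CONTROL_OFFSET));
	return 0;
}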
-#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4 -#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5 -#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6 - - u32 mgmt_traffic; /* 0x20 */ -#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F -#define NVM_CFG1_PORT_RESERVED61_OFFSET 0 - - u32 ext_phy; /* 0x24 */ -#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF -#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0 -#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0 -#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1 -#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00 -#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8 - - u32 mba_cfg1; /* 0x28 */ -#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001 -#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0 -#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0 -#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1 -#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006 -#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1 -#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078 -#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3 -#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080 -#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7 -#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0 -#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1 -#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100 -#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8 -#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0 -#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1 -#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00 -#define NVM_CFG1_PORT_RESERVED5_OFFSET 9 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8 -#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 -#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21 - - u32 mba_cfg2; /* 0x2C */ -#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF -#define NVM_CFG1_PORT_RESERVED65_OFFSET 0 -#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000 -#define NVM_CFG1_PORT_RESERVED66_OFFSET 16 - - u32 vf_cfg; /* 0x30 */ -#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF -#define NVM_CFG1_PORT_RESERVED8_OFFSET 0 -#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000 -#define NVM_CFG1_PORT_RESERVED6_OFFSET 16 - - struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */ - - u32 led_port_settings; /* 0x3C */ -#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF -#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0 -#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00 -#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8 -#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000 -#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40 - - u32 transceiver_00; /* 0x40 */ - - /* Define for mapping of transceiver signal module absent */ -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF 
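/* The long GPIO selector tables (I2C_MUX_SEL_GPIO in the glob section
 * above, TRANS_MODULE_ABS below) all use the same encoding: 0 means no
 * GPIO is assigned and N selects GPIO N-1, which is why the values run
 * from 0x1 (GPIO0) to 0x20 (GPIO31). A tiny sketch of the reverse
 * mapping; the helper is illustrative only, not driver code.
 */
#include <stdio.h>

/* Returns the GPIO number for an NVM GPIO selector, or -1 for "NA". */
static int nvm_gpio_num(unsigned int sel)
{
	return sel ? (int)sel - 1 : -1;
}

int main(void)
{
	/* prints "-1 0 31" for NA, GPIO0 and GPIO31 respectively */
	printf("%d %d %d\n",
	       nvm_gpio_num(0x0), nvm_gpio_num(0x1), nvm_gpio_num(0x20));
	return 0;
}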
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20 - /* Define the GPIO mux settings to switch i2c mux to this port */ -#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00 -#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8 -#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000 -#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12 - - u32 reserved[133]; /* 0x44 */ + u32 reserved__m_relocated_to_option_123; + u32 reserved__m_relocated_to_option_124; + u32 generic_cont0; +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 +#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 +#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 +#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 +#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 + u32 pcie_cfg; + u32 features; + u32 speed_cap_mask; +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + u32 link_settings; +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F +#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define 
NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 + u32 phy_cfg; + u32 mgmt_traffic; + u32 ext_phy; + u32 mba_cfg1; + u32 mba_cfg2; + u32 vf_cfg; + struct nvm_cfg_mac_address lldp_mac_address; + u32 led_port_settings; + u32 transceiver_00; + u32 device_ids; + u32 board_cfg; + u32 mnm_10g_cap; + u32 mnm_10g_ctrl; + u32 mnm_10g_misc; + u32 mnm_25g_cap; + u32 mnm_25g_ctrl; + u32 mnm_25g_misc; + u32 mnm_40g_cap; + u32 mnm_40g_ctrl; + u32 mnm_40g_misc; + u32 mnm_50g_cap; + u32 mnm_50g_ctrl; + u32 mnm_50g_misc; + u32 mnm_100g_cap; + u32 mnm_100g_ctrl; + u32 mnm_100g_misc; + u32 reserved[116]; }; struct nvm_cfg1_func { - struct nvm_cfg_mac_address mac_address; /* 0x0 */ - - u32 rsrv1; /* 0x8 */ -#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF -#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0 -#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16 - - u32 rsrv2; /* 0xC */ -#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF -#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0 -#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16 - - u32 device_id; /* 0x10 */ -#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF -#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0 -#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16 - - u32 cmn_cfg; /* 0x14 */ -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7 -#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8 -#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3 -#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000 -#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19 -#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0 -#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1 -#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2 -#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3 -#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000 -#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23 -#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000 -#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31 -#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0 -#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1 - - u32 pci_cfg; /* 0x18 */ -#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F -#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0 -#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80 -#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7 -#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000 -#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14 -#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0 -#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1 -#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2 -#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3 -#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4 -#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5 -#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6 -#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7 -#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8 -#define 
NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9 -#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA -#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB -#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC -#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD -#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE -#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF -#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000 -#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18 - - struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */ - - struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */ - u32 preboot_generic_cfg; /* 0x2C */ - u32 reserved[8]; /* 0x30 */ + struct nvm_cfg_mac_address mac_address; + u32 rsrv1; + u32 rsrv2; + u32 device_id; + u32 cmn_cfg; + u32 pci_cfg; + struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; + struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; + u32 preboot_generic_cfg; + u32 reserved[8]; }; struct nvm_cfg1 { - struct nvm_cfg1_glob glob; /* 0x0 */ - - struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */ - - struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */ - - struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */ -}; - -/****************************************** -* nvm_cfg structs -******************************************/ - -enum nvm_cfg_sections { - NVM_CFG_SECTION_NVM_CFG1, - NVM_CFG_SECTION_MAX -}; - -struct nvm_cfg { - u32 num_sections; - u32 sections_offset[NVM_CFG_SECTION_MAX]; - struct nvm_cfg1 cfg1; -}; - -#define PORT_0 0 -#define PORT_1 1 -#define PORT_2 2 -#define PORT_3 3 - -extern struct spad_layout g_spad; - -#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */ - -#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE)) - -#define TO_OFFSIZE(_offset, _size) \ - (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \ - (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT)) - -enum spad_sections { - SPAD_SECTION_TRACE, - SPAD_SECTION_NVM_CFG, - SPAD_SECTION_PUBLIC, - SPAD_SECTION_PRIVATE, - SPAD_SECTION_MAX -}; - -struct spad_layout { - struct nvm_cfg nvm_cfg; - struct mcp_public_data public_data; -}; - -#define CRC_MAGIC_VALUE 0xDEBB20E3 -#define CRC32_POLYNOMIAL 0xEDB88320 -#define NVM_CRC_SIZE (sizeof(u32)) - -enum nvm_sw_arbitrator { - NVM_SW_ARB_HOST, - NVM_SW_ARB_MCP, - NVM_SW_ARB_UART, - NVM_SW_ARB_RESERVED + struct nvm_cfg1_glob glob; + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; }; - -/**************************************************************************** -* Boot Strap Region * -****************************************************************************/ -struct legacy_bootstrap_region { - u32 magic_value; -#define NVM_MAGIC_VALUE 0x669955aa - u32 sram_start_addr; - u32 code_len; /* boot code length (in dwords) */ - u32 code_start_addr; - u32 crc; /* 32-bit CRC */ -}; - -/**************************************************************************** -* Directories Region * -****************************************************************************/ -struct nvm_code_entry { - u32 image_type; /* Image type */ - u32 nvm_start_addr; /* NVM address of the image */ - u32 len; /* Include CRC */ - u32 sram_start_addr; - u32 sram_run_addr; /* Relevant in case of MIM only */ -}; - -enum nvm_image_type { - NVM_TYPE_TIM1 = 0x01, - NVM_TYPE_TIM2 = 0x02, - NVM_TYPE_MIM1 = 0x03, - NVM_TYPE_MIM2 = 0x04, - NVM_TYPE_MBA = 0x05, - NVM_TYPE_MODULES_PN = 0x06, - NVM_TYPE_VPD = 0x07, - NVM_TYPE_MFW_TRACE1 = 0x08, - NVM_TYPE_MFW_TRACE2 = 0x09, - NVM_TYPE_NVM_CFG1 = 0x0a, - NVM_TYPE_L2B = 0x0b, - NVM_TYPE_DIR1 = 0x0c, - 
NVM_TYPE_EAGLE_FW1 = 0x0d, - NVM_TYPE_FALCON_FW1 = 0x0e, - NVM_TYPE_PCIE_FW1 = 0x0f, - NVM_TYPE_HW_SET = 0x10, - NVM_TYPE_LIM = 0x11, - NVM_TYPE_AVS_FW1 = 0x12, - NVM_TYPE_DIR2 = 0x13, - NVM_TYPE_CCM = 0x14, - NVM_TYPE_EAGLE_FW2 = 0x15, - NVM_TYPE_FALCON_FW2 = 0x16, - NVM_TYPE_PCIE_FW2 = 0x17, - NVM_TYPE_AVS_FW2 = 0x18, - - NVM_TYPE_MAX, -}; - -#define MAX_NVM_DIR_ENTRIES 200 - -struct nvm_dir { - s32 seq; -#define NVM_DIR_NEXT_MFW_MASK 0x00000001 -#define NVM_DIR_SEQ_MASK 0xfffffffe -#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK) - -#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK) - - u32 num_images; - u32 rsrv; - struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */ -}; - -#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \ - (_num_images - \ - 1) * sizeof(struct nvm_code_entry) + \ - NVM_CRC_SIZE) - -struct nvm_vpd_image { - u32 format_revision; -#define VPD_IMAGE_VERSION 1 - - /* This array length depends on the number of VPD fields */ - u8 vpd_data[1]; -}; - -/**************************************************************************** -* NVRAM FULL MAP * -****************************************************************************/ -#define DIR_ID_1 (0) -#define DIR_ID_2 (1) -#define MAX_DIR_IDS (2) - -#define MFW_BUNDLE_1 (0) -#define MFW_BUNDLE_2 (1) -#define MAX_MFW_BUNDLES (2) - -#define FLASH_PAGE_SIZE 0x1000 -#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4Kb */ -#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2Mb */ -#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 60Kb */ - -#define LIM_MAX_SIZE ((2 * \ - FLASH_PAGE_SIZE) - \ - sizeof(struct legacy_bootstrap_region) - \ - NVM_RSV_SIZE) -#define LIM_OFFSET (NVM_OFFSET(lim_image)) -#define NVM_RSV_SIZE (44) -#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \ - FPGA_MIM_MAX_SIZE) -#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \ - ((idx == \ - NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0)) -#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \ - MIM_MAX_SIZE(is_asic) * 2) - -union nvm_dir_union { - struct nvm_dir dir; - u8 page[FLASH_PAGE_SIZE]; -}; - -/* Address - * +-------------------+ 0x000000 - * | Bootstrap: | - * | magic_number | - * | sram_start_addr | - * | code_len | - * | code_start_addr | - * | crc | - * +-------------------+ 0x000014 - * | rsrv | - * +-------------------+ 0x000040 - * | LIM | - * +-------------------+ 0x002000 - * | Dir1 | - * +-------------------+ 0x003000 - * | Dir2 | - * +-------------------+ 0x004000 - * | MIM1 | - * +-------------------+ 0x130000 - * | MIM2 | - * +-------------------+ 0x25C000 - * | Rest Images: | - * | TIM1/2 | - * | MFW_TRACE1/2 | - * | Eagle/Falcon FW | - * | PCIE/AVS FW | - * | MBA/CCM/L2B | - * | VPD | - * | optic_modules | - * | ... | - * +-------------------+ 0x400000 - */ -struct nvm_image { -/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/ - /* NVM Offset (size) */ - struct legacy_bootstrap_region bootstrap; - u8 rsrv[NVM_RSV_SIZE]; - u8 lim_image[LIM_MAX_SIZE]; - union nvm_dir_union dir[MAX_MFW_BUNDLES]; - - /* MIM1_IMAGE 0x004000 (0x12c000) */ - /* MIM2_IMAGE 0x130000 (0x12c000) */ -/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! 
**********************/ -}; /* 0x134 */ - -#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f)))) - -struct hw_set_info { - u32 reg_type; -#define GRC_REG_TYPE 1 -#define PHY_REG_TYPE 2 -#define PCI_REG_TYPE 4 - - u32 bank_num; - u32 pf_num; - u32 operation; -#define READ_OP 1 -#define WRITE_OP 2 -#define RMW_SET_OP 3 -#define RMW_CLR_OP 4 - - u32 reg_addr; - u32 reg_data; - - u32 reset_type; -#define POR_RESET_TYPE BIT(0) -#define HARD_RESET_TYPE BIT(1) -#define CORE_RESET_TYPE BIT(2) -#define MCP_RESET_TYPE BIT(3) -#define PERSET_ASSERT BIT(4) -#define PERSET_DEASSERT BIT(5) -}; - -struct hw_set_image { - u32 format_version; -#define HW_SET_IMAGE_VERSION 1 - u32 no_hw_sets; - - /* This array length depends on the no_hw_sets */ - struct hw_set_info hw_sets[1]; -}; - -int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u8 pf_id, u16 pf_wfq); -int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 0ada7fdb9..e17885321 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -446,7 +446,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, idx_cmd, le32_to_cpu(command->opcode), le16_to_cpu(command->opcode_b), - le16_to_cpu(command->length), + le16_to_cpu(command->length_dw), le32_to_cpu(command->src_addr_hi), le32_to_cpu(command->src_addr_lo), le32_to_cpu(command->dst_addr_hi), @@ -461,7 +461,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, idx_cmd, le32_to_cpu(command->opcode), le16_to_cpu(command->opcode_b), - le16_to_cpu(command->length), + le16_to_cpu(command->length_dw), le32_to_cpu(command->src_addr_hi), le32_to_cpu(command->src_addr_lo), le32_to_cpu(command->dst_addr_hi), @@ -645,7 +645,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, return -EINVAL; } - cmd->length = cpu_to_le16((u16)length); + cmd->length_dw = cpu_to_le16((u16)length); qed_dmae_post_command(p_hwfn, p_ptt); @@ -768,6 +768,29 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, return rc; } +int +qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, + dma_addr_t dest_addr, u32 size_in_dwords, u32 flags) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + struct qed_dmae_params params; + int rc; + + memset(¶ms, 0, sizeof(struct qed_dmae_params)); + params.flags = flags; + + mutex_lock(&p_hwfn->dmae_info.mutex); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, + dest_addr, QED_DMAE_ADDRESS_GRC, + QED_DMAE_ADDRESS_HOST_VIRT, + size_in_dwords, ¶ms); + + mutex_unlock(&p_hwfn->dmae_info.mutex); + + return rc; +} + int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -791,16 +814,16 @@ qed_dmae_host2host(struct qed_hwfn *p_hwfn, } u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, - enum protocol_type proto, - union qed_qm_pq_params *p_params) + enum protocol_type proto, union qed_qm_pq_params *p_params) { u16 pq_id = 0; - if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) && - !p_params) { + if ((proto == PROTOCOLID_CORE || + proto == PROTOCOLID_ETH || + proto == PROTOCOLID_ISCSI || + proto == PROTOCOLID_ROCE) && !p_params) { DP_NOTICE(p_hwfn, - "Protocol %d received NULL PQ params\n", - proto); + "Protocol %d received NULL PQ params\n", proto); return 0; } @@ -808,6 +831,8 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, case PROTOCOLID_CORE: if (p_params->core.tc == LB_TC) pq_id = 
p_hwfn->qm_info.pure_lb_pq; + else if (p_params->core.tc == OOO_LB_TC) + pq_id = p_hwfn->qm_info.ooo_pq; else pq_id = p_hwfn->qm_info.offload_pq; break; @@ -817,6 +842,18 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, pq_id += p_hwfn->qm_info.vf_queues_offset + p_params->eth.vf_id; break; + case PROTOCOLID_ISCSI: + if (p_params->iscsi.q_idx == 1) + pq_id = p_hwfn->qm_info.pure_ack_pq; + break; + case PROTOCOLID_ROCE: + if (p_params->roce.dcqcn) + pq_id = p_params->roce.qpid; + else + pq_id = p_hwfn->qm_info.offload_pq; + if (pq_id > p_hwfn->qm_info.num_pf_rls) + pq_id = p_hwfn->qm_info.offload_pq; + break; default: pq_id = 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 4367363ad..d01557092 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -253,6 +253,10 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn); void qed_dmae_info_free(struct qed_hwfn *p_hwfn); union qed_qm_pq_params { + struct { + u8 q_idx; + } iscsi; + struct { u8 tc; } core; @@ -262,11 +266,15 @@ union qed_qm_pq_params { u8 vf_id; u8 tc; } eth; + + struct { + u8 dcqcn; + u8 qpid; /* roce relative */ + } roce; }; u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, - enum protocol_type proto, - union qed_qm_pq_params *params); + enum protocol_type proto, union qed_qm_pq_params *params); int qed_init_fw_data(struct qed_dev *cdev, const u8 *fw_data); diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index e8a3b9da5..23e455f22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -31,7 +31,6 @@ enum cminterface { }; /* general constants */ -#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ QM_PQ_ELEMENT_SIZE, \ 0x1000) : 0) @@ -44,28 +43,28 @@ enum cminterface { /* other PQ constants */ #define QM_OTHER_PQS_PER_PF 4 /* WFQ constants */ -#define QM_WFQ_UPPER_BOUND 6250000 +#define QM_WFQ_UPPER_BOUND 62500000 #define QM_WFQ_VP_PQ_VOQ_SHIFT 0 #define QM_WFQ_VP_PQ_PF_SHIFT 5 #define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) -#define QM_WFQ_MAX_INC_VAL 4375000 -#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val)) +#define QM_WFQ_MAX_INC_VAL 43750000 + /* RL constants */ -#define QM_RL_UPPER_BOUND 6250000 +#define QM_RL_UPPER_BOUND 62500000 #define QM_RL_PERIOD 5 /* in us */ #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) +#define QM_RL_MAX_INC_VAL 43750000 #define QM_RL_INC_VAL(rate) max_t(u32, \ - (((rate ? rate : 1000000) \ - * QM_RL_PERIOD) / 8), 1) -#define QM_RL_MAX_INC_VAL 4375000 + (u32)(((rate ? 
rate : \ + 1000000) * \ + QM_RL_PERIOD * \ + 101) / (8 * 100)), 1) /* AFullOprtnstcCrdMask constants */ #define QM_OPPOR_LINE_VOQ_DEF 1 #define QM_OPPOR_FW_STOP_DEF 0 #define QM_OPPOR_PQ_EMPTY_DEF 1 -#define EAGLE_WORKAROUND_TC 7 /* Command Queue constants */ #define PBF_CMDQ_PURE_LB_LINES 150 -#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8 #define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ @@ -80,7 +79,6 @@ enum cminterface { /* BTB: blocks constants (block size = 256B) */ #define BTB_JUMBO_PKT_BLOCKS 38 #define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS -#define BTB_EAGLE_WORKAROUND_BLOCKS 4 #define BTB_PURE_LB_FACTOR 10 #define BTB_PURE_LB_RATIO 7 /* QM stop command constants */ @@ -107,9 +105,9 @@ enum cminterface { cmd ## _ ## field, \ value) /* QM: VOQ macros */ -#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \ - (max_phy_tcs_pr_port) \ - + (tc)) +#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \ + (max_phys_tcs_per_port) + \ + (tc)) #define LB_VOQ(port) ( \ MAX_PHYS_VOQS + (port)) #define VOQ(port, tc, max_phy_tcs_pr_port) \ @@ -120,8 +118,7 @@ enum cminterface { : LB_VOQ(port)) /******************** INTERNAL IMPLEMENTATION *********************/ /* Prepare PF RL enable/disable runtime init values */ -static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, - bool pf_rl_en) +static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); if (pf_rl_en) { @@ -130,8 +127,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, (1 << MAX_NUM_VOQS) - 1); /* write RL period */ STORE_RT_REG(p_hwfn, - QM_REG_RLPFPERIOD_RT_OFFSET, - QM_RL_PERIOD_CLK_25M); + QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M); @@ -144,8 +140,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, } /* Prepare PF WFQ enable/disable runtime init values */ -static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, - bool pf_wfq_en) +static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); /* set credit threshold for QM bypass flow */ @@ -156,8 +151,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, } /* Prepare VPORT RL enable/disable runtime init values */ -static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, - bool vport_rl_en) +static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0); @@ -178,8 +172,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, } /* Prepare VPORT WFQ enable/disable runtime init values */ -static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, - bool vport_wfq_en) +static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 
1 : 0); @@ -194,8 +187,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, * the specified VOQ */ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, - u8 voq, - u16 cmdq_lines) + u8 voq, u16 cmdq_lines) { u32 qm_line_crd; @@ -221,7 +213,7 @@ static void qed_cmdq_lines_rt_init( u8 max_phys_tcs_per_port, struct init_qm_port_params port_params[MAX_NUM_PORTS]) { - u8 tc, voq, port_id; + u8 tc, voq, port_id, num_tcs_in_port; /* clear PBF lines for all VOQs */ for (voq = 0; voq < MAX_NUM_VOQS; voq++) @@ -229,22 +221,31 @@ static void qed_cmdq_lines_rt_init( for (port_id = 0; port_id < max_ports_per_engine; port_id++) { if (port_params[port_id].active) { u16 phys_lines, phys_lines_per_tc; - u8 phys_tcs = port_params[port_id].num_active_phys_tcs; - /* find #lines to divide between the active - * physical TCs. - */ + /* find #lines to divide between active phys TCs */ phys_lines = port_params[port_id].num_pbf_cmd_lines - PBF_CMDQ_PURE_LB_LINES; /* find #lines per active physical TC */ - phys_lines_per_tc = phys_lines / phys_tcs; + num_tcs_in_port = 0; + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + num_tcs_in_port++; + } + + phys_lines_per_tc = phys_lines / num_tcs_in_port; /* init registers per active TC */ - for (tc = 0; tc < phys_tcs; tc++) { + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) != 1) + continue; + voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port); qed_cmdq_lines_voq_rt_init(p_hwfn, voq, phys_lines_per_tc); } + /* init registers for pure LB TC */ qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), PBF_CMDQ_PURE_LB_LINES); @@ -259,34 +260,42 @@ static void qed_btb_blocks_rt_init( struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u32 usable_blocks, pure_lb_blocks, phys_blocks; - u8 tc, voq, port_id; + u8 tc, voq, port_id, num_tcs_in_port; for (port_id = 0; port_id < max_ports_per_engine; port_id++) { u32 temp; - u8 phys_tcs; if (!port_params[port_id].active) continue; - phys_tcs = port_params[port_id].num_active_phys_tcs; - /* subtract headroom blocks */ usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS; - /* find blocks per physical TC. use factor to avoid - * floating arithmethic. - */ + /* find blocks per physical TC */ + num_tcs_in_port = 0; + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + num_tcs_in_port++; + } + pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / - (phys_tcs * BTB_PURE_LB_FACTOR + + (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO); pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS, pure_lb_blocks / BTB_PURE_LB_FACTOR); - phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs; + phys_blocks = (usable_blocks - pure_lb_blocks) / + num_tcs_in_port; /* init physical TCs */ - for (tc = 0; tc < phys_tcs; tc++) { - voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port); + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) != 1) + continue; + + voq = PHYS_VOQ(port_id, tc, + max_phys_tcs_per_port); STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq), phys_blocks); } @@ -360,10 +369,11 @@ static void qed_tx_pq_map_rt_init( memset(&tx_pq_map, 0, sizeof(tx_pq_map)); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, - is_vf_pq ? 1 : 0); + p_params->pq_params[i].rl_valid ? 
1 : 0); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, - is_vf_pq ? p_params->pq_params[i].vport_id : 0); + p_params->pq_params[i].rl_valid ? + p_params->pq_params[i].vport_id : 0); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, p_params->pq_params[i].wrr_group); @@ -390,25 +400,11 @@ static void qed_tx_pq_map_rt_init( /* store Tx PQ VF mask to size select register */ for (i = 0; i < num_tx_pq_vf_masks; i++) { if (tx_pq_vf_mask[i]) { - if (is_bb_a0) { - u32 curr_mask = 0, addr; - - addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4); - if (!p_params->is_first_pf) - curr_mask = qed_rd(p_hwfn, p_ptt, - addr); - - addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; - - STORE_RT_REG(p_hwfn, addr, - curr_mask | tx_pq_vf_mask[i]); - } else { - u32 addr; + u32 addr; - addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; - STORE_RT_REG(p_hwfn, addr, - tx_pq_vf_mask[i]); - } + addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; + STORE_RT_REG(p_hwfn, addr, + tx_pq_vf_mask[i]); } } } @@ -418,8 +414,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, u8 port_id, u8 pf_id, u32 num_pf_cids, - u32 num_tids, - u32 base_mem_addr_4kb) + u32 num_tids, u32 base_mem_addr_4kb) { u16 i, pq_id; @@ -465,15 +460,10 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, (p_params->pf_id % MAX_NUM_PFS_BB); inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); - if (inc_val > QM_WFQ_MAX_INC_VAL) { + if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); return -1; } - STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, - inc_val); - STORE_RT_REG(p_hwfn, - QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, - QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); for (i = 0; i < num_tx_pqs; i++) { u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, @@ -481,19 +471,21 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB, - QM_WFQ_INIT_CRD(inc_val) | QM_WFQ_CRD_REG_SIGN_BIT); } + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, + inc_val); + STORE_RT_REG(p_hwfn, + QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, + QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); return 0; } /* Prepare PF RL runtime init values for the specified PF. * Return -1 on error. 
*/ -static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, - u8 pf_id, - u32 pf_rl) +static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) { u32 inc_val = QM_RL_INC_VAL(pf_rl); @@ -607,9 +599,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 cmd_addr, - u32 cmd_data_lsb, - u32 cmd_data_msb) + u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb) { if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) return false; @@ -627,9 +617,7 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, u32 qed_qm_pf_mem_size(u8 pf_id, u32 num_pf_cids, u32 num_vf_cids, - u32 num_tids, - u16 num_pf_pqs, - u16 num_vf_pqs) + u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs) { return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + @@ -713,8 +701,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, } int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 pf_id, u16 pf_wfq) + struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) { u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); @@ -728,9 +715,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, } int qed_init_pf_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 pf_id, - u32 pf_rl) + struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl) { u32 inc_val = QM_RL_INC_VAL(pf_rl); @@ -749,8 +734,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 first_tx_pq_id[NUM_OF_TCS], - u16 vport_wfq) + u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq) { u32 inc_val = QM_WFQ_INC_VAL(vport_wfq); u8 tc; @@ -773,9 +757,7 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, } int qed_init_vport_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 vport_id, - u32 vport_rl) + struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl) { u32 inc_val = QM_RL_INC_VAL(vport_rl); @@ -795,9 +777,7 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn, bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_release_cmd, - bool is_tx_pq, - u16 start_pq, - u16 num_pqs) + bool is_tx_pq, u16 start_pq, u16 num_pqs) { u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; @@ -841,17 +821,15 @@ qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable) #define PRS_ETH_TUNN_FIC_FORMAT -188897008 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 dest_port) + struct qed_ptt *p_ptt, u16 dest_port) { qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); - qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port); qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); } void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool vxlan_enable) + struct qed_ptt *p_ptt, bool vxlan_enable) { unsigned long reg_val = 0; u8 shift; @@ -908,8 +886,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 dest_port) + struct qed_ptt *p_ptt, u16 dest_port) { qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); @@ -918,8 +895,7 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool eth_geneve_enable, - bool ip_geneve_enable) + bool eth_geneve_enable, bool ip_geneve_enable) { unsigned long 
reg_val = 0; u8 shift; diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d358c3bb1..9866a20d2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -543,8 +543,7 @@ void qed_gtt_init(struct qed_hwfn *p_hwfn) pxp_global_win[i]); } -int qed_init_fw_data(struct qed_dev *cdev, - const u8 *data) +int qed_init_fw_data(struct qed_dev *cdev, const u8 *data) { struct qed_fw_data *fw = cdev->fw_data; struct bin_buffer_hdr *buf_hdr; @@ -555,7 +554,11 @@ int qed_init_fw_data(struct qed_dev *cdev, return -EINVAL; } - buf_hdr = (struct bin_buffer_hdr *)data; + /* First Dword contains metadata and should be skipped */ + buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32)); + + offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset; + fw->fw_ver_info = (struct fw_ver_info *)(data + offset); offset = buf_hdr[BIN_BUF_INIT_CMD].offset; fw->init_ops = (union init_op *)(data + offset); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 09a6ad3d2..8fa50fa23 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -2418,6 +2418,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, { struct qed_dev *cdev = p_hwfn->cdev; u32 cau_state; + u8 timer_res; memset(p_sb_entry, 0, sizeof(*p_sb_entry)); @@ -2443,6 +2444,23 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; } + /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ + if (cdev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (cdev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + if (cdev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (cdev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); } @@ -2484,17 +2502,28 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, /* Configure pi coalescing if set */ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { - u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >> - (QED_CAU_DEF_RX_TIMER_RES + 1); + u8 timeset, timer_res; u8 num_tc = 1, i; + /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ + if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, QED_COAL_RX_STATE_MACHINE, timeset); - timeset = p_hwfn->cdev->tx_coalesce_usecs >> - (QED_CAU_DEF_TX_TIMER_RES + 1); - + if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); for (i = 0; i < num_tc; i++) { qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, TX_PI(i), @@ -3199,3 +3228,39 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev) for_each_hwfn(cdev, i) cdev->hwfns[i].b_int_requested = false; } + +int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx) +{ + struct cau_sb_entry sb_entry; + int rc; + + if 
(!p_hwfn->hw_init_done) { + DP_ERR(p_hwfn, "hardware not initialized yet\n"); + return -EINVAL; + } + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + if (tx) + SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + else + SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); + return rc; + } + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 20b468637..0948be64d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -389,6 +389,9 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, u16 vf_number, u8 vf_valid); +int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx); + #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index aada4c7e0..401e73854 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -572,9 +572,12 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); - rc = qed_spq_post(p_hwfn, p_ent, NULL); + p_ramrod->vf_rx_prod_index = params->vf_qid; + if (params->vf_qid) + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Queue is meant for VF rxq[%04x]\n", params->vf_qid); - return rc; + return qed_spq_post(p_hwfn, p_ent, NULL); } static int @@ -587,7 +590,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size, void __iomem **pp_prod) { struct qed_hw_cid_data *p_rx_cid; - u64 init_prod_val = 0; + u32 init_prod_val = 0; u16 abs_l2_queue = 0; u8 abs_stats_id = 0; int rc; @@ -612,10 +615,10 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, *pp_prod = (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_PRODS_OFFSET(abs_l2_queue); + MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ - __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); /* Allocate a CID for the queue */ @@ -756,9 +759,9 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_hw_cid_data *p_tx_cid; - u8 abs_vport_id; + u16 pq_id, abs_tx_q_id = 0; int rc = -EINVAL; - u16 pq_id; + u8 abs_vport_id; /* Store information for the stop */ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; @@ -769,6 +772,10 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, if (rc) return rc; + rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id); + if (rc) + return rc; + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = cid; @@ -788,6 +795,7 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->sb_index = p_params->sb_idx; p_ramrod->stats_counter_id = stats_id; + p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); @@ -1482,51 +1490,51 @@ static void 
__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, offsetof(struct public_port, stats), sizeof(port_stats)); - p_stats->rx_64_byte_packets += port_stats.pmm.r64; - p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127; - p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255; - p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511; - p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023; - p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518; - p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522; - p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047; - p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095; - p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216; - p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383; - p_stats->rx_crc_errors += port_stats.pmm.rfcs; - p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; - p_stats->rx_pause_frames += port_stats.pmm.rxpf; - p_stats->rx_pfc_frames += port_stats.pmm.rxpp; - p_stats->rx_align_errors += port_stats.pmm.raln; - p_stats->rx_carrier_errors += port_stats.pmm.rfcr; - p_stats->rx_oversize_packets += port_stats.pmm.rovr; - p_stats->rx_jabbers += port_stats.pmm.rjbr; - p_stats->rx_undersize_packets += port_stats.pmm.rund; - p_stats->rx_fragments += port_stats.pmm.rfrg; - p_stats->tx_64_byte_packets += port_stats.pmm.t64; - p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; - p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; - p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; - p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; - p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; - p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; - p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; - p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; - p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; - p_stats->tx_pause_frames += port_stats.pmm.txpf; - p_stats->tx_pfc_frames += port_stats.pmm.txpp; - p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; - p_stats->tx_total_collisions += port_stats.pmm.tncl; - p_stats->rx_mac_bytes += port_stats.pmm.rbyte; - p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; - p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; - p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; - p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; - p_stats->tx_mac_bytes += port_stats.pmm.tbyte; - p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; - p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; - p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; - p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; + p_stats->rx_64_byte_packets += port_stats.eth.r64; + p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127; + p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255; + p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511; + p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023; + p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; + p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522; + p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047; + p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095; + p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216; + p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383; + p_stats->rx_crc_errors += port_stats.eth.rfcs; + p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf; + p_stats->rx_pause_frames += port_stats.eth.rxpf; + 
p_stats->rx_pfc_frames += port_stats.eth.rxpp; + p_stats->rx_align_errors += port_stats.eth.raln; + p_stats->rx_carrier_errors += port_stats.eth.rfcr; + p_stats->rx_oversize_packets += port_stats.eth.rovr; + p_stats->rx_jabbers += port_stats.eth.rjbr; + p_stats->rx_undersize_packets += port_stats.eth.rund; + p_stats->rx_fragments += port_stats.eth.rfrg; + p_stats->tx_64_byte_packets += port_stats.eth.t64; + p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127; + p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255; + p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511; + p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023; + p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; + p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047; + p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095; + p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216; + p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383; + p_stats->tx_pause_frames += port_stats.eth.txpf; + p_stats->tx_pfc_frames += port_stats.eth.txpp; + p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec; + p_stats->tx_total_collisions += port_stats.eth.tncl; + p_stats->rx_mac_bytes += port_stats.eth.rbyte; + p_stats->rx_mac_uc_packets += port_stats.eth.rxuca; + p_stats->rx_mac_mc_packets += port_stats.eth.rxmca; + p_stats->rx_mac_bc_packets += port_stats.eth.rxbca; + p_stats->rx_mac_frames_ok += port_stats.eth.rxpok; + p_stats->tx_mac_bytes += port_stats.eth.tbyte; + p_stats->tx_mac_uc_packets += port_stats.eth.txuca; + p_stats->tx_mac_mc_packets += port_stats.eth.txmca; + p_stats->tx_mac_bc_packets += port_stats.eth.txbca; + p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf; for (j = 0; j < 8; j++) { p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; p_stats->brb_discards += port_stats.brb.brb_discard[j]; @@ -1656,6 +1664,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_tc = 1; if (IS_PF(cdev)) { + int max_vf_vlan_filters = 0; + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { for_each_hwfn(cdev, i) info->num_queues += @@ -1668,7 +1678,12 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_queues = cdev->num_hwfns; } - info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); + if (IS_QED_SRIOV(cdev)) + max_vf_vlan_filters = cdev->p_iov_info->total_vfs * + QED_ETH_VF_NUM_VLAN_FILTERS; + info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) - + max_vf_vlan_filters; + ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr); } else { @@ -2156,10 +2171,17 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, extern const struct qed_iov_hv_ops qed_iov_ops_pass; #endif +#ifdef CONFIG_DCB +extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; +#endif + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, #ifdef CONFIG_QED_SRIOV .iov = &qed_iov_ops_pass, +#endif +#ifdef CONFIG_DCB + .dcb = &qed_dcbnl_ops_pass, #endif .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index a41c6b559..d5a32a1e6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -207,6 +207,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; + dev_info->rdma_supported = + 
(cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
 	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
 	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -657,8 +659,13 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	struct qed_sb_cnt_info sb_cnt_info;
 	int rc;
 	int i;
 
-	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+		return -EINVAL;
+	}
+
+	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
 	cdev->int_params.in.int_mode = int_mode;
 	for_each_hwfn(cdev, i) {
 		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
@@ -832,7 +839,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 			goto err2;
 		}
 
-		data = cdev->firmware->data;
+		/* First Dword used to differentiate between various sources */
+		data = cdev->firmware->data + sizeof(u32);
 	}
 
 	memset(&tunn_info, 0, sizeof(tunn_info));
@@ -900,7 +908,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
 	if (IS_PF(cdev)) {
 		qed_free_stream_mem(cdev);
 
-		qed_sriov_disable(cdev, true);
+		if (IS_QED_ETH_IF(cdev))
+			qed_sriov_disable(cdev, true);
 
 		qed_nic_stop(cdev);
 		qed_slowpath_irq_free(cdev);
@@ -991,8 +1000,7 @@ static bool qed_can_link_change(struct qed_dev *cdev)
 	return true;
 }
 
-static int qed_set_link(struct qed_dev *cdev,
-			struct qed_link_params *params)
+static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
 {
 	struct qed_hwfn *hwfn;
 	struct qed_mcp_link_params *link_params;
@@ -1032,7 +1040,7 @@ static int qed_set_link(struct qed_dev *cdev,
 			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
 		if (params->adv_speeds & 0)
 			link_params->speed.advertised_speeds |=
-			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
 	}
 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
 		link_params->speed.forced_speed = params->forced_speed;
@@ -1053,19 +1061,19 @@ static int qed_set_link(struct qed_dev *cdev,
 	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
 		switch (params->loopback_mode) {
 		case QED_LINK_LOOPBACK_INT_PHY:
-			link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
 			break;
 		case QED_LINK_LOOPBACK_EXT_PHY:
-			link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
 			break;
 		case QED_LINK_LOOPBACK_EXT:
-			link_params->loopback_mode = PMM_LOOPBACK_EXT;
+			link_params->loopback_mode = ETH_LOOPBACK_EXT;
 			break;
 		case QED_LINK_LOOPBACK_MAC:
-			link_params->loopback_mode = PMM_LOOPBACK_MAC;
+			link_params->loopback_mode = ETH_LOOPBACK_MAC;
 			break;
 		default:
-			link_params->loopback_mode = PMM_LOOPBACK_NONE;
+			link_params->loopback_mode = ETH_LOOPBACK_NONE;
 			break;
 		}
 	}
@@ -1185,7 +1193,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
 		if_link->advertised_caps |= 0;
 	if (params.speed.advertised_speeds &
-	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
 		if_link->advertised_caps |= 0;
 
 	if (link_caps.speed_capabilities &
@@ -1202,7 +1210,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
 	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
 		if_link->supported_caps |= 0;
 	if (link_caps.speed_capabilities &
-	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
 		if_link->supported_caps |= 0;
 
 	if (link.link_up)
@@ -1301,6 +1309,38 @@ static int qed_drain(struct qed_dev *cdev)
 	return 0;
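/* The get/set_coalesce callbacks added below feed qed_int_cau_conf_sb()
 * earlier in this patch, where a microsecond value is encoded for the CAU
 * as (timeset << timer_res) with a 7-bit timeset; the driver therefore
 * picks the smallest timer_res whose shifted range still covers the
 * request. A stand-alone sketch of that encoding under the same 0x7F/0xFF
 * bounds used above; it is an illustration, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

static void cau_encode(uint16_t usecs, uint8_t *timer_res, uint8_t *timeset)
{
	if (usecs <= 0x7F)		/* fits a 7-bit timeset directly */
		*timer_res = 0;
	else if (usecs <= 0xFF)		/* one doubling of the timer step */
		*timer_res = 1;
	else				/* widest resolution handled here */
		*timer_res = 2;
	*timeset = (uint8_t)(usecs >> *timer_res);
}

int main(void)
{
	uint8_t res, set;

	cau_encode(400, &res, &set);
	/* prints "res 2 timeset 100": 100 << 2 restores the 400 usecs */
	printf("res %u timeset %u\n", (unsigned)res, (unsigned)set);
	return 0;
}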
} +static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) +{ + *rx_coal = cdev->rx_coalesce_usecs; + *tx_coal = cdev->tx_coalesce_usecs; +} + +static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id) +{ + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int hwfn_index; + int status = 0; + + hwfn_index = qid % cdev->num_hwfns; + hwfn = &cdev->hwfns[hwfn_index]; + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, + qid / cdev->num_hwfns, sb_id); + if (status) + goto out; + status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, + qid / cdev->num_hwfns, sb_id); +out: + qed_ptt_release(hwfn, ptt); + + return status; +} + static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@ -1347,5 +1387,7 @@ const struct qed_common_ops qed_common_ops_pass = { .update_msglvl = &qed_init_dp, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .get_coalesce = &qed_get_coalesce, + .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 118236179..f776a7779 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -531,9 +531,9 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, transceiver_data))); transceiver_state = GET_FIELD(transceiver_state, - PMM_TRANSCEIVER_STATE); + ETH_TRANSCEIVER_STATE); - if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT) + if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) DP_NOTICE(p_hwfn, "Transceiver is present.\n"); else DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); @@ -668,14 +668,12 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, qed_link_update(p_hwfn); } -int qed_mcp_set_link(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool b_up) +int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) { struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; struct qed_mcp_mb_params mb_params; union drv_union_data union_data; - struct pmm_phy_cfg *phy_cfg; + struct eth_phy_cfg *phy_cfg; int rc = 0; u32 cmd; @@ -685,9 +683,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; if (!params->speed.autoneg) phy_cfg->speed = params->speed.forced_speed; - phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; - phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; - phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0; + phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; + phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; + phy_cfg->pause |= (params->pause.forced_tx) ? 
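The qed_set_coalesce() implementation above fans a single logical queue id out across engines on a CMT (two-hwfn) device: qid % num_hwfns selects the engine and qid / num_hwfns the engine-local queue. A runnable, standalone illustration of that arithmetic (not driver code):

#include <stdio.h>

int main(void)
{
	int num_hwfns = 2; /* CMT 100G device: queues interleave across two engines */
	int qid;

	for (qid = 0; qid < 8; qid++)
		printf("global qid %d -> hwfn %d, engine-local queue %d\n",
		       qid, qid % num_hwfns, qid / num_hwfns);
	return 0;
}

Even queue ids land on engine 0 and odd ones on engine 1, which is why the driver applies the coalescing parameters per-engine with the divided index.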
ETH_PAUSE_TX : 0; phy_cfg->adv_speed = params->speed.advertised_speeds; phy_cfg->loopback_mode = params->loopback_mode; @@ -773,6 +771,34 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, return size; } +int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_pf) +{ + struct public_func shmem_info; + int i; + + /* Find first Ethernet interface in port */ + for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev); + i += p_hwfn->cdev->num_ports_in_engines) { + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID_BY_REL(p_hwfn, i)); + + if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) + continue; + + if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_ETHERNET) { + *p_pf = (u8)i; + return 0; + } + } + + DP_NOTICE(p_hwfn, + "Failed to find on port an ethernet interface in MF_SI mode\n"); + + return -EINVAL; +} + static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -951,7 +977,18 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: - *p_proto = QED_PCI_ETH; + if (test_bit(QED_DEV_CAP_ROCE, + &p_hwfn->hw_info.device_capabilities)) + *p_proto = QED_PCI_ETH_ROCE; + else + *p_proto = QED_PCI_ETH; + break; + case FUNC_MF_CFG_PROTOCOL_ISCSI: + *p_proto = QED_PCI_ISCSI; + break; + case FUNC_MF_CFG_PROTOCOL_ROCE: + DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); + rc = -EINVAL; break; default: rc = -EINVAL; @@ -1116,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, p_drv_version = &union_data.drv_version; p_drv_version->version = p_ver->version; - for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { - val = cpu_to_be32(p_ver->name[i]); + for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { + val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 6dd59eb7f..7f319aa1b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -457,4 +457,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_link_state *p_link, u8 min_bw); + +int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_pf); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 3a6c506f0..f6b86ca1f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -27,6 +27,35 @@ #define CDU_REG_CID_ADDR_PARAMS_NCIB ( \ 0xff << 24) +#define CDU_REG_SEGMENT0_PARAMS \ + 0x580904UL +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \ + (0xfff << 0) +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \ + 0 +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \ + (0xff << 16) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \ + 16 +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \ + (0xff << 24) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \ + 24 +#define CDU_REG_SEGMENT1_PARAMS \ + 0x580908UL +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \ + (0xfff << 0) +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \ + 0 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \ + (0xff << 16) +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \ + 16 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \ + (0xff 
<< 24) +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \ + 24 + #define XSDM_REG_OPERATION_GEN \ 0xf80408UL #define NIG_REG_RX_BRB_OUT_EN \ @@ -51,6 +80,8 @@ 0x1f00000UL #define BAR0_MAP_REG_TSDM_RAM \ 0x1c80000UL +#define BAR0_MAP_REG_XSDM_RAM \ + 0x1e00000UL #define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 0x5011f4UL #define PRS_REG_SEARCH_TCP \ @@ -167,6 +198,10 @@ 0x1800004UL #define NIG_REG_CM_HDR \ 0x500840UL +#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \ + 0x50196cUL +#define NIG_REG_LLH_CLS_TYPE_DUALMODE \ + 0x501964UL #define NCSI_REG_CONFIG \ 0x040200UL #define PBF_REG_INIT \ @@ -219,6 +254,10 @@ 0x230000UL #define PRS_REG_SOFT_RST \ 0x1f0000UL +#define PRS_REG_MSG_INFO \ + 0x1f0a1cUL +#define PRS_REG_ROCE_DEST_QP_MAX_PF \ + 0x1f0430UL #define PSDM_REG_ENABLE_IN1 \ 0xfa0004UL #define PSEM_REG_ENABLE_IN \ @@ -227,6 +266,8 @@ 0x280020UL #define PSWRQ2_REG_CDUT_P_SIZE \ 0x24000cUL +#define PSWRQ2_REG_ILT_MEMORY \ + 0x260000UL #define PSWHST_REG_DISCARD_INTERNAL_WRITES \ 0x2a0040UL #define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ @@ -460,7 +501,7 @@ #define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2) #define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2 -#define NIG_REG_VXLAN_PORT 0x50105cUL +#define NIG_REG_VXLAN_CTRL 0x50105cUL #define PBF_REG_VXLAN_PORT 0xd80518UL #define PBF_REG_NGE_PORT 0xd8051cUL #define PRS_REG_NGE_PORT 0x1f086cUL diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index ea4e9ce53..a548504c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -63,6 +63,32 @@ union ramrod_data { struct vport_update_ramrod_data vport_update; struct vport_filter_update_ramrod_data vport_filter_update; + struct rdma_init_func_ramrod_data rdma_init_func; + struct rdma_close_func_ramrod_data rdma_close_func; + struct rdma_register_tid_ramrod_data rdma_register_tid; + struct rdma_deregister_tid_ramrod_data rdma_deregister_tid; + struct roce_create_qp_resp_ramrod_data roce_create_qp_resp; + struct roce_create_qp_req_ramrod_data roce_create_qp_req; + struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp; + struct roce_modify_qp_req_ramrod_data roce_modify_qp_req; + struct roce_query_qp_resp_ramrod_data roce_query_qp_resp; + struct roce_query_qp_req_ramrod_data roce_query_qp_req; + struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; + struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; + struct rdma_create_cq_ramrod_data rdma_create_cq; + struct rdma_resize_cq_ramrod_data rdma_resize_cq; + struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; + struct rdma_srq_create_ramrod_data rdma_create_srq; + struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; + struct rdma_srq_modify_ramrod_data rdma_modify_srq; + + struct iscsi_slow_path_hdr iscsi_empty; + struct iscsi_init_ramrod_params iscsi_init; + struct iscsi_spe_func_dstry iscsi_destroy; + struct iscsi_spe_conn_offload iscsi_conn_offload; + struct iscsi_conn_update_ramrod_params iscsi_conn_update; + struct iscsi_spe_conn_termination iscsi_conn_terminate; + struct vf_start_ramrod_data vf_start; struct vf_stop_ramrod_data vf_stop; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 67f6ce3c8..a52f3fc05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -308,6 +308,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int 
rc = -EINVAL; + u8 page_cnt; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, @@ -332,7 +333,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->path_id = QED_PATH_ID(p_hwfn); p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); - p_ramrod->mf_mode = mode; + switch (mode) { case QED_MF_DEFAULT: case QED_MF_NPAR: @@ -350,24 +351,41 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, p_hwfn->p_eq->chain.pbl.p_phys_table); - p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt; - + page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); + p_ramrod->event_ring_num_pages = page_cnt; DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, p_hwfn->p_consq->chain.pbl.p_phys_table); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); - p_hwfn->hw_info.personality = PERSONALITY_ETH; if (IS_MF_SI(p_hwfn)) p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + case QED_PCI_ISCSI: + p_ramrod->personality = PERSONALITY_ISCSI; + break; + case QED_PCI_ETH_ROCE: + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; + break; + default: + DP_NOTICE(p_hwfn, "Unknown personality %d\n", + p_hwfn->hw_info.personality); + p_ramrod->personality = PERSONALITY_ETH; + } + if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; p_ramrod->num_vfs = (u8) p_iov->total_vfs; } + p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; + p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 03601dfc0..d73456eab 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -339,6 +339,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, num_elem, sizeof(union event_ring_element), &p_eq->chain)) { @@ -412,10 +413,10 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, ***************************************************************************/ void qed_spq_setup(struct qed_hwfn *p_hwfn) { - struct qed_spq *p_spq = p_hwfn->p_spq; - struct qed_spq_entry *p_virt = NULL; - dma_addr_t p_phys = 0; - unsigned int i = 0; + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_virt = NULL; + dma_addr_t p_phys = 0; + u32 i, capacity; INIT_LIST_HEAD(&p_spq->pending); INIT_LIST_HEAD(&p_spq->completion_pending); @@ -427,7 +428,8 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); p_virt = p_spq->p_virt; - for (i = 0; i < p_spq->chain.capacity; i++) { + capacity = qed_chain_get_capacity(&p_spq->chain); + for (i = 0; i < capacity; i++) { DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); list_add_tail(&p_virt->list, &p_spq->free_pool); @@ -455,9 +457,10 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) int qed_spq_alloc(struct qed_hwfn *p_hwfn) { - struct qed_spq *p_spq = NULL; - dma_addr_t p_phys = 0; - struct qed_spq_entry *p_virt = NULL; + struct qed_spq_entry *p_virt = NULL; + struct qed_spq *p_spq = NULL; + dma_addr_t p_phys = 0; + u32 capacity; /* SPQ
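The qed_sp_pf_start() hunk above stops hardcoding PERSONALITY_ETH and instead derives the ramrod personality from hw_info, falling back to plain Ethernet for unknown values. A compact sketch of the same mapping with stand-in enums (the real constants live in the qed HSI headers):

enum drv_personality { DRV_PCI_ETH, DRV_PCI_ISCSI, DRV_PCI_ETH_ROCE };
enum fw_personality { FW_ETH, FW_ISCSI, FW_RDMA_AND_ETH };

static enum fw_personality map_personality(enum drv_personality p)
{
	switch (p) {
	case DRV_PCI_ISCSI:
		return FW_ISCSI;
	case DRV_PCI_ETH_ROCE:
		return FW_RDMA_AND_ETH;
	case DRV_PCI_ETH:
	default:
		/* unknown personalities fall back to plain L2, as above */
		return FW_ETH;
	}
}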
struct */ p_spq = @@ -471,6 +474,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_SINGLE, + QED_CHAIN_CNT_TYPE_U16, 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), &p_spq->chain)) { @@ -479,11 +483,11 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) } /* allocate and fill the SPQ elements (incl. ramrod data list) */ + capacity = qed_chain_get_capacity(&p_spq->chain); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - p_spq->chain.capacity * + capacity * sizeof(struct qed_spq_entry), - &p_phys, - GFP_KERNEL); + &p_phys, GFP_KERNEL); if (!p_virt) goto spq_allocate_fail; @@ -503,16 +507,18 @@ spq_allocate_fail: void qed_spq_free(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; + u32 capacity; if (!p_spq) return; - if (p_spq->p_virt) + if (p_spq->p_virt) { + capacity = qed_chain_get_capacity(&p_spq->chain); dma_free_coherent(&p_hwfn->cdev->pdev->dev, - p_spq->chain.capacity * + capacity * sizeof(struct qed_spq_entry), - p_spq->p_virt, - p_spq->p_phys); + p_spq->p_virt, p_spq->p_phys); + } qed_chain_free(p_hwfn->cdev, &p_spq->chain); ; @@ -881,9 +887,9 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_PAGE_SIZE / 0x80, - 0x80, - &p_consq->chain)) { + 0x80, &p_consq->chain)) { DP_NOTICE(p_hwfn, "Failed to allocate consq chain"); goto consq_allocate_fail; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index c325ee857..15399da26 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -21,18 +21,18 @@ #include "qed_vf.h" /* IOV ramrods */ -static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, - u32 concrete_vfid, u16 opaque_vfid) +static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { struct vf_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + u8 fp_minor; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); - init_data.opaque_fid = opaque_vfid; + init_data.opaque_fid = p_vf->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -43,10 +43,39 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.vf_start; - p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); - p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid); + p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); + p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); - p_ramrod->personality = PERSONALITY_ETH; + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + case QED_PCI_ETH_ROCE: + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; + break; + default: + DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", + p_hwfn->hw_info.personality); + return -EINVAL; + } + + fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; + if (fp_minor > ETH_HSI_VER_MINOR) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", + p_vf->abs_vf_id, + ETH_HSI_VER_MAJOR, + fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + fp_minor = ETH_HSI_VER_MINOR; + } + + p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = 
ETH_HSI_VER_MAJOR; + p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] - Starting using HSI %02x.%02x\n", + p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -117,6 +146,45 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, return vf; } +static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u16 rx_qid) +{ + if (rx_qid >= p_vf->num_rxqs) + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", + p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); + return rx_qid < p_vf->num_rxqs; +} + +static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u16 tx_qid) +{ + if (tx_qid >= p_vf->num_txqs) + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", + p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); + return tx_qid < p_vf->num_txqs; +} + +static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u16 sb_idx) +{ + int i; + + for (i = 0; i < p_vf->num_sbs; i++) + if (p_vf->igu_sbs[i] == sb_idx) + return true; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", + p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); + + return false; +} + int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, int vfid, struct qed_ptt *p_ptt) { @@ -293,6 +361,9 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | (vf->abs_vf_id << 8); vf->vport_id = idx + 1; + + vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; + vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; } } @@ -598,17 +669,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, /* unpretend */ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); - if (vf->state != VF_STOPPED) { - DP_NOTICE(p_hwfn, "VF[%02x] is already started\n", - vf->abs_vf_id); - return -EINVAL; - } - - /* Start VF */ - rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid); - if (rc) - DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); - vf->state = VF_FREE; return rc; @@ -852,7 +912,6 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params params; struct qed_mcp_link_state link; struct qed_vf_info *vf = NULL; - int rc = 0; vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!vf) { @@ -874,18 +933,8 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps); - if (vf->state != VF_STOPPED) { - /* Stopping the VF */ - rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid); - - if (rc != 0) { - DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", - rc); - return rc; - } - - vf->state = VF_STOPPED; - } + /* Forget the VF's acquisition message */ + memset(&vf->acquire, 0, sizeof(vf->acquire)); /* disablng interrupts and resetting permission table was done during * vf-close, however, we could get here without going through vf_close @@ -1116,8 +1165,6 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->vf_bulletin = 0; p_vf->vport_instance = 0; - p_vf->num_mac_filters = 0; - p_vf->num_vlan_filters = 0; p_vf->configured_features = 0; /* If VF previously requested less resources, go back to default */ @@ -1130,9 +1177,95 @@ static void
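The three validators added above share one rule: a VF-supplied index is checked against the VF's own allocation before it is ever used to index PF-side arrays. A minimal standalone sketch of the same guards (hypothetical helpers, not driver API):

#include <stdbool.h>
#include <stdint.h>

/* Queue ids must fall inside the VF's allocation ... */
static bool validate_qid(uint16_t qid, uint16_t num_allocated)
{
	return qid < num_allocated;
}

/* ... while a status-block id must match one of the SBs the PF assigned. */
static bool validate_sb(uint16_t sb_idx, const uint16_t *igu_sbs, int num_sbs)
{
	int i;

	for (i = 0; i < num_sbs; i++)
		if (igu_sbs[i] == sb_idx)
			return true;
	return false;
}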
qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->vf_queues[i].rxq_active = 0; memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); + memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); } +static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + int i; + + /* Queue related information */ + p_resp->num_rxqs = p_vf->num_rxqs; + p_resp->num_txqs = p_vf->num_txqs; + p_resp->num_sbs = p_vf->num_sbs; + + for (i = 0; i < p_resp->num_sbs; i++) { + p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; + p_resp->hw_sbs[i].sb_qid = 0; + } + + /* These fields are filled for backward compatibility. + * Unused by modern vfs. + */ + for (i = 0; i < p_resp->num_rxqs; i++) { + qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, + (u16 *)&p_resp->hw_qid[i]); + p_resp->cid[i] = p_vf->vf_queues[i].fw_cid; + } + + /* Filter related information */ + p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, + p_req->num_mac_filters); + p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, + p_req->num_vlan_filters); + + /* This isn't really needed/enforced, but some legacy VFs might depend + * on the correct filling of this field. + */ + p_resp->num_mc_filters = QED_MAX_MC_ADDRS; + + /* Validate sufficient resources for VF */ + if (p_resp->num_rxqs < p_req->num_rxqs || + p_resp->num_txqs < p_req->num_txqs || + p_resp->num_sbs < p_req->num_sbs || + p_resp->num_mac_filters < p_req->num_mac_filters || + p_resp->num_vlan_filters < p_req->num_vlan_filters || + p_resp->num_mc_filters < p_req->num_mc_filters) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n", + p_vf->abs_vf_id, + p_req->num_rxqs, + p_resp->num_rxqs, + p_req->num_txqs, + p_resp->num_txqs, + p_req->num_sbs, + p_resp->num_sbs, + p_req->num_mac_filters, + p_resp->num_mac_filters, + p_req->num_vlan_filters, + p_resp->num_vlan_filters, + p_req->num_mc_filters, p_resp->num_mc_filters); + return PFVF_STATUS_NO_RESOURCE; + } + + return PFVF_STATUS_SUCCESS; +} + +static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, + struct pfvf_stats_info *p_stats) +{ + p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + + offsetof(struct mstorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); + p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + + offsetof(struct ustorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); + p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + + offsetof(struct pstorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); + p_stats->tstats.address = 0; + p_stats->tstats.len = 0; +} + static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -1141,25 +1274,27 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; - u8 i, vfpf_status = PFVF_STATUS_SUCCESS; + u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; struct pf_vf_resc *resc = &resp->resc; + int rc; + + memset(resp, 0, sizeof(*resp)); /* Validate FW compatibility */ -
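qed_iov_vf_mbx_acquire_resc() above grants min(requested, available) for each resource class and then fails the whole acquire if any grant falls short. The pattern in isolation (hypothetical helper, not driver API):

#include <stdbool.h>
#include <stdint.h>

/* Grant what we can; report whether the request was met in full.
 * A short grant maps to PFVF_STATUS_NO_RESOURCE in the driver. */
static bool grant(uint8_t requested, uint8_t available, uint8_t *granted)
{
	*granted = requested < available ? requested : available;
	return *granted >= requested;
}

On a short grant the PF replies NO_RESOURCE together with the granted amounts, and the VF may retry with those amounts — see the "humble our request" path on the VF side later in this patch.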
if (req->vfdev_info.fw_major != FW_MAJOR_VERSION || - req->vfdev_info.fw_minor != FW_MINOR_VERSION || - req->vfdev_info.fw_revision != FW_REVISION_VERSION || - req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) { + if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { DP_INFO(p_hwfn, - "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n", + "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", vf->abs_vf_id, - req->vfdev_info.fw_major, - req->vfdev_info.fw_minor, - req->vfdev_info.fw_revision, - req->vfdev_info.fw_engineering, - FW_MAJOR_VERSION, - FW_MINOR_VERSION, - FW_REVISION_VERSION, FW_ENGINEERING_VERSION); - vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + req->vfdev_info.eth_fp_hsi_major, + req->vfdev_info.eth_fp_hsi_minor, + ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + + /* Write the PF version so that VF would know which version + * is supported. + */ + pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; + pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; + goto out; } @@ -1169,16 +1304,13 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "VF[%d] is running an old driver that doesn't support 100g\n", vf->abs_vf_id); - vfpf_status = PFVF_STATUS_NOT_SUPPORTED; goto out; } - memset(resp, 0, sizeof(*resp)); + /* Store the acquire message */ + memcpy(&vf->acquire, req, sizeof(vf->acquire)); - /* Fill in vf info stuff */ vf->opaque_fid = req->vfdev_info.opaque_fid; - vf->num_mac_filters = 1; - vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; vf->vf_bulletin = req->bulletin_addr; vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? @@ -1194,26 +1326,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->num_hwfns > 1) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; - pfdev_info->stats_info.mstats.address = - PXP_VF_BAR0_START_MSDM_ZONE_B + - offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat); - pfdev_info->stats_info.mstats.len = - sizeof(struct eth_mstorm_per_queue_stat); - - pfdev_info->stats_info.ustats.address = - PXP_VF_BAR0_START_USDM_ZONE_B + - offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat); - pfdev_info->stats_info.ustats.len = - sizeof(struct eth_ustorm_per_queue_stat); - - pfdev_info->stats_info.pstats.address = - PXP_VF_BAR0_START_PSDM_ZONE_B + - offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat); - pfdev_info->stats_info.pstats.len = - sizeof(struct eth_pstorm_per_queue_stat); - - pfdev_info->stats_info.tstats.address = 0; - pfdev_info->stats_info.tstats.len = 0; + qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); @@ -1221,36 +1334,31 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, pfdev_info->fw_minor = FW_MINOR_VERSION; pfdev_info->fw_rev = FW_REVISION_VERSION; pfdev_info->fw_eng = FW_ENGINEERING_VERSION; + pfdev_info->minor_fp_hsi = min_t(u8, + ETH_HSI_VER_MINOR, + req->vfdev_info.eth_fp_hsi_minor); pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); pfdev_info->dev_type = p_hwfn->cdev->type; pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; - resc->num_rxqs = vf->num_rxqs; - resc->num_txqs = vf->num_txqs; - resc->num_sbs = vf->num_sbs; - for (i = 0; i < resc->num_sbs; i++) { - resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i]; - resc->hw_sbs[i].sb_qid = 0; - } + /* Fill resources available to VF; Make sure there are
enough to + * satisfy the VF's request. + */ + vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, + &req->resc_request, resc); + if (vfpf_status != PFVF_STATUS_SUCCESS) + goto out; - for (i = 0; i < resc->num_rxqs; i++) { - qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid, - (u16 *)&resc->hw_qid[i]); - resc->cid[i] = vf->vf_queues[i].fw_cid; + /* Start the VF in FW */ + rc = qed_sp_vf_start(p_hwfn, vf); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); + vfpf_status = PFVF_STATUS_FAILURE; + goto out; } - resc->num_mac_filters = min_t(u8, vf->num_mac_filters, - req->resc_request.num_mac_filters); - resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters, - req->resc_request.num_vlan_filters); - - /* This isn't really required as VF isn't limited, but some VFs might - * actually test this value, so need to provide it. - */ - resc->num_mc_filters = req->resc_request.num_mc_filters; - /* Fill agreed size of bulletin board in response */ resp->bulletin_size = vf->bulletin.size; qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); @@ -1296,7 +1404,7 @@ static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, params.anti_spoofing_en = val; rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL); - if (rc) { + if (!rc) { p_vf->spoof_chk = val; p_vf->req_spoofchk_val = p_vf->spoof_chk; DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -1585,10 +1693,6 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, sizeof(struct pfvf_def_resp_tlv), status); } -#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A -#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ - (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) - static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u8 status) @@ -1606,16 +1710,11 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, /* Update the TLV with the response */ if (status == PFVF_STATUS_SUCCESS) { - u16 hw_qid = 0; - req = &mbx->req_virt->start_rxq; - qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid, - &hw_qid); - - p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) + - hw_qid * MSTORM_QZONE_SIZE + - offsetof(struct mstorm_eth_queue_zone, - rx_producers); + p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + + offsetof(struct mstorm_vf_zone, + non_trigger.eth_rx_queue_producers) + + sizeof(struct eth_rx_prod_data) * req->rx_qid; } qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status); @@ -1627,13 +1726,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, { struct qed_queue_start_common_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; - u8 status = PFVF_STATUS_SUCCESS; + u8 status = PFVF_STATUS_NO_RESOURCE; struct vfpf_start_rxq_tlv *req; int rc; memset(&params, 0, sizeof(params)); req = &mbx->req_virt->start_rxq; + + if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) || + !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) + goto out; + params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid; + params.vf_qid = req->rx_qid; params.vport_id = vf->vport_id; params.sb = req->hw_sb; params.sb_idx = req->sb_index; @@ -1649,22 +1754,48 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, if (rc) { status = PFVF_STATUS_FAILURE; } else { + status = PFVF_STATUS_SUCCESS; vf->vf_queues[req->rx_qid].rxq_active = true; vf->num_active_rxqs++; } +out: qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status); } +static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info
*p_vf, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_start_queue_resp_tlv *p_tlv; + + mbx->offset = (u8 *)mbx->reply_virt; + + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, + sizeof(*p_tlv)); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* Update the TLV with the response */ + if (status == PFVF_STATUS_SUCCESS) { + u16 qid = mbx->req_virt->start_txq.tx_qid; + + p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid, + DQ_DEMS_LEGACY); + } + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status); +} + static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { - u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_queue_start_common_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_NO_RESOURCE; union qed_qm_pq_params pq_params; - u8 status = PFVF_STATUS_SUCCESS; struct vfpf_start_txq_tlv *req; int rc; @@ -1675,6 +1806,11 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, memset(&params, 0, sizeof(params)); req = &mbx->req_virt->start_txq; + + if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) || + !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) + goto out; + params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid; params.vport_id = vf->vport_id; params.sb = req->hw_sb; @@ -1688,13 +1824,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, req->pbl_addr, req->pbl_size, &pq_params); - if (rc) + if (rc) { status = PFVF_STATUS_FAILURE; - else + } else { + status = PFVF_STATUS_SUCCESS; vf->vf_queues[req->tx_qid].txq_active = true; + } - qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ, - length, status); +out: + qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status); } static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, @@ -2119,6 +2257,16 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, u16 length; int rc; + /* Validate PF can send such a request */ + if (!vf->vport_instance) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "No VPORT instance available for VF[%d], failing vport update\n", + vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto out; + } + memset(&params, 0, sizeof(params)); params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; @@ -2161,15 +2309,12 @@ out: qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } -static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, - struct qed_vf_info *p_vf, - struct qed_filter_ucast *p_params) +static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) { int i; - if (p_params->type == QED_FILTER_MAC) - return 0; - /* First remove entries and then add new ones */ if (p_params->opcode == QED_FILTER_REMOVE) { for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) @@ -2222,6 +2367,80 @@ static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, return 0; } +static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) +{ + int i; + + /* If we're in forced-mode, we don't allow any change */ + if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) + return 0; + + /* First remove entries and then add new ones */ + if (p_params->opcode == QED_FILTER_REMOVE) { + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(p_vf->shadow_config.macs[i], + p_params->mac)) { +
memset(p_vf->shadow_config.macs[i], 0, + ETH_ALEN); + break; + } + } + + if (i == QED_ETH_VF_NUM_MAC_FILTERS) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "MAC isn't configured\n"); + return -EINVAL; + } + } else if (p_params->opcode == QED_FILTER_REPLACE || + p_params->opcode == QED_FILTER_FLUSH) { + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) + memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN); + } + + /* List the new MAC address */ + if (p_params->opcode != QED_FILTER_ADD && + p_params->opcode != QED_FILTER_REPLACE) + return 0; + + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { + ether_addr_copy(p_vf->shadow_config.macs[i], + p_params->mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Added MAC at %d entry in shadow\n", i); + break; + } + } + + if (i == QED_ETH_VF_NUM_MAC_FILTERS) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n"); + return -EINVAL; + } + + return 0; +} + +static int +qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) +{ + int rc = 0; + + if (p_params->type == QED_FILTER_MAC) { + rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); + if (rc) + return rc; + } + + if (p_params->type == QED_FILTER_VLAN) + rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); + + return rc; +} + int qed_iov_chk_ucast(struct qed_hwfn *hwfn, int vfid, struct qed_filter_ucast *params) { @@ -2366,11 +2585,27 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + int rc = 0; qed_iov_vf_cleanup(p_hwfn, p_vf); + if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { + /* Stopping the VF */ + rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, + p_vf->opaque_fid); + + if (rc) { + DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", + rc); + status = PFVF_STATUS_FAILURE; + } + + p_vf->state = VF_STOPPED; + } + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, - length, PFVF_STATUS_SUCCESS); + length, status); } static int @@ -2622,7 +2857,6 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, { struct qed_iov_vf_mbx *mbx; struct qed_vf_info *p_vf; - int i; p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); if (!p_vf) @@ -2631,9 +2865,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, mbx = &p_vf->vf_mbx; /* qed_iov_process_mbx_request */ - DP_VERBOSE(p_hwfn, - QED_MSG_IOV, - "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id); mbx->first_tlv = mbx->req_virt->first_tlv; @@ -2687,15 +2920,28 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, * support them. Or this may be because someone wrote a crappy * VF driver and is sending garbage over the channel. */ - DP_ERR(p_hwfn, - "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", - mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); - - for (i = 0; i < 20; i++) { + DP_NOTICE(p_hwfn, + "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", + p_vf->abs_vf_id, + mbx->first_tlv.tl.type, + mbx->first_tlv.tl.length, + mbx->first_tlv.padding, mbx->first_tlv.reply_address); + + /* Try replying in case reply address matches the acquisition's + * posted address. 
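qed_iov_vf_update_mac_shadow() above manages a tiny fixed-size table: remove by match, flush by zeroing, add into the first empty slot, and fail when no slot is free. The same logic in standalone form (two-slot table and hypothetical sizes, mirroring QED_ETH_VF_NUM_MAC_FILTERS and ETH_ALEN):

#include <stdbool.h>
#include <string.h>

#define N_SLOTS 2
#define MAC_LEN 6

static unsigned char shadow[N_SLOTS][MAC_LEN];

static bool shadow_mac_add(const unsigned char *mac)
{
	static const unsigned char empty[MAC_LEN]; /* all-zero = free slot */
	int i;

	for (i = 0; i < N_SLOTS; i++) {
		if (!memcmp(shadow[i], empty, MAC_LEN)) {
			memcpy(shadow[i], mac, MAC_LEN);
			return true;
		}
	}
	return false; /* table full: the driver returns -EINVAL here */
}

static bool shadow_mac_del(const unsigned char *mac)
{
	int i;

	for (i = 0; i < N_SLOTS; i++) {
		if (!memcmp(shadow[i], mac, MAC_LEN)) {
			memset(shadow[i], 0, MAC_LEN);
			return true;
		}
	}
	return false; /* MAC wasn't configured */
}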
+ */ + if (p_vf->acquire.first_tlv.reply_address && + (mbx->first_tlv.reply_address == + p_vf->acquire.first_tlv.reply_address)) { + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + mbx->first_tlv.tl.type, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_NOT_SUPPORTED); + } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "%x ", - mbx->req_virt->tlv_buf_size.tlv_buffer[i]); + "VF[%02x]: Can't respond to TLV - no valid reply address\n", + p_vf->abs_vf_id); } } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c90b2b6ad..0dd23e409 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -10,6 +10,9 @@ #define _QED_SRIOV_H #include #include "qed_vf.h" + +#define QED_ETH_VF_NUM_MAC_FILTERS 1 +#define QED_ETH_VF_NUM_VLAN_FILTERS 2 #define QED_VF_ARRAY_LENGTH (3) #ifdef CONFIG_QED_SRIOV @@ -24,7 +27,6 @@ #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) #define QED_MAX_VF_CHAINS_PER_PF 16 -#define QED_ETH_VF_NUM_VLAN_FILTERS 2 #define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \ (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS) @@ -120,6 +122,8 @@ struct qed_vf_shadow_config { /* Shadow copy of all guest vlans */ struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1]; + /* Shadow copy of all configured MACs; Empty if forcing MACs */ + u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN]; u8 inner_vlan_removal; }; @@ -133,6 +137,9 @@ struct qed_vf_info { struct qed_bulletin bulletin; dma_addr_t vf_bulletin; + /* PF saves a copy of the last VF acquire message */ + struct vfpf_acquire_tlv acquire; + u32 concrete_fid; u16 opaque_fid; u16 mtu; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 72e69c0ec..9b780b31b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -117,36 +117,64 @@ exit: } #define VF_ACQUIRE_THRESH 3 -#define VF_ACQUIRE_MAC_FILTERS 1 +static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x].
Try PF recommended amount\n", + p_req->num_rxqs, + p_resp->num_rxqs, + p_req->num_txqs, + p_resp->num_txqs, + p_req->num_sbs, + p_resp->num_sbs, + p_req->num_mac_filters, + p_resp->num_mac_filters, + p_req->num_vlan_filters, + p_resp->num_vlan_filters, + p_req->num_mc_filters, p_resp->num_mc_filters); + + /* humble our request */ + p_req->num_txqs = p_resp->num_txqs; + p_req->num_rxqs = p_resp->num_rxqs; + p_req->num_sbs = p_resp->num_sbs; + p_req->num_mac_filters = p_resp->num_mac_filters; + p_req->num_vlan_filters = p_resp->num_vlan_filters; + p_req->num_mc_filters = p_resp->num_mc_filters; +} static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; - u8 rx_count = 1, tx_count = 1, num_sbs = 1; - u8 num_mac = VF_ACQUIRE_MAC_FILTERS; + struct vf_pf_resc_request *p_resc; bool resources_acquired = false; struct vfpf_acquire_tlv *req; int rc = 0, attempts = 0; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + p_resc = &req->resc_request; /* starting filling the request */ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; - req->resc_request.num_rxqs = rx_count; - req->resc_request.num_txqs = tx_count; - req->resc_request.num_sbs = num_sbs; - req->resc_request.num_mac_filters = num_mac; - req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; + p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; req->vfdev_info.fw_major = FW_MAJOR_VERSION; req->vfdev_info.fw_minor = FW_MINOR_VERSION; req->vfdev_info.fw_revision = FW_REVISION_VERSION; req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; + req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR; + req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR; /* Fill capability field with any non-deprecated config we support */ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; @@ -185,21 +213,21 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) resources_acquired = true; } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && attempts < VF_ACQUIRE_THRESH) { - DP_VERBOSE(p_hwfn, - QED_MSG_IOV, - "PF unwilling to fullfill resource request. Try PF recommended amount\n"); - - /* humble our request */ - req->resc_request.num_txqs = resp->resc.num_txqs; - req->resc_request.num_rxqs = resp->resc.num_rxqs; - req->resc_request.num_sbs = resp->resc.num_sbs; - req->resc_request.num_mac_filters = - resp->resc.num_mac_filters; - req->resc_request.num_vlan_filters = - resp->resc.num_vlan_filters; + qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc, + &resp->resc); /* Clear response buffer */ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) && + pfdev_info->major_fp_hsi && + (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { + DP_NOTICE(p_hwfn, + "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x].
Please change to a VF driver using %02x.xx.\n", + pfdev_info->major_fp_hsi, + pfdev_info->minor_fp_hsi, + ETH_HSI_VER_MAJOR, + ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi); + return -EINVAL; } else { DP_ERR(p_hwfn, "PF returned error %d to VF acquisition request\n", @@ -225,6 +253,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) } } + if (ETH_HSI_VER_MINOR && + (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { + DP_INFO(p_hwfn, + "PF is using older fastpath HSI; %02x.%02x is configured\n", + ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi); + } + return 0; } @@ -353,7 +388,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, /* Learn the address of the producer from the response */ if (pp_prod) { - u64 init_prod_val = 0; + u32 init_prod_val = 0; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -361,7 +396,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, rx_qid, *pp_prod, resp->offset); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ - __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)&init_prod_val); } @@ -405,8 +440,8 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, u16 pbl_size, void __iomem **pp_doorbell) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_start_queue_resp_tlv *resp; struct vfpf_start_txq_tlv *req; - struct pfvf_def_resp_tlv *resp; int rc; /* clear mailbox and prep first tlv */ @@ -424,20 +459,24 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); - resp = &p_iov->pf2vf_reply->default_resp; + resp = &p_iov->pf2vf_reply->queue_start; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } if (pp_doorbell) { - u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset; - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + - qed_db_addr(cid, DQ_DEMS_LEGACY); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", + tx_queue_id, *pp_doorbell, resp->offset); } +exit: return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index b82fda964..b23ce58e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -96,7 +96,9 @@ struct vfpf_acquire_tlv { u32 driver_version; u16 opaque_fid; /* ME register value */ u8 os_type; /* VFPF_ACQUIRE_OS_* value */ - u8 padding[5]; + u8 eth_fp_hsi_major; + u8 eth_fp_hsi_minor; + u8 padding[3]; } vfdev_info; struct vf_pf_resc_request resc_request; @@ -171,7 +173,14 @@ struct pfvf_acquire_resp_tlv { struct pfvf_stats_info stats_info; u8 port_mac[ETH_ALEN]; - u8 padding2[2]; + + /* It's possible PF had to configure an older fastpath HSI + * [in case VF is newer than PF]. This is communicated back + * to the VF. It can also be used in case of error due to + * non-matching versions to shed light in VF about failure. 
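Taken together, the PF- and VF-side hunks above implement a simple handshake for the fastpath HSI: major versions must match exactly, and the effective minor is the smaller of the two sides'. The rule in isolation (standalone sketch, not driver API):

#include <stdbool.h>
#include <stdint.h>

static bool negotiate_fp_hsi(uint8_t pf_major, uint8_t pf_minor,
			     uint8_t vf_major, uint8_t vf_minor,
			     uint8_t *effective_minor)
{
	if (vf_major != pf_major)
		return false; /* acquire fails; PF reports its own version back */
	*effective_minor = vf_minor < pf_minor ? vf_minor : pf_minor;
	return true;
}

This is why pfdev_info gains major_fp_hsi/minor_fp_hsi fields: on a major mismatch the PF still fills them in, so the rejected VF can log which driver version would be compatible.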
+ */ + u8 major_fp_hsi; + u8 minor_fp_hsi; } pfdev_info; struct pf_vf_resc { diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 06ff90d87..74a49850d 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_QEDE) := qede.o qede-y := qede_main.o qede_ethtool.o +qede-$(CONFIG_DCB) += qede_dcbnl.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 47d6b2225..02b06d4e4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -24,7 +24,7 @@ #include #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 7 +#define QEDE_MINOR_VERSION 10 #define QEDE_REVISION_VERSION 1 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ @@ -143,6 +143,8 @@ struct qede_dev { struct mutex qede_lock; u32 state; /* Protected by qede_lock */ u16 rx_buf_size; + u32 rx_copybreak; + /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ #define ETH_OVERHEAD (ETH_HLEN + 8 + 8) /* Max supported alignment is 256 (8 shift) @@ -235,6 +237,7 @@ struct qede_rx_queue { u64 rx_hw_errors; u64 rx_alloc_errors; + u64 rx_ip_frags; }; union db_prod { @@ -304,6 +307,9 @@ union qede_reload_args { u16 mtu; }; +#ifdef CONFIG_DCB +void qede_set_dcbnl_ops(struct net_device *ndev); +#endif void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); void qede_set_ethtool_ops(struct net_device *netdev); void qede_reload(struct qede_dev *edev, @@ -329,6 +335,7 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX +#define QEDE_MIN_PKT_LEN 64 #define QEDE_RX_HDR_SIZE 256 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c new file mode 100644 index 000000000..03e8c0212 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c @@ -0,0 +1,348 @@ +/* QLogic qede NIC Driver +* Copyright (c) 2015 QLogic Corporation +* +* This software is available under the terms of the GNU General Public License +* (GPL) Version 2, available from the file COPYING in the main directory of +* this source tree. 
+*/ + +#include +#include +#include +#include +#include "qede.h" + +static u8 qede_dcbnl_getstate(struct net_device *netdev) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getstate(edev->cdev); +} + +static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setstate(edev->cdev, state); +} + +static void qede_dcbnl_getpermhwaddr(struct net_device *netdev, + u8 *perm_addr) +{ + memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); +} + +static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type, + pgid, bw_pct, up_map); +} + +static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct); +} + +static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct, + up_map); +} + +static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct); +} + +static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio, + u8 *setting) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpfccfg(edev->cdev, prio, setting); +} + +static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio, + u8 setting) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->setpfccfg(edev->cdev, prio, setting); +} + +static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getcap(edev->cdev, capid, cap); +} + +static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num); +} + +static u8 qede_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getpfcstate(edev->cdev); +} + +static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getapp(edev->cdev, idtype, id); +} + +static u8 qede_dcbnl_getdcbx(struct net_device *netdev) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getdcbx(edev->cdev); +} + +static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid, + bw_pct, up_map); +} + +static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid, + bw_pct, up_map); +} + +static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid, + u8 bw_pct) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct); +} + 
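Every qede_dcbnl_* callback above follows one shape: recover the qede private data from the net_device, then forward to the dcb ops table supplied by the qed core. Schematically, with stand-in types (the real ones come from qede.h and the qed interface headers):

struct dcb_core_ops {
	unsigned char (*getstate)(void *cdev);
};

struct qede_priv {
	void *cdev;			/* handle into the qed core */
	const struct dcb_core_ops *dcb;	/* filled in by qed at probe time */
};

static unsigned char wrap_getstate(struct qede_priv *priv)
{
	/* in the driver: edev = netdev_priv(netdev), then delegate */
	return priv->dcb->getstate(priv->cdev);
}

Keeping the wrappers this thin lets the qed core own all DCB state while qede only adapts the dcbnl_rtnl_ops calling conventions.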
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid, + u8 bw_pct) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct); +} + +static u8 qede_dcbnl_setall(struct net_device *netdev) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setall(edev->cdev); +} + +static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num); +} + +static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpfcstate(edev->cdev, state); +} + +static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval, + u8 up) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up); +} + +static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setdcbx(edev->cdev, state); +} + +static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid, + u8 *flags) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags); +} + +static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags); +} + +static int qede_dcbnl_peer_getappinfo(struct net_device *netdev, + struct dcb_peer_app_info *info, + u16 *count) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count); +} + +static int qede_dcbnl_peer_getapptable(struct net_device *netdev, + struct dcb_app *app) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->peer_getapptable(edev->cdev, app); +} + +static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev, + struct cee_pfc *pfc) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc); +} + +static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev, + struct cee_pg *pg) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg); +} + +static int qede_dcbnl_ieee_getpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc); +} + +static int qede_dcbnl_ieee_setpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc); +} + +static int qede_dcbnl_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_getets(edev->cdev, ets); +} + +static int qede_dcbnl_ieee_setets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_setets(edev->cdev, ets); +} + +static int qede_dcbnl_ieee_getapp(struct net_device *netdev, + struct dcb_app *app) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_getapp(edev->cdev, app); +} + +static int qede_dcbnl_ieee_setapp(struct net_device *netdev, + struct dcb_app *app) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return 
edev->ops->dcb->ieee_setapp(edev->cdev, app); +} + +static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc); +} + +static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets); +} + +static const struct dcbnl_rtnl_ops qede_dcbnl_ops = { + .ieee_getpfc = qede_dcbnl_ieee_getpfc, + .ieee_setpfc = qede_dcbnl_ieee_setpfc, + .ieee_getets = qede_dcbnl_ieee_getets, + .ieee_setets = qede_dcbnl_ieee_setets, + .ieee_getapp = qede_dcbnl_ieee_getapp, + .ieee_setapp = qede_dcbnl_ieee_setapp, + .getdcbx = qede_dcbnl_getdcbx, + .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc, + .ieee_peer_getets = qede_dcbnl_ieee_peer_getets, + .getstate = qede_dcbnl_getstate, + .setstate = qede_dcbnl_setstate, + .getpermhwaddr = qede_dcbnl_getpermhwaddr, + .getpgtccfgtx = qede_dcbnl_getpgtccfgtx, + .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx, + .getpgtccfgrx = qede_dcbnl_getpgtccfgrx, + .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx, + .getpfccfg = qede_dcbnl_getpfccfg, + .setpfccfg = qede_dcbnl_setpfccfg, + .getcap = qede_dcbnl_getcap, + .getnumtcs = qede_dcbnl_getnumtcs, + .getpfcstate = qede_dcbnl_getpfcstate, + .getapp = qede_dcbnl_getapp, + .setpgtccfgtx = qede_dcbnl_setpgtccfgtx, + .setpgtccfgrx = qede_dcbnl_setpgtccfgrx, + .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx, + .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx, + .setall = qede_dcbnl_setall, + .setnumtcs = qede_dcbnl_setnumtcs, + .setpfcstate = qede_dcbnl_setpfcstate, + .setapp = qede_dcbnl_setapp, + .setdcbx = qede_dcbnl_setdcbx, + .setfeatcfg = qede_dcbnl_setfeatcfg, + .getfeatcfg = qede_dcbnl_getfeatcfg, + .peer_getappinfo = qede_dcbnl_peer_getappinfo, + .peer_getapptable = qede_dcbnl_peer_getapptable, + .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc, + .cee_peer_getpg = qede_dcbnl_cee_peer_getpg, +}; + +void qede_set_dcbnl_ops(struct net_device *dev) +{ + dev->dcbnl_ops = &qede_dcbnl_ops; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index ad3cae3b7..f8492cac9 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -37,6 +37,7 @@ static const struct { } qede_rqstats_arr[] = { QEDE_RQSTAT(rx_hw_errors), QEDE_RQSTAT(rx_alloc_errors), + QEDE_RQSTAT(rx_ip_frags), }; #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr) @@ -426,6 +427,59 @@ static u32 qede_get_link(struct net_device *dev) return current_link.link_up; } +static int qede_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 rxc, txc; + + memset(coal, 0, sizeof(struct ethtool_coalesce)); + edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc); + + coal->rx_coalesce_usecs = rxc; + coal->tx_coalesce_usecs = txc; + + return 0; +} + +static int qede_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct qede_dev *edev = netdev_priv(dev); + int i, rc = 0; + u16 rxc, txc; + u8 sb_id; + + if (!netif_running(dev)) { + DP_INFO(edev, "Interface is down\n"); + return -EINVAL; + } + + if (coal->rx_coalesce_usecs > QED_COALESCE_MAX || + coal->tx_coalesce_usecs > QED_COALESCE_MAX) { + DP_INFO(edev, + "Can't support requested %s coalesce value [max supported value %d]\n",
+static int qede_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	int i, rc = 0;
+	u16 rxc, txc;
+	u8 sb_id;
+
+	if (!netif_running(dev)) {
+		DP_INFO(edev, "Interface is down\n");
+		return -EINVAL;
+	}
+
+	if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+	    coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+		DP_INFO(edev,
+			"Can't support requested %s coalesce value [max supported value %d]\n",
+			coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+								   : "tx",
+			QED_COALESCE_MAX);
+		return -EINVAL;
+	}
+
+	rxc = (u16)coal->rx_coalesce_usecs;
+	txc = (u16)coal->tx_coalesce_usecs;
+	for_each_rss(i) {
+		sb_id = edev->fp_array[i].sb_info->igu_sb_id;
+		rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
+						     (u8)i, sb_id);
+		if (rc) {
+			DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 static void qede_get_ringparam(struct net_device *dev,
 			       struct ethtool_ringparam *ering)
 {
@@ -910,6 +964,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	memset(first_bd, 0, sizeof(*first_bd));
 	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 	first_bd->data.bd_flags.bitfields = val;
+	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+	first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
 
 	/* Map skb linear data for DMA and set in the first BD */
 	mapping = dma_map_single(&edev->pdev->dev, skb->data,
@@ -1129,6 +1185,48 @@ static void qede_self_test(struct net_device *dev,
 	}
 }
 
+static int qede_set_tunable(struct net_device *dev,
+			    const struct ethtool_tunable *tuna,
+			    const void *data)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u32 val;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		val = *(u32 *)data;
+		if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+			DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "Invalid rx copy break value, range is [%u, %u]",
+				   QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+			return -EINVAL;
+		}
+
+		edev->rx_copybreak = *(u32 *)data;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+			    const struct ethtool_tunable *tuna, void *data)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		*(u32 *)data = edev->rx_copybreak;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops qede_ethtool_ops = {
 	.get_settings = qede_get_settings,
 	.set_settings = qede_set_settings,
@@ -1137,6 +1235,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
 	.set_msglevel = qede_set_msglevel,
 	.nway_reset = qede_nway_reset,
 	.get_link = qede_get_link,
+	.get_coalesce = qede_get_coalesce,
+	.set_coalesce = qede_set_coalesce,
 	.get_ringparam = qede_get_ringparam,
 	.set_ringparam = qede_set_ringparam,
 	.get_pauseparam = qede_get_pauseparam,
@@ -1155,6 +1255,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
 	.get_channels = qede_get_channels,
 	.set_channels = qede_set_channels,
 	.self_test = qede_self_test,
+	.get_tunable = qede_get_tunable,
+	.set_tunable = qede_set_tunable,
 };
 
 static const struct ethtool_ops qede_vf_ethtool_ops = {
@@ -1177,6 +1279,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
 	.set_rxfh = qede_set_rxfh,
 	.get_channels = qede_get_channels,
 	.set_channels = qede_set_channels,
+	.get_tunable = qede_get_tunable,
+	.set_tunable = qede_set_tunable,
 };
 
 void qede_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index f8e11f953..9544e4c41 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,12 +24,7 @@
 #include
 #include
 #include
-#ifdef CONFIG_QEDE_VXLAN
-#include
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-#include
-#endif
+#include
 #include
 #include
 #include
@@ -490,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
 }
 #endif
 
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+	/* wmb makes sure that the BDs data is updated before updating the
+	 * producer, otherwise FW may read old data from the BDs.
+	 */
+	wmb();
+	barrier();
+	writel(txq->tx_db.raw, txq->doorbell_addr);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write arrives to the device before
+	 * the queue lock is released and another start_xmit is called (possibly
+	 * on another CPU). Without this barrier, the next doorbell can bypass
+	 * this doorbell. This is applicable to IA64/Altix systems.
+	 */
+	mmiowb();
+}
+
 /* Main transmit function */
 static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -548,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
 		DP_NOTICE(edev, "SKB mapping failed\n");
 		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+		qede_update_tx_producer(txq);
 		return NETDEV_TX_OK;
 	}
 	nbd++;
@@ -579,8 +593,6 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
 	/* Fill the parsing flags & params according to the requested offload */
 	if (xmit_type & XMIT_L4_CSUM) {
-		u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
-
 		/* We don't re-calculate IP checksum as it is already done by
 		 * the upper stack
 		 */
@@ -590,14 +602,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		if (xmit_type & XMIT_ENC) {
 			first_bd->data.bd_flags.bitfields |=
 				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-		} else {
-			/* In cases when OS doesn't indicate for inner offloads
-			 * when packet is tunnelled, we need to override the HW
-			 * tunnel configuration so that packets are treated as
-			 * regular non tunnelled packets and no inner offloads
-			 * are done by the hardware.
-			 */
-			first_bd->data.bitfields |= cpu_to_le16(temp);
+			first_bd->data.bitfields |=
+				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
 		}
 
 	/* If the packet is IPv6 with extension header, indicate that
@@ -655,6 +661,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 			tx_data_bd = (struct eth_tx_bd *)third_bd;
 			data_split = true;
 		}
+	} else {
+		first_bd->data.bitfields |=
+			(skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
 	}
 
 	/* Handle fragmented skb */
@@ -666,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		if (rc) {
 			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
 						data_split);
+			qede_update_tx_producer(txq);
 			return NETDEV_TX_OK;
 		}
 
@@ -690,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		if (rc) {
 			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
 						data_split);
+			qede_update_tx_producer(txq);
 			return NETDEV_TX_OK;
 		}
 	}
@@ -710,23 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	txq->tx_db.data.bd_prod =
 		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 
-	/* wmb makes sure that the BDs data is updated before updating the
-	 * producer, otherwise FW may read old data from the BDs.
-	 */
-	wmb();
-	barrier();
-	writel(txq->tx_db.raw, txq->doorbell_addr);
-
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the queue lock is released and another start_xmit is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
+	if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+		qede_update_tx_producer(txq);
 
 	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
 		      < (MAX_SKB_FRAGS + 1))) {
+		if (skb->xmit_more)
+			qede_update_tx_producer(txq);
+
 		netif_tx_stop_queue(netdev_txq);
 		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
 			   "Stop queue was called\n");
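qede_update_tx_producer() centralizes the barrier-plus-writel doorbell sequence so the hot path above can skip the MMIO write while skb->xmit_more indicates more packets are coming, ringing once per batch instead of once per packet. The toy program below is a plain userspace simulation of that batching effect; all names and the ring model are invented for illustration, none of this is driver code.

/* Hypothetical simulation of xmit_more-style doorbell batching:
 * post several buffer descriptors but publish the producer index
 * only when the batch ends, mimicking qede_update_tx_producer().
 */
#include <stdio.h>
#include <stdbool.h>

static unsigned int ring_prod;		/* software producer index */
static unsigned int doorbell;		/* last value seen by "hardware" */
static unsigned int doorbell_writes;	/* expensive MMIO writes issued */

static void update_tx_producer(void)
{
	/* In the driver this is wmb() + writel() + mmiowb(). */
	doorbell = ring_prod;
	doorbell_writes++;
}

static void start_xmit(bool more)
{
	ring_prod++;			/* post one buffer descriptor */
	if (!more)			/* analogous to !skb->xmit_more */
		update_tx_producer();
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		start_xmit(i != 7);	/* one batch of eight packets */
	printf("posted=%u doorbell=%u writes=%u\n",
	       ring_prod, doorbell, doorbell_writes);
	return 0;	/* expect: posted=8 doorbell=8 writes=1 */
}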
@@ -1357,6 +1360,20 @@ static u8 qede_check_csum(u16 flag)
 	return qede_check_tunn_csum(flag);
 }
 
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+				      u16 flag)
+{
+	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+		return true;
+
+	return false;
+}
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
 	struct qede_dev *edev = fp->edev;
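qede_pkt_is_ip_fragmented() decides, from two packed parsing-flag words, whether a checksum "error" can be tolerated because the packet is an IP fragment (fragments cannot carry a complete L4 checksum). The test is the usual (MASK << SHIFT) bitfield idiom; the compilable miniature below demonstrates it with invented bit positions, not the real firmware layout.

/* Minimal demo of the (MASK << SHIFT) flag test used above.
 * Field positions are invented for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_IPV4FRAG_MASK	0x1	/* assumed 1-bit field */
#define DEMO_IPV4FRAG_SHIFT	3	/* assumed bit position */

static int pkt_is_fragmented(uint16_t flags)
{
	return !!(flags & (DEMO_IPV4FRAG_MASK << DEMO_IPV4FRAG_SHIFT));
}

int main(void)
{
	/* bit 3 set -> fragment; bit 2 set -> not a fragment */
	printf("%d %d\n", pkt_is_fragmented(0x08), pkt_is_fragmented(0x04));
	return 0;	/* prints "1 0" */
}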
@@ -1435,6 +1452,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 
 		csum_flag = qede_check_csum(parse_flag);
 		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+			if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+						      parse_flag)) {
+				rxq->rx_ip_frags++;
+				goto alloc_skb;
+			}
+
 			DP_NOTICE(edev,
 				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
 				  sw_comp_cons, parse_flag);
@@ -1443,6 +1466,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			goto next_cqe;
 		}
 
+alloc_skb:
 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			DP_NOTICE(edev,
@@ -1453,7 +1477,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 		}
 
 		/* Copy data into SKB */
-		if (len + pad <= QEDE_RX_HDR_SIZE) {
+		if (len + pad <= edev->rx_copybreak) {
 			memcpy(skb_put(skb, len),
 			       page_address(data) + pad +
 			       sw_rx_data->page_offset, len);
@@ -1585,56 +1609,49 @@ next_cqe: /* don't consume bd rx buffer */
 static int qede_poll(struct napi_struct *napi, int budget)
 {
-	int work_done = 0;
 	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
-						napi);
+						 napi);
 	struct qede_dev *edev = fp->edev;
+	int rx_work_done = 0;
+	u8 tc;
 
-	while (1) {
-		u8 tc;
-
-		for (tc = 0; tc < edev->num_tc; tc++)
-			if (qede_txq_has_work(&fp->txqs[tc]))
-				qede_tx_int(edev, &fp->txqs[tc]);
-
-		if (qede_has_rx_work(fp->rxq)) {
-			work_done += qede_rx_int(fp, budget - work_done);
-
-			/* must not complete if we consumed full budget */
-			if (work_done >= budget)
-				break;
-		}
+	for (tc = 0; tc < edev->num_tc; tc++)
+		if (qede_txq_has_work(&fp->txqs[tc]))
+			qede_tx_int(edev, &fp->txqs[tc]);
+
+	rx_work_done = qede_has_rx_work(fp->rxq) ?
+			qede_rx_int(fp, budget) : 0;
+	if (rx_work_done < budget) {
+		qed_sb_update_sb_idx(fp->sb_info);
+		/* *_has_*_work() reads the status block,
+		 * thus we need to ensure that status block indices
+		 * have been actually read (qed_sb_update_sb_idx)
+		 * prior to this check (*_has_*_work) so that
+		 * we won't write the "newer" value of the status block
+		 * to HW (if there was a DMA right after
+		 * qede_has_rx_work and if there is no rmb, the memory
+		 * reading (qed_sb_update_sb_idx) may be postponed
+		 * to right before *_ack_sb). In this case there
+		 * will never be another interrupt until there is
+		 * another update of the status block, while there
+		 * is still unhandled work.
+		 */
+		rmb();
 
 		/* Fall out from the NAPI loop if needed */
-		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
-			qed_sb_update_sb_idx(fp->sb_info);
-			/* *_has_*_work() reads the status block,
-			 * thus we need to ensure that status block indices
-			 * have been actually read (qed_sb_update_sb_idx)
-			 * prior to this check (*_has_*_work) so that
-			 * we won't write the "newer" value of the status block
-			 * to HW (if there was a DMA right after
-			 * qede_has_rx_work and if there is no rmb, the memory
-			 * reading (qed_sb_update_sb_idx) may be postponed
-			 * to right before *_ack_sb). In this case there
-			 * will never be another interrupt until there is
-			 * another update of the status block, while there
-			 * is still unhandled work.
-			 */
-			rmb();
-
-			if (!(qede_has_rx_work(fp->rxq) ||
-			      qede_has_tx_work(fp))) {
-				napi_complete(napi);
-				/* Update and reenable interrupts */
-				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
-					   1 /*update*/);
-				break;
-			}
+		if (!(qede_has_rx_work(fp->rxq) ||
+		      qede_has_tx_work(fp))) {
+			napi_complete(napi);
+
+			/* Update and reenable interrupts */
+			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+				   1 /*update*/);
+		} else {
+			rx_work_done = budget;
		}
	}
 
-	return work_done;
+	return rx_work_done;
 }
 
 static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
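The rewritten qede_poll() follows the standard NAPI contract: consume at most `budget` packets in one pass, and only complete the NAPI context and re-arm interrupts when less than the full budget was used; otherwise report the full budget so the softirq polls again. A toy model of that contract, with all names invented and no kernel APIs:

/* Toy model of the NAPI polling contract (illustrative names only). */
#include <stdio.h>

static int pending = 20;		/* packets waiting in the ring */
static int irq_enabled;

static int poll(int budget)
{
	int done = 0;

	while (done < budget && pending > 0) {
		pending--;		/* "process" one packet */
		done++;
	}
	if (done < budget)
		irq_enabled = 1;	/* napi_complete + re-arm */
	return done;
}

int main(void)
{
	int budget = 16, done;

	do {	/* the softirq keeps polling while the budget is exhausted */
		done = poll(budget);
		printf("polled %d, pending %d, irq %d\n",
		       done, pending, irq_enabled);
	} while (done == budget);
	return 0;
}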
@@ -2050,10 +2067,13 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 	}
 
 	/* Remove vlan */
-	rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
-	if (rc) {
-		DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
-		return -EINVAL;
+	if (vlan->configured) {
+		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+					    vid);
+		if (rc) {
+			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+			return -EINVAL;
+		}
 	}
 
 	qede_del_vlan_from_list(edev, vlan);
@@ -2116,75 +2136,75 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
 	return 0;
 }
 
-#ifdef CONFIG_QEDE_VXLAN
-static void qede_add_vxlan_port(struct net_device *dev,
-				sa_family_t sa_family, __be16 port)
+static void qede_udp_tunnel_add(struct net_device *dev,
+				struct udp_tunnel_info *ti)
 {
 	struct qede_dev *edev = netdev_priv(dev);
-	u16 t_port = ntohs(port);
+	u16 t_port = ntohs(ti->port);
 
-	if (edev->vxlan_dst_port)
-		return;
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (edev->vxlan_dst_port)
+			return;
 
-	edev->vxlan_dst_port = t_port;
+		edev->vxlan_dst_port = t_port;
 
-	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+			   t_port);
 
-	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
-	schedule_delayed_work(&edev->sp_task, 0);
-}
+		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (edev->geneve_dst_port)
+			return;
 
-static void qede_del_vxlan_port(struct net_device *dev,
-				sa_family_t sa_family, __be16 port)
-{
-	struct qede_dev *edev = netdev_priv(dev);
-	u16 t_port = ntohs(port);
+		edev->geneve_dst_port = t_port;
 
-	if (t_port != edev->vxlan_dst_port)
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+			   t_port);
+		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+		break;
+	default:
 		return;
+	}
 
-	edev->vxlan_dst_port = 0;
-
-	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
-
-	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
 	schedule_delayed_work(&edev->sp_task, 0);
 }
-#endif
 
-#ifdef CONFIG_QEDE_GENEVE
-static void qede_add_geneve_port(struct net_device *dev,
-				 sa_family_t sa_family, __be16 port)
+static void qede_udp_tunnel_del(struct net_device *dev,
+				struct udp_tunnel_info *ti)
 {
 	struct qede_dev *edev = netdev_priv(dev);
-	u16 t_port = ntohs(port);
+	u16 t_port = ntohs(ti->port);
 
-	if (edev->geneve_dst_port)
-		return;
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (t_port != edev->vxlan_dst_port)
+			return;
 
-	edev->geneve_dst_port = t_port;
+		edev->vxlan_dst_port = 0;
 
-	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
-	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
-	schedule_delayed_work(&edev->sp_task, 0);
-}
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+			   t_port);
 
-static void qede_del_geneve_port(struct net_device *dev,
-				 sa_family_t sa_family, __be16 port)
-{
-	struct qede_dev *edev = netdev_priv(dev);
-	u16 t_port = ntohs(port);
+		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (t_port != edev->geneve_dst_port)
+			return;
 
-	if (t_port != edev->geneve_dst_port)
-		return;
+		edev->geneve_dst_port = 0;
 
-	edev->geneve_dst_port = 0;
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+			   t_port);
+		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+		break;
+	default:
+		return;
+	}
 
-	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
-	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
 	schedule_delayed_work(&edev->sp_task, 0);
 }
-#endif
 
 static const struct net_device_ops qede_netdev_ops = {
 	.ndo_open = qede_open,
@@ -2208,14 +2228,8 @@ static const struct net_device_ops qede_netdev_ops = {
 	.ndo_get_vf_config = qede_get_vf_config,
 	.ndo_set_vf_rate = qede_set_vf_rate,
 #endif
-#ifdef CONFIG_QEDE_VXLAN
-	.ndo_add_vxlan_port = qede_add_vxlan_port,
-	.ndo_del_vxlan_port = qede_del_vxlan_port,
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-	.ndo_add_geneve_port = qede_add_geneve_port,
-	.ndo_del_geneve_port = qede_del_geneve_port,
-#endif
+	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
+	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
 };
 
 /* -------------------------------------------------------------------------
@@ -2505,8 +2519,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
 	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
+#ifdef CONFIG_DCB
+	if (!IS_VF(edev))
+		qede_set_dcbnl_ops(edev->ndev);
+#endif
+
 	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
 	mutex_init(&edev->qede_lock);
+	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
 
 	DP_INFO(edev, "Ending successfully qede probe\n");
 
@@ -2823,6 +2843,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rc = edev->ops->common->chain_alloc(edev->cdev,
 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 					    QED_CHAIN_MODE_NEXT_PTR,
+					    QED_CHAIN_CNT_TYPE_U16,
 					    RX_RING_SIZE,
 					    sizeof(struct eth_rx_bd),
 					    &rxq->rx_bd_ring);
@@ -2834,6 +2855,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rc = edev->ops->common->chain_alloc(edev->cdev,
 					    QED_CHAIN_USE_TO_CONSUME,
 					    QED_CHAIN_MODE_PBL,
+					    QED_CHAIN_CNT_TYPE_U16,
 					    RX_RING_SIZE,
 					    sizeof(union eth_rx_cqe),
 					    &rxq->rx_comp_ring);
@@ -2885,9 +2907,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev,
 	rc = edev->ops->common->chain_alloc(edev->cdev,
 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 					    QED_CHAIN_MODE_PBL,
+					    QED_CHAIN_CNT_TYPE_U16,
 					    NUM_TX_BDS_MAX,
-					    sizeof(*p_virt),
-					    &txq->tx_pbl);
+					    sizeof(*p_virt), &txq->tx_pbl);
 	if (rc)
 		goto err;
 
@@ -3253,6 +3275,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 	start.vport_id = 0;
 	start.drop_ttl0 = true;
 	start.remove_inner_vlan = vlan_removal_en;
+	start.clear_stats = clear_stats;
 
 	rc = edev->ops->vport_start(cdev, &start);
@@ -3578,12 +3601,8 @@ static int qede_open(struct net_device *ndev)
 	if (rc)
 		return rc;
 
-#ifdef CONFIG_QEDE_VXLAN
-	vxlan_get_rx_port(ndev);
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-	geneve_get_rx_port(ndev);
-#endif
+	udp_tunnel_get_rx_info(ndev);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 94128abf2..18ebcc710 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 64
-#define QLCNIC_LINUX_VERSIONID  "5.3.64"
+#define _QLCNIC_LINUX_SUBVERSION 65
+#define QLCNIC_LINUX_VERSIONID  "5.3.65"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -1026,10 +1026,8 @@ struct qlcnic_ipaddr {
 #define QLCNIC_HAS_PHYS_PORT_ID		0x40000
 #define QLCNIC_TSS_RSS			0x80000
 
-#ifdef CONFIG_QLCNIC_VXLAN
 #define QLCNIC_ADD_VXLAN_PORT		0x100000
 #define QLCNIC_DEL_VXLAN_PORT		0x200000
-#endif
 #define QLCNIC_VLAN_FILTERING		0x800000
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f9640d5ce..bdbcd2b08 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2159,7 +2159,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
 	struct qlcnic_cmd_args cmd;
 	u32 mac_low, mac_high;
 
-	function = 0;
 	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 13dc6646a..74105c6d0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,7 +1020,6 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
 	return 0;
 }
 
-#ifdef CONFIG_QLCNIC_VXLAN
 #define QLC_83XX_ENCAP_TYPE_VXLAN	BIT_1
 #define QLC_83XX_MATCH_ENCAP_ID		BIT_2
 #define QLC_83XX_SET_VXLAN_UDP_DPORT	BIT_3
@@ -1089,14 +1088,12 @@ static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
 
 	return ret;
 }
-#endif
 
 static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
 {
 	if (adapter->fhash.fnum)
 		qlcnic_prune_lb_filters(adapter);
 
-#ifdef CONFIG_QLCNIC_VXLAN
 	if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
 		if (qlcnic_set_vxlan_port(adapter))
 			return;
@@ -1112,7 +1109,6 @@ static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
 		adapter->ahw->vxlan_port = 0;
 		adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
 	}
-#endif
 }
 
 /**
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 9777e5713..f4aa6331b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -45,7 +45,6 @@ struct qlcnic_dcb {
 static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
 {
 	kfree(dcb);
-	dcb = NULL;
 }
 
 static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 87c642d3b..fedd73667 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -102,7 +102,6 @@
 #define QLCNIC_RESPONSE_DESC	0x05
 #define QLCNIC_LRO_DESC		0x12
 
-#define QLCNIC_TX_POLL_BUDGET		128
 #define QLCNIC_TCP_HDR_SIZE		20
 #define QLCNIC_TCP_TS_OPTION_SIZE	12
 #define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
 	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_adapter *adapter;
 
-	budget = QLCNIC_TX_POLL_BUDGET;
 	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
 	adapter = tx_ring->adapter;
 	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 9fedc8f62..6d03721a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -16,9 +16,7 @@
 #include
 #include
 #include
-#ifdef CONFIG_QLCNIC_VXLAN
 #include
-#endif
 
 #include "qlcnic.h"
 #include "qlcnic_sriov.h"
@@ -474,13 +472,15 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
 	return 0;
 }
 
-#ifdef CONFIG_QLCNIC_VXLAN
 static void qlcnic_add_vxlan_port(struct net_device *netdev,
-				  sa_family_t sa_family, __be16 port)
+				  struct udp_tunnel_info *ti)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
+
 	/* Adapter supports only one VXLAN port. Use very first port
 	 * for enabling offload
 	 */
@@ -488,23 +488,26 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
 		return;
 	if (!ahw->vxlan_port_count) {
 		ahw->vxlan_port_count = 1;
-		ahw->vxlan_port = ntohs(port);
+		ahw->vxlan_port = ntohs(ti->port);
 		adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
 		return;
 	}
-	if (ahw->vxlan_port == ntohs(port))
+	if (ahw->vxlan_port == ntohs(ti->port))
 		ahw->vxlan_port_count++;
 
 }
 
 static void qlcnic_del_vxlan_port(struct net_device *netdev,
-				  sa_family_t sa_family, __be16 port)
+				  struct udp_tunnel_info *ti)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
+
 	if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
-	    (ahw->vxlan_port != ntohs(port)))
+	    (ahw->vxlan_port != ntohs(ti->port)))
 		return;
 
 	ahw->vxlan_port_count--;
@@ -519,7 +522,6 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
 	features = vlan_features_check(skb, features);
 	return vxlan_features_check(skb, features);
 }
-#endif
 
 static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_open = qlcnic_open,
@@ -539,11 +541,9 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_fdb_del = qlcnic_fdb_del,
 	.ndo_fdb_dump = qlcnic_fdb_dump,
 	.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
-#ifdef CONFIG_QLCNIC_VXLAN
-	.ndo_add_vxlan_port = qlcnic_add_vxlan_port,
-	.ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+	.ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
+	.ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
 	.ndo_features_check = qlcnic_features_check,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = qlcnic_poll_controller,
 #endif
@@ -2015,10 +2015,8 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
 
 	qlcnic_create_sysfs_entries(adapter);
 
-#ifdef CONFIG_QLCNIC_VXLAN
 	if (qlcnic_encap_rx_offload(adapter))
-		vxlan_get_rx_port(netdev);
-#endif
+		udp_tunnel_get_rx_info(netdev);
 
 	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
 	return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 017d8c2c8..24061b9b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
 	spinlock_t			vlan_list_lock;	/* Lock for VLAN list */
 };
 
-struct qlcnic_async_work_list {
+struct qlcnic_async_cmd {
 	struct list_head	list;
-	struct work_struct	work;
-	void			*ptr;
 	struct qlcnic_cmd_args	*cmd;
 };
 
@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
 	struct workqueue_struct	*bc_trans_wq;
 	struct workqueue_struct	*bc_async_wq;
 	struct workqueue_struct	*bc_flr_wq;
-	struct list_head	async_list;
+	struct qlcnic_adapter	*adapter;
+	struct list_head	async_cmd_list;
+	struct work_struct	vf_async_work;
+	spinlock_t		queue_lock; /* async_cmd_list queue lock */
 };
 
 struct qlcnic_sriov {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b729b..d7107055e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -29,6 +29,7 @@
 #define QLC_83XX_VF_RESET_FAIL_THRESH	8
 #define QLC_BC_CMD_MAX_RETRY_CNT	5
 
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 	}
 
 	bc->bc_async_wq =  wq;
-	INIT_LIST_HEAD(&bc->async_list);
+	INIT_LIST_HEAD(&bc->async_cmd_list);
+	INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+	spin_lock_init(&bc->queue_lock);
+	bc->adapter = adapter;
 
 	for (i = 0; i < num_vfs; i++) {
 		vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
 
 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
 {
-	struct list_head *head = &bc->async_list;
-	struct qlcnic_async_work_list *entry;
+	struct list_head *head = &bc->async_cmd_list;
+	struct qlcnic_async_cmd *entry;
 
 	flush_workqueue(bc->bc_async_wq);
+	cancel_work_sync(&bc->vf_async_work);
+
+	spin_lock(&bc->queue_lock);
 	while (!list_empty(head)) {
-		entry = list_entry(head->next, struct qlcnic_async_work_list,
+		entry = list_entry(head->next, struct qlcnic_async_cmd,
 				   list);
-		cancel_work_sync(&entry->work);
 		list_del(&entry->list);
+		kfree(entry->cmd);
 		kfree(entry);
 	}
+	spin_unlock(&bc->queue_lock);
 }
 
 void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 
 static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
 {
-	struct qlcnic_async_work_list *entry;
-	struct qlcnic_adapter *adapter;
+	struct qlcnic_async_cmd *entry, *tmp;
+	struct qlcnic_back_channel *bc;
 	struct qlcnic_cmd_args *cmd;
+	struct list_head *head;
+	LIST_HEAD(del_list);
+
+	bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+	head = &bc->async_cmd_list;
+
+	spin_lock(&bc->queue_lock);
+	list_splice_init(head, &del_list);
+	spin_unlock(&bc->queue_lock);
+
+	list_for_each_entry_safe(entry, tmp, &del_list, list) {
+		list_del(&entry->list);
+		cmd = entry->cmd;
+		__qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+		kfree(entry);
+	}
+
+	if (!list_empty(head))
+		queue_work(bc->bc_async_wq, &bc->vf_async_work);
 
-	entry = container_of(work, struct qlcnic_async_work_list, work);
-	adapter = entry->ptr;
-	cmd = entry->cmd;
-	__qlcnic_sriov_issue_cmd(adapter, cmd);
 	return;
 }
 
-static struct qlcnic_async_work_list *
-qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+			     struct qlcnic_cmd_args *cmd)
 {
-	struct list_head *node;
-	struct qlcnic_async_work_list *entry = NULL;
-	u8 empty = 0;
+	struct qlcnic_async_cmd *entry = NULL;
 
-	list_for_each(node, &bc->async_list) {
-		entry = list_entry(node, struct qlcnic_async_work_list, list);
-		if (!work_pending(&entry->work)) {
-			empty = 1;
-			break;
-		}
-	}
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return NULL;
 
-	if (!empty) {
-		entry = kzalloc(sizeof(struct qlcnic_async_work_list),
-				GFP_ATOMIC);
-		if (entry == NULL)
-			return NULL;
-		list_add_tail(&entry->list, &bc->async_list);
-	}
+	entry->cmd = cmd;
+
+	spin_lock(&bc->queue_lock);
+	list_add_tail(&entry->list, &bc->async_cmd_list);
+	spin_unlock(&bc->queue_lock);
 
 	return entry;
 }
 
 static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
-					    work_func_t func, void *data,
 					    struct qlcnic_cmd_args *cmd)
 {
-	struct qlcnic_async_work_list *entry = NULL;
+	struct qlcnic_async_cmd *entry = NULL;
 
-	entry = qlcnic_sriov_get_free_node_async_work(bc);
-	if (!entry)
+	entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+	if (!entry) {
+		qlcnic_free_mbx_args(cmd);
+		kfree(cmd);
 		return;
+	}
 
-	entry->ptr = data;
-	entry->cmd = cmd;
-	INIT_WORK(&entry->work, func);
-	queue_work(bc->bc_async_wq, &entry->work);
+	queue_work(bc->bc_async_wq, &bc->vf_async_work);
 }
 
 static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
 	if (adapter->need_fw_reset)
 		return -EIO;
 
-	qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
-					adapter, cmd);
+	qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
 	return 0;
 }
 
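The qlcnic SR-IOV rework above replaces one work item per command with a single worker that splices the pending command list out under a spinlock and then issues the entries with the lock dropped. The self-contained sketch below mirrors that splice-and-drain shape with a plain linked list and a pthread mutex; it is illustrative only (build with -pthread) and uses none of the driver's types.

/* Illustrative splice-and-drain queue, mirroring the shape of
 * qlcnic_sriov_handle_async_issue_cmd(): detach the whole list
 * under the lock, then process entries with the lock released.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct cmd {
	int id;
	struct cmd *next;
};

static struct cmd *queue;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue(int id)
{
	struct cmd *c = malloc(sizeof(*c));

	c->id = id;
	pthread_mutex_lock(&lock);
	c->next = queue;		/* LIFO for brevity */
	queue = c;
	pthread_mutex_unlock(&lock);
}

static void drain(void)
{
	struct cmd *batch, *c;

	pthread_mutex_lock(&lock);
	batch = queue;			/* like list_splice_init() */
	queue = NULL;
	pthread_mutex_unlock(&lock);

	while ((c = batch)) {		/* issue commands lock-free */
		batch = c->next;
		printf("issue cmd %d\n", c->id);
		free(c);
	}
}

int main(void)
{
	enqueue(1);
	enqueue(2);
	drain();
	return 0;
}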
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index fd5d1c93b..fd4a8e473 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1892,7 +1892,6 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			skb->len += length;
 			skb->data_len += length;
 			skb->truesize += length;
-			length -= length;
 			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
 					      lbq_desc->p.pg_chunk.va,
 					      &hlen);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 6b541e57c..cb29ee24c 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2004 Sten Wang
  * Copyright (C) 2007
  *	Daniel Gimpelevich
- * Copyright (C) 2007-2012 Florian Fainelli
+ * Copyright (C) 2007-2012 Florian Fainelli
 *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -48,8 +48,8 @@
 #include
 
 #define DRV_NAME	"r6040"
-#define DRV_VERSION	"0.28"
-#define DRV_RELDATE	"07Oct2011"
+#define DRV_VERSION	"0.29"
+#define DRV_RELDATE	"04Jul2016"
 
 /* Time in jiffies before concluding the transmitter is hung. */
 #define TX_TIMEOUT	(6000 * HZ / 1000)
 
@@ -162,7 +162,7 @@
 MODULE_AUTHOR("Sten Wang ,"
 	      "Daniel Gimpelevich ,"
-	      "Florian Fainelli ");
+	      "Florian Fainelli ");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
 MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
@@ -200,7 +200,6 @@ struct r6040_private {
 	struct mii_bus *mii_bus;
 	struct napi_struct napi;
 	void __iomem *base;
-	struct phy_device *phydev;
 	int old_link;
 	int old_duplex;
 };
@@ -474,7 +473,7 @@ static void r6040_down(struct net_device *dev)
 	iowrite16(adrp[1], ioaddr + MID_0M);
 	iowrite16(adrp[2], ioaddr + MID_0H);
 
-	phy_stop(lp->phydev);
+	phy_stop(dev->phydev);
 }
 
 static int r6040_close(struct net_device *dev)
@@ -515,12 +514,10 @@ static int r6040_close(struct net_device *dev)
 
 static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct r6040_private *lp = netdev_priv(dev);
-
-	if (!lp->phydev)
+	if (!dev->phydev)
 		return -EINVAL;
 
-	return phy_mii_ioctl(lp->phydev, rq, cmd);
+	return phy_mii_ioctl(dev->phydev, rq, cmd);
 }
 
 static int r6040_rx(struct net_device *dev, int limit)
@@ -617,10 +614,15 @@ static void r6040_tx(struct net_device *dev)
 		if (descptr->status & DSC_OWNER_MAC)
 			break; /* Not complete */
 		skb_ptr = descptr->skb_ptr;
+
+		/* Statistic Counter */
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += skb_ptr->len;
+
 		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
 			skb_ptr->len, PCI_DMA_TODEVICE);
 		/* Free buffer */
-		dev_kfree_skb_irq(skb_ptr);
+		dev_kfree_skb(skb_ptr);
 		descptr->skb_ptr = NULL;
 		/* To next descriptor */
 		descptr = descptr->vndescp;
@@ -641,12 +643,15 @@ static int r6040_poll(struct napi_struct *napi, int budget)
 	void __iomem *ioaddr = priv->base;
 	int work_done;
 
+	r6040_tx(dev);
+
 	work_done = r6040_rx(dev, budget);
 
 	if (work_done < budget) {
-		napi_complete(napi);
-		/* Enable RX interrupt */
-		iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
+		napi_complete_done(napi, work_done);
+		/* Enable RX/TX interrupt */
+		iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
+			  ioaddr + MIER);
 	}
 	return work_done;
 }
@@ -673,7 +678,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
 	}
 
 	/* RX interrupt request */
-	if (status & RX_INTS) {
+	if (status & (RX_INTS | TX_INTS)) {
 		if (status & RX_NO_DESC) {
 			/* RX descriptor unavailable */
 			dev->stats.rx_dropped++;
@@ -684,15 +689,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
 
 		if (likely(napi_schedule_prep(&lp->napi))) {
 			/* Mask off RX interrupt */
-			misr &= ~RX_INTS;
-			__napi_schedule(&lp->napi);
+			misr &= ~(RX_INTS | TX_INTS);
+			__napi_schedule_irqoff(&lp->napi);
 		}
 	}
 
-	/* TX interrupt request */
-	if (status & TX_INTS)
-		r6040_tx(dev);
-
 	/* Restore RDC MAC interrupt */
 	iowrite16(misr, ioaddr + MIER);
 
@@ -732,7 +733,7 @@ static int r6040_up(struct net_device *dev)
 	/* Initialize all MAC registers */
 	r6040_init_mac_regs(dev);
 
-	phy_start(lp->phydev);
+	phy_start(dev->phydev);
 
 	return 0;
 }
@@ -813,6 +814,9 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
 	void __iomem *ioaddr = lp->base;
 	unsigned long flags;
 
+	if (skb_put_padto(skb, ETH_ZLEN) < 0)
+		return NETDEV_TX_OK;
+
 	/* Critical Section */
 	spin_lock_irqsave(&lp->lock, flags);
 
@@ -824,17 +828,10 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	/* Statistic Counter */
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
 	/* Set TX descriptor & Transmit it */
 	lp->tx_free_desc--;
 	descptr = lp->tx_insert_ptr;
-	if (skb->len < ETH_ZLEN)
-		descptr->len = ETH_ZLEN;
-	else
-		descptr->len = skb->len;
-
+	descptr->len = skb->len;
 	descptr->skb_ptr = skb;
 	descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
 		skb->data, skb->len, PCI_DMA_TODEVICE));
@@ -843,7 +840,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
 	skb_tx_timestamp(skb);
 
 	/* Trigger the MAC to check the TX descriptor */
-	iowrite16(TM2TX, ioaddr + MTPR);
+	if (!skb->xmit_more || netif_queue_stopped(dev))
+		iowrite16(TM2TX, ioaddr + MTPR);
 	lp->tx_insert_ptr = descptr->vndescp;
 
 	/* If no tx resource, stop */
@@ -957,26 +955,12 @@ static void netdev_get_drvinfo(struct net_device *dev,
 	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct r6040_private *rp = netdev_priv(dev);
-
-	return phy_ethtool_gset(rp->phydev, cmd);
-}
-
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct r6040_private *rp = netdev_priv(dev);
-
-	return phy_ethtool_sset(rp->phydev, cmd);
-}
-
 static const struct ethtool_ops netdev_ethtool_ops = {
 	.get_drvinfo		= netdev_get_drvinfo,
-	.get_settings		= netdev_get_settings,
-	.set_settings		= netdev_set_settings,
 	.get_link		= ethtool_op_get_link,
 	.get_ts_info		= ethtool_op_get_ts_info,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 static const struct net_device_ops r6040_netdev_ops = {
@@ -998,7 +982,7 @@ static const struct net_device_ops r6040_netdev_ops = {
 static void r6040_adjust_link(struct net_device *dev)
 {
 	struct r6040_private *lp = netdev_priv(dev);
-	struct phy_device *phydev = lp->phydev;
+	struct phy_device *phydev = dev->phydev;
 	int status_changed = 0;
 	void __iomem *ioaddr = lp->base;
 
@@ -1018,14 +1002,8 @@ static void r6040_adjust_link(struct net_device *dev)
 		lp->old_duplex = phydev->duplex;
 	}
 
-	if (status_changed) {
-		pr_info("%s: link %s", dev->name, phydev->link ?
-			"UP" : "DOWN");
-		if (phydev->link)
-			pr_cont(" - %d/%s", phydev->speed,
-				DUPLEX_FULL == phydev->duplex ? "full" : "half");
-		pr_cont("\n");
-	}
+	if (status_changed)
+		phy_print_status(phydev);
 }
 
 static int r6040_mii_probe(struct net_device *dev)
"full" : "half"); - pr_cont("\n"); - } + if (status_changed) + phy_print_status(phydev); } static int r6040_mii_probe(struct net_device *dev) @@ -1057,7 +1035,6 @@ static int r6040_mii_probe(struct net_device *dev) | SUPPORTED_TP); phydev->advertising = phydev->supported; - lp->phydev = phydev; lp->old_link = 0; lp->old_duplex = -1; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index deae10d74..5297bf772 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) unsigned int rx_tail = cp->rx_tail; int rx; -rx_status_loop: rx = 0; +rx_status_loop: cpw16(IntrStatus, cp_rx_intr_mask); while (rx < budget) { diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index ef668d300..da4c2d8a4 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1667,6 +1667,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) int i; u8 tmp8; + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); + netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", RTL_R8(ChipCmd), RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus)); @@ -1696,10 +1700,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) spin_unlock_irq(&tp->lock); /* ...and finally, reset everything */ - if (netif_running(dev)) { - rtl8139_hw_start (dev); - netif_wake_queue (dev); - } + napi_enable(&tp->napi); + rtl8139_hw_start(dev); + netif_wake_queue(dev); + spin_unlock_bh(&tp->rx_lock); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e4030c3d7..c071624e7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1731,13 +1731,21 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; + + pm_runtime_get_noresume(d); rtl_lock_work(tp); wol->supported = WAKE_ANY; - wol->wolopts = __rtl8169_get_wol(tp); + if (pm_runtime_active(d)) + wol->wolopts = __rtl8169_get_wol(tp); + else + wol->wolopts = tp->saved_wolopts; rtl_unlock_work(tp); + + pm_runtime_put_noidle(d); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1827,6 +1835,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; + + pm_runtime_get_noresume(d); rtl_lock_work(tp); @@ -1834,12 +1845,17 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) tp->features |= RTL_FEATURE_WOL; else tp->features &= ~RTL_FEATURE_WOL; - __rtl8169_set_wol(tp, wol->wolopts); + if (pm_runtime_active(d)) + __rtl8169_set_wol(tp, wol->wolopts); + else + tp->saved_wolopts = wol->wolopts; rtl_unlock_work(tp); device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + pm_runtime_put_noidle(d); + return 0; } @@ -2274,11 +2290,17 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; struct rtl8169_counters *counters = tp->counters; ASSERT_RTNL(); - rtl8169_update_counters(dev); + 
+	pm_runtime_get_noresume(d);
+
+	if (pm_runtime_active(d))
+		rtl8169_update_counters(dev);
+
+	pm_runtime_put_noidle(d);
 
 	data[0] = le64_to_cpu(counters->tx_packets);
 	data[1] = le64_to_cpu(counters->rx_packets);
@@ -4440,6 +4462,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
 static int rtl_set_mac_address(struct net_device *dev, void *p)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
+	struct device *d = &tp->pci_dev->dev;
 	struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
@@ -4447,7 +4470,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
 
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
-	rtl_rar_set(tp, dev->dev_addr);
+	pm_runtime_get_noresume(d);
+
+	if (pm_runtime_active(d))
+		rtl_rar_set(tp, dev->dev_addr);
+
+	pm_runtime_put_noidle(d);
 
 	return 0;
 }
@@ -7850,6 +7878,7 @@ static int rtl8169_runtime_resume(struct device *device)
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);
+	rtl_rar_set(tp, dev->dev_addr);
 
 	if (!tp->TxDescArray)
 		return 0;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 867caf6e7..1e1cc0fad 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -362,8 +362,6 @@ static void ravb_emac_init(struct net_device *ndev)
 	ravb_write(ndev,
 		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
 
-	ravb_write(ndev, 1, MPR);
-
 	/* E-MAC status register clear */
 	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
 
@@ -402,7 +400,8 @@ static int ravb_dmac_init(struct net_device *ndev)
 #endif
 
 	/* Set AVB RX */
-	ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+	ravb_write(ndev,
+		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
 
 	/* Set FIFO size */
 	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
@@ -1006,6 +1005,7 @@ static int ravb_phy_init(struct net_device *ndev)
 	}
 	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
 				priv->phy_interface);
+	of_node_put(pn);
 	if (!phydev) {
 		netdev_err(ndev, "failed to connect PHY\n");
 		return -ENOENT;
@@ -1909,7 +1909,6 @@ static int ravb_probe(struct platform_device *pdev)
 
 	/* The Ether-specific entries in the device structure. */
 	ndev->base_addr = res->start;
-	ndev->dma = -1;
 
 	chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
 
@@ -2111,8 +2110,7 @@ static int ravb_runtime_nop(struct device *dev)
 }
 
 static const struct dev_pm_ops ravb_dev_pm_ops = {
-	.runtime_suspend = ravb_runtime_nop,
-	.runtime_resume = ravb_runtime_nop,
+	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
 };
 
 #define RAVB_PM_OPS (&ravb_dev_pm_ops)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 04cd39f66..054e795df 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 
 	[ARSTR]		= 0x0000,
 	[TSU_CTRST]	= 0x0004,
+	[TSU_FWSLC]	= 0x0038,
 	[TSU_VTAG0]	= 0x0058,
 	[TSU_ADSBSY]	= 0x0060,
 	[TSU_TEN]	= 0x0064,
+	[TSU_POST1]	= 0x0070,
+	[TSU_POST2]	= 0x0074,
+	[TSU_POST3]	= 0x0078,
+	[TSU_POST4]	= 0x007c,
 	[TSU_ADRH0]	= 0x0100,
 
 	[TXNLCR0]	= 0x0080,
@@ -1780,6 +1785,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
 				     sh_eth_adjust_link, 0,
 				     mdp->phy_interface);
+		of_node_put(pn);
 		if (!phydev)
 			phydev = ERR_PTR(-ENOENT);
 	} else {
@@ -2785,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 {
 	if (sh_eth_is_rz_fast_ether(mdp)) {
 		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+				 TSU_FWSLC);	/* Enable POST registers */
 		return;
 	}
 
@@ -2996,7 +3004,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	if (devno < 0)
 		devno = 0;
 
-	ndev->dma = -1;
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)
 		goto out_release;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 28b775e5a..f0b09b05e 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1996,7 +1996,8 @@ static int rocker_port_change_proto_down(struct net_device *dev,
 	return 0;
 }
 
-static void rocker_port_neigh_destroy(struct neighbour *n)
+static void rocker_port_neigh_destroy(struct net_device *dev,
+				      struct neighbour *n)
 {
 	struct rocker_port *rocker_port = netdev_priv(n->dev);
 	int err;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 45019649b..5cb51b609 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -475,7 +475,6 @@ struct sxgbe_priv_data {
 	int rxcsum_insertion;
 	spinlock_t stats_lock;	/* lock for tx/rx statatics */
 
-	struct phy_device *phydev;
 	int oldlink;
 	int speed;
 	int oldduplex;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index c0981ae45..542b67d43 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -147,7 +147,7 @@ static int sxgbe_get_eee(struct net_device *dev,
 	edata->eee_active = priv->eee_active;
 	edata->tx_lpi_timer = priv->tx_lpi_timer;
 
-	return phy_ethtool_get_eee(priv->phydev, edata);
+	return phy_ethtool_get_eee(dev->phydev, edata);
 }
 
 static int sxgbe_set_eee(struct net_device *dev,
@@ -172,7 +172,7 @@ static int sxgbe_set_eee(struct net_device *dev,
 		priv->tx_lpi_timer = edata->tx_lpi_timer;
 	}
 
-	return phy_ethtool_set_eee(priv->phydev, edata);
+	return phy_ethtool_set_eee(dev->phydev, edata);
 }
 
 static void sxgbe_getdrvinfo(struct net_device *dev,
@@ -182,27 +182,6 @@ static void sxgbe_getdrvinfo(struct net_device *dev,
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
-static int sxgbe_getsettings(struct net_device *dev,
-			     struct ethtool_cmd *cmd)
-{
-	struct sxgbe_priv_data *priv = netdev_priv(dev);
-
-	if (priv->phydev)
-		return phy_ethtool_gset(priv->phydev, cmd);
-
-	return -EOPNOTSUPP;
-}
-
-static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct sxgbe_priv_data *priv = netdev_priv(dev);
-
-	if (priv->phydev)
-		return phy_ethtool_sset(priv->phydev, cmd);
-
-	return -EOPNOTSUPP;
-}
-
 static u32 sxgbe_getmsglevel(struct net_device *dev)
 {
 	struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -255,7 +234,7 @@ static void sxgbe_get_ethtool_stats(struct net_device *dev,
 	char *p;
 
 	if (priv->eee_enabled) {
-		int val = phy_get_eee_err(priv->phydev);
+		int val = phy_get_eee_err(dev->phydev);
 
 		if (val)
 			priv->xstats.eee_wakeup_error_n = val;
@@ -499,8 +478,6 @@ static int sxgbe_get_regs_len(struct net_device *dev)
 
 static const struct ethtool_ops sxgbe_ethtool_ops = {
 	.get_drvinfo = sxgbe_getdrvinfo,
-	.get_settings = sxgbe_getsettings,
-	.set_settings = sxgbe_setsettings,
 	.get_msglevel = sxgbe_getmsglevel,
 	.set_msglevel = sxgbe_setmsglevel,
 	.get_link = ethtool_op_get_link,
@@ -516,6 +493,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
 	.get_regs_len = sxgbe_get_regs_len,
 	.get_eee = sxgbe_get_eee,
 	.set_eee = sxgbe_set_eee,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 void sxgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 413ea14ab..ea44a2456 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -124,12 +124,13 @@ static void sxgbe_eee_ctrl_timer(unsigned long arg)
  */
 bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
 {
+	struct net_device *ndev = priv->dev;
 	bool ret = false;
 
 	/* MAC core supports the EEE feature. */
 	if (priv->hw_cap.eee) {
 		/* Check if the PHY supports EEE */
-		if (phy_init_eee(priv->phydev, 1))
+		if (phy_init_eee(ndev->phydev, 1))
 			return false;
 
 		priv->eee_active = 1;
@@ -152,12 +153,14 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
 
 static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
 {
+	struct net_device *ndev = priv->dev;
+
 	/* When the EEE has been already initialised we have to
 	 * modify the PLS bit in the LPI ctrl & status reg according
 	 * to the PHY link status. For this reason.
 	 */
 	if (priv->eee_enabled)
-		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+		priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
 }
 
 /**
@@ -203,7 +206,7 @@ static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
 static void sxgbe_adjust_link(struct net_device *dev)
 {
 	struct sxgbe_priv_data *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 	u8 new_state = 0;
 	u8 speed = 0xff;
 
@@ -306,9 +309,6 @@ static int sxgbe_init_phy(struct net_device *ndev)
 	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
 		   __func__, phydev->phy_id, phydev->link);
 
-	/* save phy device in private structure */
-	priv->phydev = phydev;
-
 	return 0;
 }
 
@@ -1173,8 +1173,8 @@ static int sxgbe_open(struct net_device *dev)
 	priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
 	priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
 
-	if (priv->phydev)
-		phy_start(priv->phydev);
+	if (dev->phydev)
+		phy_start(dev->phydev);
 
 	/* initialise TX coalesce parameters */
 	sxgbe_tx_init_coalesce(priv);
@@ -1194,8 +1194,8 @@ static int sxgbe_open(struct net_device *dev)
 
 init_error:
 	free_dma_desc_resources(priv);
-	if (priv->phydev)
-		phy_disconnect(priv->phydev);
+	if (dev->phydev)
+		phy_disconnect(dev->phydev);
 phy_error:
 	clk_disable_unprepare(priv->sxgbe_clk);
 
@@ -1216,10 +1216,9 @@ static int sxgbe_release(struct net_device *dev)
 		del_timer_sync(&priv->eee_ctrl_timer);
 
 	/* Stop and disconnect the PHY */
-	if (priv->phydev) {
-		phy_stop(priv->phydev);
-		phy_disconnect(priv->phydev);
-		priv->phydev = NULL;
+	if (dev->phydev) {
+		phy_stop(dev->phydev);
+		phy_disconnect(dev->phydev);
 	}
 
 	netif_tx_stop_all_queues(dev);
@@ -1969,7 +1968,6 @@ static void sxgbe_poll_controller(struct net_device *dev)
  */
 static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct sxgbe_priv_data *priv = netdev_priv(dev);
 	int ret = -EOPNOTSUPP;
 
 	if (!netif_running(dev))
@@ -1979,9 +1977,9 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
-		if (!priv->phydev)
+		if (!dev->phydev)
 			return -EINVAL;
-		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
 		break;
 	default:
 		break;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1f3091274..e00a669e9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -50,14 +50,34 @@ enum {
 #define HUNT_FILTER_TBL_ROWS 8192
 
 #define EFX_EF10_FILTER_ID_INVALID 0xffff
+
+#define EFX_EF10_FILTER_DEV_UC_MAX	32
+#define EFX_EF10_FILTER_DEV_MC_MAX	256
+
+/* VLAN list entry */
+struct efx_ef10_vlan {
+	struct list_head list;
+	u16 vid;
+};
+
+/* Per-VLAN filters information */
+struct efx_ef10_filter_vlan {
+	struct list_head list;
+	u16 vid;
+	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
+	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
+	u16 ucdef;
+	u16 bcast;
+	u16 mcdef;
+};
+
 struct efx_ef10_dev_addr {
 	u8 addr[ETH_ALEN];
-	u16 id;
 };
 
 struct efx_ef10_filter_table {
-/* The RX match field masks supported by this fw & hw, in order of priority */
-	enum efx_filter_match_flags rx_match_flags[
+/* The MCDI match masks supported by this fw & hw, in order of priority */
+	u32 rx_match_mcdi_flags[
 		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
 	unsigned int rx_match_count;
 
@@ -73,16 +93,16 @@ struct efx_ef10_filter_table {
 	} *entry;
 	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_DEV_UC_MAX	32
-#define EFX_EF10_FILTER_DEV_MC_MAX	256
 	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
 	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
 	int dev_uc_count;
 	int dev_mc_count;
-/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
-	u16 ucdef_id;
-	u16 bcast_id;
-	u16 mcdef_id;
+	bool uc_promisc;
+	bool mc_promisc;
+/* Whether in multicast promiscuous mode when last changed */
+	bool mc_promisc_last;
+	bool vlan_filter;
+	struct list_head vlan_list;
 };
 
 /* An arbitrary search limit for the software hash table */
@@ -90,6 +110,10 @@ struct efx_ef10_filter_table {
 
 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
+static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
+					      struct efx_ef10_filter_vlan *vlan);
+static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
 
 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
 {
@@ -275,6 +299,131 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev,
 		       ? 1 : 0);
 }
 
+static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct efx_ef10_vlan *vlan;
+
+	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
+
+	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+		if (vlan->vid == vid)
+			return vlan;
+	}
+
+	return NULL;
+}
+
+static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct efx_ef10_vlan *vlan;
+	int rc;
+
+	mutex_lock(&nic_data->vlan_lock);
+
+	vlan = efx_ef10_find_vlan(efx, vid);
+	if (vlan) {
+		/* We add VID 0 on init. 8021q adds it on module init
+		 * for all interfaces with VLAN filtring feature.
+		 */
+		if (vid == 0)
+			goto done_unlock;
+		netif_warn(efx, drv, efx->net_dev,
+			   "VLAN %u already added\n", vid);
+		rc = -EALREADY;
+		goto fail_exist;
+	}
+
+	rc = -ENOMEM;
+	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
+		goto fail_alloc;
+
+	vlan->vid = vid;
+
+	list_add_tail(&vlan->list, &nic_data->vlan_list);
+
+	if (efx->filter_state) {
+		mutex_lock(&efx->mac_lock);
+		down_write(&efx->filter_sem);
+		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+		up_write(&efx->filter_sem);
+		mutex_unlock(&efx->mac_lock);
+		if (rc)
+			goto fail_filter_add_vlan;
+	}
+
+done_unlock:
+	mutex_unlock(&nic_data->vlan_lock);
+	return 0;
+
+fail_filter_add_vlan:
+	list_del(&vlan->list);
+	kfree(vlan);
+fail_alloc:
+fail_exist:
+	mutex_unlock(&nic_data->vlan_lock);
+	return rc;
+}
+
+static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
+				       struct efx_ef10_vlan *vlan)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
+
+	if (efx->filter_state) {
+		down_write(&efx->filter_sem);
+		efx_ef10_filter_del_vlan(efx, vlan->vid);
+		up_write(&efx->filter_sem);
+	}
+
+	list_del(&vlan->list);
+	kfree(vlan);
+}
+
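efx_ef10_add_vlan() and its helpers above keep a mutex-guarded list of configured VIDs so hardware filters can be installed and replayed per VLAN, with duplicate VIDs rejected (except VID 0, which the driver itself adds on init). A reduced userspace model of the same bookkeeping follows; the names mirror the code above, but the list implementation and return values are illustrative only.

/* Reduced model of the ef10 VLAN bookkeeping above: a list of VIDs
 * with duplicate detection. Illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct vlan {
	unsigned short vid;
	struct vlan *next;
};

static struct vlan *vlan_list;

static struct vlan *find_vlan(unsigned short vid)
{
	struct vlan *v;

	for (v = vlan_list; v; v = v->next)
		if (v->vid == vid)
			return v;
	return NULL;
}

static int add_vlan(unsigned short vid)
{
	struct vlan *v;

	if (find_vlan(vid))
		return -1;		/* -EALREADY in the driver */
	v = malloc(sizeof(*v));
	if (!v)
		return -1;		/* -ENOMEM in the driver */
	v->vid = vid;
	v->next = vlan_list;
	vlan_list = v;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", add_vlan(0), add_vlan(5), add_vlan(5));
	return 0;	/* prints "0 0 -1" */
}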
+static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct efx_ef10_vlan *vlan;
+	int rc = 0;
+
+	/* 8021q removes VID 0 on module unload for all interfaces
+	 * with VLAN filtering feature. We need to keep it to receive
+	 * untagged traffic.
+	 */
+	if (vid == 0)
+		return 0;
+
+	mutex_lock(&nic_data->vlan_lock);
+
+	vlan = efx_ef10_find_vlan(efx, vid);
+	if (!vlan) {
+		netif_err(efx, drv, efx->net_dev,
+			  "VLAN %u to be deleted not found\n", vid);
+		rc = -ENOENT;
+	} else {
+		efx_ef10_del_vlan_internal(efx, vlan);
+	}
+
+	mutex_unlock(&nic_data->vlan_lock);
+
+	return rc;
+}
+
+static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct efx_ef10_vlan *vlan, *next_vlan;
+
+	mutex_lock(&nic_data->vlan_lock);
+	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
+		efx_ef10_del_vlan_internal(efx, vlan);
+	mutex_unlock(&nic_data->vlan_lock);
+}
+
 static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
 		   NULL);
 static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
@@ -421,8 +570,30 @@ static int efx_ef10_probe(struct efx_nic *efx)
 #endif
 	ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
 
+	INIT_LIST_HEAD(&nic_data->vlan_list);
+	mutex_init(&nic_data->vlan_lock);
+
+	/* Add unspecified VID to support VLAN filtering being disabled */
+	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
+	if (rc)
+		goto fail_add_vid_unspec;
+
+	/* If VLAN filtering is enabled, we need VID 0 to get untagged
+	 * traffic. It is added automatically if 8021q module is loaded,
+	 * but we can't rely on it since module may be not loaded.
+	 */
+	rc = efx_ef10_add_vlan(efx, 0);
+	if (rc)
+		goto fail_add_vid_0;
+
 	return 0;
 
+fail_add_vid_0:
+	efx_ef10_cleanup_vlans(efx);
+fail_add_vid_unspec:
+	mutex_destroy(&nic_data->vlan_lock);
+	efx_ptp_remove(efx);
+	efx_mcdi_mon_remove(efx);
 fail5:
 	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
 fail4:
@@ -676,6 +847,9 @@ static void efx_ef10_remove(struct efx_nic *efx)
 	}
 #endif
 
+	efx_ef10_cleanup_vlans(efx);
+	mutex_destroy(&nic_data->vlan_lock);
+
 	efx_ptp_remove(efx);
 
 	efx_mcdi_mon_remove(efx);
@@ -704,6 +878,45 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
 	return efx_ef10_probe(efx);
 }
 
+int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
+			    u32 *port_flags, u32 *vadaptor_flags,
+			    unsigned int *vlan_tags)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	if (nic_data->datapath_caps &
+	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
+		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
+			       port_id);
+
+		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
+				  outbuf, sizeof(outbuf), &outlen);
+		if (rc)
+			return rc;
+
+		if (outlen < sizeof(outbuf)) {
+			rc = -EIO;
+			return rc;
+		}
+	}
+
+	if (port_flags)
+		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
+	if (vadaptor_flags)
+		*vadaptor_flags =
+			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
+	if (vlan_tags)
+		*vlan_tags =
+			MCDI_DWORD(outbuf,
+				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);
+
+	return 0;
+}
+
 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
@@ -1304,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
 	}
 
 #if BITS_PER_LONG == 64
+	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
 	mask[0] = raw_mask[0];
 	mask[1] = raw_mask[1];
 #else
+	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
 	mask[0] = raw_mask[0] & 0xffffffff;
 	mask[1] = raw_mask[0] >> 32;
0xffffffff; - mask[3] = raw_mask[1] >> 32; #endif } @@ -3040,15 +3254,55 @@ static int efx_ef10_filter_push(struct efx_nic *efx, return rc; } -static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, - enum efx_filter_match_flags match_flags) +static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) { + unsigned int match_flags = spec->match_flags; + u32 mcdi_flags = 0; + + if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { + match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; + mcdi_flags |= + is_multicast_ether_addr(spec->loc_mac) ? + (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) : + (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN); + } + +#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \ + unsigned int old_match_flags = match_flags; \ + match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ + if (match_flags != old_match_flags) \ + mcdi_flags |= \ + (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ + } + MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP); + MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP); + MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC); + MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT); + MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC); + MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT); + MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN); + MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO); +#undef MAP_FILTER_TO_MCDI_FLAG + + /* Did we map them all? */ + WARN_ON_ONCE(match_flags); + + return mcdi_flags; +} + +static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, + const struct efx_filter_spec *spec) +{ + u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); unsigned int match_pri; for (match_pri = 0; match_pri < table->rx_match_count; match_pri++) - if (table->rx_match_flags[match_pri] == match_flags) + if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) return match_pri; return -EPROTONOSUPPORT; @@ -3074,7 +3328,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, EFX_FILTER_FLAG_RX) return -EINVAL; - rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); + rc = efx_ef10_filter_pri(table, spec); if (rc < 0) return rc; match_pri = rc; @@ -3313,7 +3567,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, spec = efx_ef10_filter_entry_spec(table, filter_idx); if (!spec || (!by_index && - efx_ef10_filter_rx_match_pri(table, spec->match_flags) != + efx_ef10_filter_pri(table, spec) != filter_id / HUNT_FILTER_TBL_ROWS)) { rc = -ENOENT; goto out_unlock; @@ -3394,12 +3648,13 @@ static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) return filter_id % HUNT_FILTER_TBL_ROWS; } -static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id) +static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) { - return efx_ef10_filter_remove_internal(efx, 1U << priority, - filter_id, true); + if (filter_id == EFX_EF10_FILTER_ID_INVALID) + return; + efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true); } static int efx_ef10_filter_get_safe(struct efx_nic *efx, @@ -3414,7 +3669,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, spin_lock_bh(&efx->filter_lock); saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); if (saved_spec && saved_spec->priority == priority && - efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == + 
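/*
 * A table-driven sketch of what MAP_FILTER_TO_MCDI_FLAG above does: each
 * generic match flag is cleared as it is translated, so any bit still set at
 * the end is an unmapped flag (the driver WARNs on exactly that). The flag
 * values and MCDI bit positions below are invented for illustration.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define MATCH_REM_HOST (1u << 0)
#define MATCH_LOC_HOST (1u << 1)
#define MATCH_REM_PORT (1u << 2)

static const struct { uint32_t gen; unsigned int mcdi_lbn; } flag_map[] = {
	{ MATCH_REM_HOST, 0 },	/* -> SRC_IP */
	{ MATCH_LOC_HOST, 1 },	/* -> DST_IP */
	{ MATCH_REM_PORT, 2 },	/* -> SRC_PORT */
};

static uint32_t mcdi_flags_from_match(uint32_t match_flags)
{
	uint32_t mcdi_flags = 0;

	for (size_t i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++) {
		if (match_flags & flag_map[i].gen) {
			match_flags &= ~flag_map[i].gen;
			mcdi_flags |= 1u << flag_map[i].mcdi_lbn;
		}
	}
	assert(match_flags == 0);	/* every flag must be translatable */
	return mcdi_flags;
}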
efx_ef10_filter_pri(table, saved_spec) == filter_id / HUNT_FILTER_TBL_ROWS) { *spec = *saved_spec; rc = 0; @@ -3487,8 +3742,7 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, count = -EMSGSIZE; break; } - buf[count++] = (efx_ef10_filter_rx_match_pri( - table, spec->match_flags) * + buf[count++] = (efx_ef10_filter_pri(table, spec) * HUNT_FILTER_TBL_ROWS + filter_idx); } @@ -3724,15 +3978,58 @@ static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) return match_flags; } +static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_vlan *vlan, *next_vlan; + + /* See comment in efx_ef10_filter_table_remove() */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + + if (!table) + return; + + list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list) + efx_ef10_filter_del_vlan_internal(efx, vlan); +} + +static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, + enum efx_filter_match_flags match_flags) +{ + unsigned int match_pri; + int mf; + + for (match_pri = 0; + match_pri < table->rx_match_count; + match_pri++) { + mf = efx_ef10_filter_match_flags_from_mcdi( + table->rx_match_mcdi_flags[match_pri]); + if (mf == match_flags) + return true; + } + + return false; +} + static int efx_ef10_filter_table_probe(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; unsigned int pd_match_pri, pd_match_count; struct efx_ef10_filter_table *table; + struct efx_ef10_vlan *vlan; size_t outlen; int rc; + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return -EINVAL; + + if (efx->filter_state) /* already probed */ + return 0; + table = kzalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; @@ -3765,24 +4062,48 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", __func__, mcdi_flags, pd_match_pri, rc, table->rx_match_count); - table->rx_match_flags[table->rx_match_count++] = rc; + table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags; + table->rx_match_count++; } } + if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && + !(efx_ef10_filter_match_supported(table, + (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && + efx_ef10_filter_match_supported(table, + (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { + netif_info(efx, probe, net_dev, + "VLAN filters are not supported in this firmware variant\n"); + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + } + table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); if (!table->entry) { rc = -ENOMEM; goto fail; } - table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; - table->bcast_id = EFX_EF10_FILTER_ID_INVALID; - table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + table->mc_promisc_last = false; + table->vlan_filter = + !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); + INIT_LIST_HEAD(&table->vlan_list); efx->filter_state = table; init_waitqueue_head(&table->waitq); + + list_for_each_entry(vlan, &nic_data->vlan_list, list) { + rc = efx_ef10_filter_add_vlan(efx, vlan->vid); + if (rc) + goto fail_add_vlan; + } + return 0; +fail_add_vlan: + 
efx_ef10_filter_cleanup_vlans(efx); + efx->filter_state = NULL; fail: kfree(table); return rc; @@ -3843,7 +4164,6 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) nic_data->must_restore_filters = false; } -/* Caller must hold efx->filter_sem for write */ static void efx_ef10_filter_table_remove(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; @@ -3852,7 +4172,17 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) unsigned int filter_idx; int rc; + efx_ef10_filter_cleanup_vlans(efx); efx->filter_state = NULL; + /* If we were called without locking, then it's not safe to free + * the table as others might be using it. So we just WARN, leak + * the memory, and potentially get an inconsistent filter table + * state. + * This should never actually happen. + */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + if (!table) return; @@ -3880,37 +4210,54 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) kfree(table); } -#define EFX_EF10_FILTER_DO_MARK_OLD(id) \ - if (id != EFX_EF10_FILTER_ID_INVALID) { \ - filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ - if (!table->entry[filter_idx].spec) \ - netif_dbg(efx, drv, efx->net_dev, \ - "%s: marked null spec old %04x:%04x\n", \ - __func__, id, filter_idx); \ - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\ - } -static void efx_ef10_filter_mark_old(struct efx_nic *efx) +static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) { struct efx_ef10_filter_table *table = efx->filter_state; - unsigned int filter_idx, i; + unsigned int filter_idx; - if (!table) - return; + if (*id != EFX_EF10_FILTER_ID_INVALID) { + filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id); + if (!table->entry[filter_idx].spec) + netif_dbg(efx, drv, efx->net_dev, + "marked null spec old %04x:%04x\n", *id, + filter_idx); + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; + *id = EFX_EF10_FILTER_ID_INVALID; + } +} + +/* Mark old per-VLAN filters that may need to be removed */ +static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + unsigned int i; - /* Mark old filters that may need to be removed */ - spin_lock_bh(&efx->filter_lock); for (i = 0; i < table->dev_uc_count; i++) - EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id); + efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); for (i = 0; i < table->dev_mc_count; i++) - EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id); - EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id); - EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id); - EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id); + efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); + efx_ef10_filter_mark_one_old(efx, &vlan->ucdef); + efx_ef10_filter_mark_one_old(efx, &vlan->bcast); + efx_ef10_filter_mark_one_old(efx, &vlan->mcdef); +} + +/* Mark old filters that may need to be removed. 
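/*
 * The mark-old machinery above is a mark-and-sweep: marking sets AUTO_OLD on
 * the table entry and invalidates the owner's saved ID, re-inserting clears
 * the mark, and the sweep removes whatever stayed old. A toy version of that
 * life cycle, with invented sizes and types:
 */
#include <stdbool.h>
#include <stdint.h>

#define FILTER_ID_INVALID 0xffff
#define N_FILTERS 8

static struct { bool in_use; bool old; } filter_table[N_FILTERS];

static void mark_one_old(uint16_t *id)
{
	if (*id != FILTER_ID_INVALID) {
		filter_table[*id].old = true;
		*id = FILTER_ID_INVALID;	/* owner forgets the filter */
	}
}

static void remove_old(void)
{
	for (unsigned int i = 0; i < N_FILTERS; i++)
		if (filter_table[i].in_use && filter_table[i].old)
			filter_table[i].in_use = false;	/* sweep leftovers */
}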
+ * Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ +static void efx_ef10_filter_mark_old(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_vlan *vlan; + + spin_lock_bh(&efx->filter_lock); + list_for_each_entry(vlan, &table->vlan_list, list) + _efx_ef10_filter_vlan_mark_old(efx, vlan); spin_unlock_bh(&efx->filter_lock); } -#undef EFX_EF10_FILTER_DO_MARK_OLD -static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) +static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; @@ -3918,45 +4265,38 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) int addr_count; unsigned int i; - table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; addr_count = netdev_uc_count(net_dev); - if (net_dev->flags & IFF_PROMISC) - *promisc = true; + table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); table->dev_uc_count = 1 + addr_count; ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); i = 1; netdev_for_each_uc_addr(uc, net_dev) { if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { - *promisc = true; + table->uc_promisc = true; break; } ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); - table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID; i++; } } -static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) +static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; struct netdev_hw_addr *mc; unsigned int i, addr_count; - table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; - table->bcast_id = EFX_EF10_FILTER_ID_INVALID; - if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) - *promisc = true; + table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); addr_count = netdev_mc_count(net_dev); i = 0; netdev_for_each_mc_addr(mc, net_dev) { if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { - *promisc = true; + table->mc_promisc = true; break; } ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); - table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID; i++; } @@ -3964,7 +4304,8 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) } static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, - bool multicast, bool rollback) + struct efx_ef10_filter_vlan *vlan, + bool multicast, bool rollback) { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_dev_addr *addr_list; @@ -3973,14 +4314,17 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, u8 baddr[ETH_ALEN]; unsigned int i, j; int addr_count; + u16 *ids; int rc; if (multicast) { addr_list = table->dev_mc_list; addr_count = table->dev_mc_count; + ids = vlan->mc; } else { addr_list = table->dev_uc_list; addr_count = table->dev_uc_count; + ids = vlan->uc; } filter_flags = efx_rss_enabled(efx) ? 
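/*
 * Both address-list helpers above degrade to promiscuous mode rather than
 * silently dropping addresses once the netdev list outgrows the fixed table.
 * The overflow rule in isolation (sizes invented for the sketch):
 */
#include <stdbool.h>
#include <string.h>

#define UC_MAX 32
#define MAC_LEN 6

static unsigned char uc_list[UC_MAX][MAC_LEN];
static int uc_count;
static bool uc_promisc;

static void copy_uc_addrs(const unsigned char (*addrs)[MAC_LEN], int n,
			  bool iff_promisc)
{
	uc_promisc = iff_promisc;	/* IFF_PROMISC forces it regardless */
	uc_count = 0;
	for (int i = 0; i < n; i++) {
		if (uc_count >= UC_MAX) {
			uc_promisc = true;	/* too many: use a catch-all */
			break;
		}
		memcpy(uc_list[uc_count++], addrs[i], MAC_LEN);
	}
}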
EFX_FILTER_FLAG_RX_RSS : 0; @@ -3988,8 +4332,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - addr_list[i].addr); + efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { if (rollback) { @@ -3998,12 +4341,10 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, rc); /* Fall back to promiscuous */ for (j = 0; j < i; j++) { - if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) - continue; efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - addr_list[j].id); - addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + ids[j]); + ids[j] = EFX_EF10_FILTER_ID_INVALID; } return rc; } else { @@ -4011,40 +4352,40 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, rc = EFX_EF10_FILTER_ID_INVALID; } } - addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc); + ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc); } if (multicast && rollback) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); + efx_filter_set_eth_local(&spec, vlan->vid, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { netif_warn(efx, drv, efx->net_dev, "Broadcast filter insert failed rc=%d\n", rc); /* Fall back to promiscuous */ for (j = 0; j < i; j++) { - if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) - continue; efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - addr_list[j].id); - addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + ids[j]); + ids[j] = EFX_EF10_FILTER_ID_INVALID; } return rc; } else { - table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID(vlan->bcast != + EFX_EF10_FILTER_ID_INVALID); + vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); } } return 0; } -static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, - bool rollback) +static int efx_ef10_filter_insert_def(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan, + bool multicast, bool rollback) { - struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; enum efx_filter_flags filter_flags; struct efx_filter_spec spec; @@ -4060,6 +4401,9 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, else efx_filter_set_uc_def(&spec); + if (vlan->vid != EFX_FILTER_VID_UNSPEC) + efx_filter_set_eth_local(&spec, vlan->vid, NULL); + rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, @@ -4067,14 +4411,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, "%scast mismatch filter insert failed rc=%d\n", multicast ? 
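/*
 * When rollback is requested above, a failed insert unwinds every filter
 * inserted so far and invalidates the saved IDs, leaving a clean slate for
 * the promiscuous fallback. A skeleton of that unwind; the hw_filter_* hooks
 * are hypothetical stand-ins, not driver functions.
 */
#include <stdint.h>

#define FILTER_ID_INVALID 0xffff

static int hw_filter_insert(uint16_t vid, const unsigned char *mac)
{
	(void)vid; (void)mac;
	return 1;	/* stub: pretend hardware returned filter ID 1 */
}

static void hw_filter_remove(uint16_t id)
{
	(void)id;	/* stub */
}

static int insert_addr_list(uint16_t vid, const unsigned char (*macs)[6],
			    int n, uint16_t *ids)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = hw_filter_insert(vid, macs[i]);
		if (rc < 0) {
			/* unwind 0..i-1 so the caller can fall back to a
			 * promiscuous filter from a clean slate
			 */
			while (--i >= 0) {
				hw_filter_remove(ids[i]);
				ids[i] = FILTER_ID_INVALID;
			}
			return rc;
		}
		ids[i] = (uint16_t)rc;
	}
	return 0;
}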
"Multi" : "Uni", rc); } else if (multicast) { - table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID); + vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc); if (!nic_data->workaround_26807) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - baddr); + efx_filter_set_eth_local(&spec, vlan->vid, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { netif_warn(efx, drv, efx->net_dev, @@ -4084,17 +4428,20 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, /* Roll back the mc_def filter */ efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - table->mcdef_id); - table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + vlan->mcdef); + vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; return rc; } } else { - table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID(vlan->bcast != + EFX_EF10_FILTER_ID_INVALID); + vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); } } rc = 0; } else { - table->ucdef_id = rc; + EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID); + vlan->ucdef = rc; rc = 0; } return rc; @@ -4203,64 +4550,55 @@ reset_nic: /* Caller must hold efx->filter_sem for read if race against * efx_ef10_filter_table_remove() is possible */ -static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan) { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct net_device *net_dev = efx->net_dev; - bool uc_promisc = false, mc_promisc = false; - if (!efx_dev_registered(efx)) - return; - - if (!table) - return; - - efx_ef10_filter_mark_old(efx); - - /* Copy/convert the address lists; add the primary station - * address and broadcast address + /* Do not install unspecified VID if VLAN filtering is enabled. + * Do not install all specified VIDs if VLAN filtering is disabled. */ - netif_addr_lock_bh(net_dev); - efx_ef10_filter_uc_addr_list(efx, &uc_promisc); - efx_ef10_filter_mc_addr_list(efx, &mc_promisc); - netif_addr_unlock_bh(net_dev); + if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter) + return; /* Insert/renew unicast filters */ - if (uc_promisc) { - efx_ef10_filter_insert_def(efx, false, false); - efx_ef10_filter_insert_addr_list(efx, false, false); + if (table->uc_promisc) { + efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_addr_list(efx, vlan, false, false); } else { /* If any of the filters failed to insert, fall back to * promiscuous mode - add in the uc_def filter. But keep * our individual unicast filters. 
*/ - if (efx_ef10_filter_insert_addr_list(efx, false, false)) - efx_ef10_filter_insert_def(efx, false, false); + if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) + efx_ef10_filter_insert_def(efx, vlan, false, false); } /* Insert/renew multicast filters */ /* If changing promiscuous state with cascaded multicast filters, remove * old filters first, so that packets are dropped rather than duplicated */ - if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc) + if (nic_data->workaround_26807 && + table->mc_promisc_last != table->mc_promisc) efx_ef10_filter_remove_old(efx); - if (mc_promisc) { + if (table->mc_promisc) { if (nic_data->workaround_26807) { /* If we failed to insert promiscuous filters, rollback * and fall back to individual multicast filters */ - if (efx_ef10_filter_insert_def(efx, true, true)) { + if (efx_ef10_filter_insert_def(efx, vlan, true, true)) { /* Changing promisc state, so remove old filters */ efx_ef10_filter_remove_old(efx); - efx_ef10_filter_insert_addr_list(efx, true, false); + efx_ef10_filter_insert_addr_list(efx, vlan, + true, false); } } else { /* If we failed to insert promiscuous filters, don't * rollback. Regardless, also insert the mc_list */ - efx_ef10_filter_insert_def(efx, true, false); - efx_ef10_filter_insert_addr_list(efx, true, false); + efx_ef10_filter_insert_def(efx, vlan, true, false); + efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } else { /* If any filters failed to insert, rollback and fall back to @@ -4268,17 +4606,153 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) * that fails, roll back again and insert as many of our * individual multicast filters as we can. */ - if (efx_ef10_filter_insert_addr_list(efx, true, true)) { + if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) { /* Changing promisc state, so remove old filters */ if (nic_data->workaround_26807) efx_ef10_filter_remove_old(efx); - if (efx_ef10_filter_insert_def(efx, true, true)) - efx_ef10_filter_insert_addr_list(efx, true, false); + if (efx_ef10_filter_insert_def(efx, vlan, true, true)) + efx_ef10_filter_insert_addr_list(efx, vlan, + true, false); } } +} + +/* Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ +static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct efx_ef10_filter_vlan *vlan; + bool vlan_filter; + + if (!efx_dev_registered(efx)) + return; + + if (!table) + return; + + efx_ef10_filter_mark_old(efx); + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + netif_addr_lock_bh(net_dev); + efx_ef10_filter_uc_addr_list(efx); + efx_ef10_filter_mc_addr_list(efx); + netif_addr_unlock_bh(net_dev); + + /* If VLAN filtering changes, all old filters are finally removed. + * Do it in advance to avoid conflicts for unicast untagged and + * VLAN 0 tagged filters. 
+ */ + vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); + if (table->vlan_filter != vlan_filter) { + table->vlan_filter = vlan_filter; + efx_ef10_filter_remove_old(efx); + } + + list_for_each_entry(vlan, &table->vlan_list, list) + efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); efx_ef10_filter_remove_old(efx); - efx->mc_promisc = mc_promisc; + table->mc_promisc_last = table->mc_promisc; +} + +static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_vlan *vlan; + + WARN_ON(!rwsem_is_locked(&efx->filter_sem)); + + list_for_each_entry(vlan, &table->vlan_list, list) { + if (vlan->vid == vid) + return vlan; + } + + return NULL; +} + +static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_vlan *vlan; + unsigned int i; + + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return -EINVAL; + + vlan = efx_ef10_filter_find_vlan(efx, vid); + if (WARN_ON(vlan)) { + netif_err(efx, drv, efx->net_dev, + "VLAN %u already added\n", vid); + return -EALREADY; + } + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return -ENOMEM; + + vlan->vid = vid; + + for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) + vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; + for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) + vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; + vlan->ucdef = EFX_EF10_FILTER_ID_INVALID; + vlan->bcast = EFX_EF10_FILTER_ID_INVALID; + vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + + list_add_tail(&vlan->list, &table->vlan_list); + + if (efx_dev_registered(efx)) + efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); + + return 0; +} + +static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan) +{ + unsigned int i; + + /* See comment in efx_ef10_filter_table_remove() */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + + list_del(&vlan->list); + + for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->uc[i]); + for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->mc[i]); + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef); + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast); + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef); + + kfree(vlan); +} + +static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_filter_vlan *vlan; + + /* See comment in efx_ef10_filter_table_remove() */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + + vlan = efx_ef10_filter_find_vlan(efx, vid); + if (!vlan) { + netif_err(efx, drv, efx->net_dev, + "VLAN %u not found in filter state\n", vid); + return; + } + + efx_ef10_filter_del_vlan_internal(efx, vlan); } static int efx_ef10_set_mac_address(struct efx_nic *efx) @@ -4290,6 +4764,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_device_detach_sync(efx); efx_net_stop(efx->net_dev); + + mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); efx_ef10_filter_table_remove(efx); @@ -4302,6 +4778,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_ef10_filter_table_probe(efx); up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + if (was_enabled) efx_net_open(efx->net_dev); netif_device_attach(efx->net_dev); @@ -4703,6 +5181,29 @@ static int 
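/*
 * Order matters in efx_ef10_filter_sync_rx_mode() above: mark everything old,
 * re-insert (which clears the mark), then sweep the leftovers, so traffic
 * keeps flowing while the table is rewritten; a VLAN-filter flip forces an
 * early sweep to avoid clashing untagged and VID-0 filters. The skeleton,
 * with hypothetical helpers:
 */
#include <stdbool.h>

static void mark_all_old(void) {}		/* stubs for the sketch */
static void rebuild_address_lists(void) {}
static void sync_one_vlan(int vid) { (void)vid; }
static void sweep_old(void) {}

static bool cur_vlan_filter;

static void sync_rx_mode(bool new_vlan_filter, const int *vids, int n_vids)
{
	mark_all_old();			/* all filters become removal candidates */
	rebuild_address_lists();

	if (cur_vlan_filter != new_vlan_filter) {
		cur_vlan_filter = new_vlan_filter;
		sweep_old();		/* drop the old scheme up front */
	}

	for (int i = 0; i < n_vids; i++)
		sync_one_vlan(vids[i]);	/* re-insertion clears the old mark */

	sweep_old();			/* remove whatever was not renewed */
}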
efx_ef10_ptp_set_ts_config(struct efx_nic *efx, } } +static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) +{ + if (proto != htons(ETH_P_8021Q)) + return -EINVAL; + + return efx_ef10_add_vlan(efx, vid); +} + +static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) +{ + if (proto != htons(ETH_P_8021Q)) + return -EINVAL; + + return efx_ef10_del_vlan(efx, vid); +} + +#define EF10_OFFLOAD_FEATURES \ + (NETIF_F_IP_CSUM | \ + NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_IPV6_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_NTUPLE) + const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .is_vf = true, .mem_bar = EFX_MEM_VF_BAR, @@ -4780,6 +5281,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { #endif .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, + .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, + .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, #ifdef CONFIG_SFC_SRIOV .vswitching_probe = efx_ef10_vswitching_probe_vf, .vswitching_restore = efx_ef10_vswitching_restore_vf, @@ -4798,8 +5301,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .always_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, - .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXHASH | NETIF_F_NTUPLE), + .offload_features = EF10_OFFLOAD_FEATURES, .mcdi_max_ver = 2, .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | @@ -4891,6 +5393,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .ptp_write_host_time = efx_ef10_ptp_write_host_time, .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, + .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, + .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, #ifdef CONFIG_SFC_SRIOV .sriov_configure = efx_ef10_sriov_configure, .sriov_init = efx_ef10_sriov_init, @@ -4919,8 +5423,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .always_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, - .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXHASH | NETIF_F_NTUPLE), + .offload_features = EF10_OFFLOAD_FEATURES, .mcdi_max_ver = 2, .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 3c17f274e..a949b9d27 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -232,6 +232,35 @@ fail: return rc; } +static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u32 port_flags; + int rc; + + rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + if (rc) + goto fail_vadaptor_alloc; + + rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id, + &port_flags, NULL, NULL); + if (rc) + goto fail_vadaptor_query; + + if (port_flags & + (1 << MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN)) + efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + else + efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + return 0; + +fail_vadaptor_query: + efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED); +fail_vadaptor_alloc: + return rc; +} + /* On top of the default firmware vswitch setup, create a VEB vswitch and * expansion vport for use by this function. 
*/ @@ -243,7 +272,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx) if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) { /* vswitch not needed as we have no VFs */ - efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + efx_ef10_vadaptor_alloc_set_features(efx); return 0; } @@ -263,7 +292,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx) goto fail3; ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr); - rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + rc = efx_ef10_vadaptor_alloc_set_features(efx); if (rc) goto fail4; @@ -282,9 +311,7 @@ fail1: int efx_ef10_vswitching_probe_vf(struct efx_nic *efx) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - - return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + return efx_ef10_vadaptor_alloc_set_features(efx); } int efx_ef10_vswitching_restore_pf(struct efx_nic *efx) @@ -554,6 +581,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, efx_device_detach_sync(vf->efx); efx_net_stop(vf->efx->net_dev); + mutex_lock(&vf->efx->mac_lock); down_write(&vf->efx->filter_sem); vf->efx->type->filter_table_remove(vf->efx); @@ -630,6 +658,7 @@ restore_filters: goto reset_nic_up_write; up_write(&vf->efx->filter_sem); + mutex_unlock(&vf->efx->mac_lock); up_write(&vf->efx->filter_sem); @@ -642,9 +671,10 @@ restore_filters: return rc; reset_nic_up_write: - if (vf->efx) + if (vf->efx) { up_write(&vf->efx->filter_sem); - + mutex_unlock(&vf->efx->mac_lock); + } reset_nic: if (vf->efx) { netif_err(efx, drv, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index 6d25b92cb..9ceb7ef0a 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -70,6 +70,9 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx, int efx_ef10_vport_del_mac(struct efx_nic *efx, unsigned int port_id, u8 *mac); int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); +int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, + u32 *port_flags, u32 *vadaptor_flags, + unsigned int *vlan_tags); int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id); #endif /* EF10_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 097f363f1..14b821b1c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -600,6 +600,7 @@ fail: */ static void efx_start_datapath(struct efx_nic *efx) { + netdev_features_t old_features = efx->net_dev->features; bool old_rx_scatter = efx->rx_scatter; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; @@ -644,6 +645,15 @@ static void efx_start_datapath(struct efx_nic *efx) efx->rx_dma_len, efx->rx_page_buf_step, efx->rx_bufs_per_page, efx->rx_pages_per_batch); + /* Restore previously fixed features in hw_features and remove + * features which are fixed now + */ + efx->net_dev->hw_features |= efx->net_dev->features; + efx->net_dev->hw_features &= ~efx->fixed_features; + efx->net_dev->features |= efx->fixed_features; + if (efx->net_dev->features != old_features) + netdev_features_change(efx->net_dev); + /* RX filters may also have scatter-enabled flags */ if (efx->rx_scatter != old_rx_scatter) efx->type->filter_update_rx_scatter(efx); @@ -1719,6 +1729,7 @@ static int efx_probe_filters(struct efx_nic *efx) spin_lock_init(&efx->filter_lock); init_rwsem(&efx->filter_sem); + mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); rc = efx->type->filter_table_probe(efx); if (rc) @@ -1757,6 +1768,7 @@ static int 
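/*
 * Every reconfiguration path in this patch now takes mac_lock before
 * filter_sem (probe, set_mac_address and set_vf_vlan alike); one consistent
 * acquisition order across all paths is what rules out deadlock on the pair.
 * The generic pattern, as a pthread sketch rather than the kernel API:
 */
#include <pthread.h>

static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t filter_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Always mac_lock -> filter_sem, never the reverse */
static void reconfigure_filters(void (*rebuild)(void))
{
	pthread_mutex_lock(&mac_lock);
	pthread_rwlock_wrlock(&filter_sem);
	rebuild();
	pthread_rwlock_unlock(&filter_sem);
	pthread_mutex_unlock(&mac_lock);
}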
efx_probe_filters(struct efx_nic *efx) #endif out_unlock: up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); return rc; } @@ -2312,14 +2324,46 @@ static void efx_set_rx_mode(struct net_device *net_dev) static int efx_set_features(struct net_device *net_dev, netdev_features_t data) { struct efx_nic *efx = netdev_priv(net_dev); + int rc; /* If disabling RX n-tuple filtering, clear existing filters */ - if (net_dev->features & ~data & NETIF_F_NTUPLE) - return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + if (net_dev->features & ~data & NETIF_F_NTUPLE) { + rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + if (rc) + return rc; + } + + /* If Rx VLAN filter is changed, update filters via mac_reconfigure */ + if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) { + /* efx_set_rx_mode() will schedule MAC work to update filters + * when the new features are finally set in net_dev. + */ + efx_set_rx_mode(net_dev); + } return 0; } +static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->vlan_rx_add_vid) + return efx->type->vlan_rx_add_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + +static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->vlan_rx_kill_vid) + return efx->type->vlan_rx_kill_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, @@ -2332,6 +2376,8 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, .ndo_set_features = efx_set_features, + .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, #ifdef CONFIG_SFC_SRIOV .ndo_set_vf_mac = efx_sriov_set_vf_mac, .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, @@ -3147,17 +3193,25 @@ static int efx_pci_probe(struct pci_dev *pci_dev, return -ENOMEM; efx = netdev_priv(net_dev); efx->type = (const struct efx_nic_type *) entry->driver_data; + efx->fixed_features |= NETIF_F_HIGHDMA; net_dev->features |= (efx->type->offload_features | NETIF_F_SG | - NETIF_F_HIGHDMA | NETIF_F_TSO | - NETIF_F_RXCSUM); + NETIF_F_TSO | NETIF_F_RXCSUM); if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) net_dev->features |= NETIF_F_TSO6; /* Mask for features that also apply to VLAN devices */ net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | NETIF_F_RXCSUM); - /* All offloads can be toggled */ - net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; + + net_dev->hw_features = net_dev->features & ~efx->fixed_features; + + /* Disable VLAN filtering by default. It may be enforced if + * the feature is fixed (i.e. VLAN filters are required to + * receive VLAN tagged packets due to vPort restrictions). 
+ */ + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->features |= efx->fixed_features; + pci_set_drvdata(pci_dev, efx); SET_NETDEV_DEV(net_dev, &pci_dev->dev); rc = efx_init_struct(efx, pci_dev, net_dev); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 5e3f93f04..c3ae739e9 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -274,4 +274,13 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) netif_tx_unlock_bh(dev); } +static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) +{ + if (WARN_ON(down_read_trylock(sem))) { + up_read(sem); + return false; + } + return true; +} + #endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 4cc772164..c9a5b003c 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -273,6 +273,9 @@ * have already installed filters. See the comment at * MC_CMD_WORKAROUND_BUG26807. */ #define MC_CMD_ERR_FILTERS_PRESENT 0x1014 +/* The clock whose frequency you've attempted to set + * doesn't exist on this NIC */ +#define MC_CMD_ERR_NO_CLOCK 0x1015 #define MC_CMD_ERR_CODE_OFST 0 @@ -292,9 +295,11 @@ /* Point to the copycode entry point. */ #define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) #define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) +#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4) /* Points to the recovery mode entry point. */ #define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) #define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4) /* The command set exported by the boot ROM (MCDI v0) */ #define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ @@ -686,6 +691,12 @@ #define FCDI_EVENT_CODE_PTP_STATUS 0x9 /* enum: Port id config to map MC-FC port idx */ #define FCDI_EVENT_CODE_PORT_CONFIG 0xa +/* enum: Boot result or error code */ +#define FCDI_EVENT_CODE_BOOT_RESULT 0xb +#define FCDI_EVENT_REBOOT_SRC_LBN 36 +#define FCDI_EVENT_REBOOT_SRC_WIDTH 8 +#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */ +#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */ #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 @@ -717,6 +728,11 @@ #define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 #define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 #define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 +#define FCDI_EVENT_BOOT_RESULT_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */ +#define FCDI_EVENT_BOOT_RESULT_LBN 0 +#define FCDI_EVENT_BOOT_RESULT_WIDTH 32 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events * to the MC. 
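/*
 * efx_rwsem_assert_write_locked() above leans on a trick: if a read trylock
 * succeeds, no writer can be holding the semaphore, which is exactly the bug
 * being asserted against. The same probe in userspace terms (a pthread
 * analogue, not the kernel API; note a failed trylock only suggests a writer):
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool assert_write_locked(pthread_rwlock_t *lock)
{
	if (pthread_rwlock_tryrdlock(lock) == 0) {
		pthread_rwlock_unlock(lock);
		fprintf(stderr, "WARN: lock is not write-locked\n");
		return false;
	}
	return true;
}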
Note that this structure is overlaid over a normal FCDI event @@ -1649,15 +1665,30 @@ /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 -/* Uncorrected error on transmit timestamps in NIC clock format */ +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 -/* Uncorrected error on receive timestamps in NIC clock format */ +/* Uncorrected error on PTP receive timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 /* Uncorrected error on PPS output in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 /* Uncorrected error on PPS input in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12 +/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16 +/* Uncorrected error on non-PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20 + /* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 /* Results of testing */ @@ -2158,8 +2189,12 @@ /* MC_CMD_DRV_ATTACH_IN msgrequest */ #define MC_CMD_DRV_ATTACH_IN_LEN 12 -/* new state (0=detached, 1=attached) to set if UPDATE=1 */ +/* new state to set if UPDATE=1 */ #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_PREBOOT_LBN 1 +#define MC_CMD_DRV_PREBOOT_WIDTH 1 /* 1 to set new state, or 0 to just report the existing state */ #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 /* preferred datapath firmware (for Huntington; ignored for Siena) */ @@ -2181,12 +2216,12 @@ /* MC_CMD_DRV_ATTACH_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 -/* previous or existing state (0=detached, 1=attached) */ +/* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 -/* previous or existing state (0=detached, 1=attached) */ +/* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 /* Flags associated with this function */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 @@ -2198,6 +2233,10 @@ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 /* enum: The function can perform privileged operations */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 +/* enum: The function does not have an active port associated with it. The port + * refers to the Sorrento external FPGA port. 
+ */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 /***********************************/ @@ -2892,7 +2931,7 @@ */ #define MC_CMD_SET_MAC 0x2c -#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK +#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SET_MAC_IN msgrequest */ #define MC_CMD_SET_MAC_IN_LEN 28 @@ -2927,9 +2966,66 @@ #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* MC_CMD_SET_MAC_EXT_IN msgrequest */ +#define MC_CMD_SET_MAC_EXT_IN_LEN 32 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ +#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +/* enum: Flow control is off. */ +/* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. */ +/* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ +/* MC_CMD_FCNTL_BIDIR 0x2 */ +/* enum: Auto neg flow control. */ +/* MC_CMD_FCNTL_AUTO 0x3 */ +/* enum: Priority flow control (eftest builds only). */ +/* MC_CMD_FCNTL_QBB 0x4 */ +/* enum: Issue flow control. */ +/* MC_CMD_FCNTL_GENERATE 0x5 */ +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* Select which parameters to configure. A parameter will only be modified if + * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in + * capabilities then this field is ignored (and all flags are assumed to be + * set). + */ +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1 + /* MC_CMD_SET_MAC_OUT msgresponse */ #define MC_CMD_SET_MAC_OUT_LEN 0 +/* MC_CMD_SET_MAC_V2_OUT msgresponse */ +#define MC_CMD_SET_MAC_V2_OUT_LEN 4 +/* MTU as configured after processing the request. See comment at + * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL + * to 0. 
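/*
 * NEW_STATE changes above from a plain 0/1 to a bitmask described by the
 * usual MCDI LBN (least significant bit number) / WIDTH pairs. Encoding and
 * decoding such a field by hand looks like this; these are generic helpers
 * for widths below 32, not the driver's MCDI_SET_DWORD machinery.
 */
#include <stdint.h>

#define DRV_ATTACH_LBN 0
#define DRV_ATTACH_WIDTH 1
#define DRV_PREBOOT_LBN 1
#define DRV_PREBOOT_WIDTH 1

static uint32_t field_set(uint32_t dword, unsigned int lbn, unsigned int width,
			  uint32_t val)
{
	uint32_t mask = ((1u << width) - 1) << lbn;

	return (dword & ~mask) | ((val << lbn) & mask);
}

static uint32_t field_get(uint32_t dword, unsigned int lbn, unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1);
}

/* e.g. "attached, no preboot" for NEW_STATE; OLD_STATE decodes with
 * field_get() at the same positions
 */
static uint32_t drv_attach_new_state(int attach, int preboot)
{
	uint32_t state = 0;

	state = field_set(state, DRV_ATTACH_LBN, DRV_ATTACH_WIDTH, !!attach);
	state = field_set(state, DRV_PREBOOT_LBN, DRV_PREBOOT_WIDTH, !!preboot);
	return state;
}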
+ */ +#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 + /***********************************/ /* MC_CMD_PHY_STATS @@ -3521,6 +3617,26 @@ #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 #define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +/* Writes must be multiples of this size. Added to support the MUM on Sorrento. + */ +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24 + /***********************************/ /* MC_CMD_NVRAM_UPDATE_START @@ -3561,6 +3677,37 @@ /* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 +/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */ +#define MC_CMD_NVRAM_READ_IN_V2_LEN 16 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8 +/* Optional control info. If a partition is stored with an A/B versioning + * scheme (i.e. in more than one physical partition in NVRAM) the host can set + * this to control which underlying physical partition is used to read data + * from. This allows it to perform a read-modify-write-verify with the write + * lock continuously held by calling NVRAM_UPDATE_START, reading the old + * contents using MODE=TARGET_CURRENT, overwriting the old partition and then + * verifying by reading with MODE=TARGET_BACKUP. + */ +#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12 +/* enum: Same as omitting MODE: caller sees data in current partition unless it + * holds the write lock in which case it sees data in the partition it is + * updating. + */ +#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0 +/* enum: Read from the current partition of an A/B pair, even if holding the + * write lock. + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1 +/* enum: Read from the non-current (i.e. 
to be updated) partition of an A/B + * pair + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2 + /* MC_CMD_NVRAM_READ_OUT msgresponse */ #define MC_CMD_NVRAM_READ_OUT_LENMIN 1 #define MC_CMD_NVRAM_READ_OUT_LENMAX 252 @@ -3895,6 +4042,8 @@ #define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 /* enum: CCOM AVREG 1v8 supply (external ADC): mV */ #define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +/* enum: CCOM RTS temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b /* enum: Not a sensor: reserved for the next page flag */ #define MC_CMD_SENSOR_PAGE1_NEXT 0x3f /* enum: controller internal temperature sensor voltage on master core @@ -3931,6 +4080,12 @@ #define MC_CMD_SENSOR_PHY0_VCC 0x4c /* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ #define MC_CMD_SENSOR_PHY1_VCC 0x4d +/* enum: Controller die temperature (TDIODE): degC */ +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e +/* enum: Board temperature (front): degC */ +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f +/* enum: Board temperature (back): degC */ +#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -4007,7 +4162,7 @@ /* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ #define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 -/* DMA address of host buffer for sensor readings */ +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 @@ -4608,6 +4763,10 @@ * operations */ #define MC_CMD_MUM_OP_QSFP 0xc +/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage + * level) from MUM + */ +#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd /* MC_CMD_MUM_IN_NULL msgrequest */ #define MC_CMD_MUM_IN_NULL_LEN 4 @@ -4793,6 +4952,10 @@ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1 /* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ #define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 @@ -4862,6 +5025,11 @@ #define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 #define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ +#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + /* MC_CMD_MUM_OUT msgresponse */ #define MC_CMD_MUM_OUT_LEN 0 @@ -5004,6 +5172,69 @@ #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 +/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) +/* Discrete (soldered) DDR resistor strap info */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 +/* Number of SODIMM info records */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 +/* Array of SODIMM info records */ +#define 
MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8 +/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0 +/* enum: SODIMM bank 2 (Bottom SODDIMM for Sorrento) */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1 +/* enum: Total number of SODIMM banks */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */ +/* enum: Values 5-15 are reserved for future usage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4 +/* enum: No module present */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0 +/* enum: Module present supported and powered on */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1 +/* enum: Module present but bad type */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2 +/* enum: Module present but incompatible voltage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3 +/* enum: Module present but unknown SPD */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4 +/* enum: Module present but slot cannot support it */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5 +/* enum: Modules may or may not be present, but cannot establish contact by I2C + */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12 + /* MC_CMD_RESOURCE_SPECIFIER enum */ /* enum: Any */ #define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff @@ -5076,6 +5307,8 @@ #define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 /* enum: Expansion ROM configuration data for port 0 */ #define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 /* enum: Expansion ROM configuration data for port 1 */ #define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 /* enum: Expansion ROM configuration data for port 2 */ @@ -5084,6 +5317,8 @@ #define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 /* enum: Non-volatile log output partition */ #define NVRAM_PARTITION_TYPE_LOG 0x700 +/* enum: Non-volatile log output of second core on dual-core device */ +#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 /* enum: Device state dump output partition */ #define NVRAM_PARTITION_TYPE_DUMP 
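/*
 * Each SODIMM info record above is one 64-bit value carried as LO/HI dwords,
 * with fields packed at the LBN/WIDTH positions listed. A sketch of decoding
 * one record with those positions; field meanings per the comments above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t rec_bits(uint64_t rec, unsigned int lbn, unsigned int width)
{
	return (rec >> lbn) & ((UINT64_C(1) << width) - 1);
}

static void decode_ddr_record(uint32_t lo, uint32_t hi)
{
	uint64_t rec = ((uint64_t)hi << 32) | lo;

	printf("bank=%" PRIu64 " type=%" PRIu64 " rank=%" PRIu64
	       " voltage=%" PRIu64 " size=%" PRIu64 " speed=%" PRIu64
	       " state=%" PRIu64 "\n",
	       rec_bits(rec, 0, 8),	/* BANK_ID */
	       rec_bits(rec, 8, 8),	/* TYPE */
	       rec_bits(rec, 16, 4),	/* RANK */
	       rec_bits(rec, 20, 4),	/* VOLTAGE */
	       rec_bits(rec, 24, 8),	/* SIZE */
	       rec_bits(rec, 32, 16),	/* SPEED */
	       rec_bits(rec, 48, 4));	/* STATE */
}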
0x800 /* enum: Application license key storage partition */ @@ -5116,6 +5351,20 @@ #define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 /* enum: MUM fuses and lockbits partition. */ #define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +/* enum: UEFI expansion ROM if separate from PXE */ +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +/* enum: Spare partition 0 */ +#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000 +/* enum: Spare partition 1 */ +#define NVRAM_PARTITION_TYPE_SPARE_1 0x1100 +/* enum: Spare partition 2 */ +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +/* enum: Spare partition 3 */ +#define NVRAM_PARTITION_TYPE_SPARE_3 0x1300 +/* enum: Spare partition 4 */ +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +/* enum: Spare partition 5 */ +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -5149,6 +5398,90 @@ #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 +/* LICENSED_FEATURES structuredef */ +#define LICENSED_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_FEATURES_MASK_OFST 0 +#define LICENSED_FEATURES_MASK_LEN 8 +#define LICENSED_FEATURES_MASK_LO_OFST 0 +#define LICENSED_FEATURES_MASK_HI_OFST 4 +#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_PIO_LBN 1 +#define LICENSED_FEATURES_PIO_WIDTH 1 +#define LICENSED_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_FEATURES_CLOCK_LBN 3 +#define LICENSED_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_MASK_LBN 0 +#define LICENSED_FEATURES_MASK_WIDTH 64 + +/* LICENSED_V3_APPS structuredef */ +#define LICENSED_V3_APPS_LEN 8 +/* Bitmask of licensed applications */ +#define LICENSED_V3_APPS_MASK_OFST 0 +#define LICENSED_V3_APPS_MASK_LEN 8 +#define LICENSED_V3_APPS_MASK_LO_OFST 0 +#define LICENSED_V3_APPS_MASK_HI_OFST 4 +#define LICENSED_V3_APPS_ONLOAD_LBN 0 +#define LICENSED_V3_APPS_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_PTP_LBN 1 +#define LICENSED_V3_APPS_PTP_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1 +#define LICENSED_V3_APPS_SOLARSECURE_LBN 3 +#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1 +#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4 +#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1 +#define LICENSED_V3_APPS_MASK_LBN 0 +#define LICENSED_V3_APPS_MASK_WIDTH 64 + +/* LICENSED_V3_FEATURES structuredef */ +#define LICENSED_V3_FEATURES_LEN 8 +/* Bitmask of 
licensed firmware features */ +#define LICENSED_V3_FEATURES_MASK_OFST 0 +#define LICENSED_V3_FEATURES_MASK_LEN 8 +#define LICENSED_V3_FEATURES_MASK_LO_OFST 0 +#define LICENSED_V3_FEATURES_MASK_HI_OFST 4 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_PIO_LBN 1 +#define LICENSED_V3_FEATURES_PIO_WIDTH 1 +#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_V3_FEATURES_CLOCK_LBN 3 +#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_MASK_LBN 0 +#define LICENSED_V3_FEATURES_MASK_WIDTH 64 + /* TX_TIMESTAMP_EVENT structuredef */ #define TX_TIMESTAMP_EVENT_LEN 6 /* lower 16 bits of timestamp data */ @@ -5258,6 +5591,8 @@ #define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 #define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 @@ -5362,6 +5697,8 @@ #define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10 +#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -5422,6 +5759,8 @@ #define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -5535,6 +5874,8 @@ #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. 
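 * (Illustrative note, not part of the MCDI definition: the new TSOV2_EN
 * flag above is set alongside the other INIT_TXQ_EXT_IN flags; a driver
 * might build the flags word with the sfc MCDI helpers, e.g.
 *
 *   MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_EXT_IN_FLAGS,
 *                         INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, 1,
 *                         INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, 1);
 *
 * where inbuf is an MCDI_DECLARE_BUF()-declared request buffer.)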
*/ @@ -5747,6 +6088,46 @@ #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 +/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1 +/* The host provides a contiguous memory buffer that contains at least + * NUM_BLOCKS blocks, each of size STATUS_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12 +/* The host provides a contiguous memory buffer that contains at least + * NUM_BLOCKS blocks, each of size REQUEST_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24 +/* The host provides a contiguous memory buffer that contains at least + * NUM_BLOCKS blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed + * if the host intends to complete proxied operations by using MC_CMD_PROXY_CMD. + */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32 +/* Must be a power of 2, or zero if this buffer is not provided */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36 +/* Applies to all three buffers */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40 +/* A bit mask defining which MCDI operations may be proxied */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108 + /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ #define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 @@ -6323,6 +6704,15 @@ * client */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 +/* enum: read properties relating to security rules (Medford-only; for use by + * SolarSecure apps, not directly by drivers. See SF-114946-SW.) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 +/* enum: read the list of supported RX filter matches for VXLAN/NVGRE + * encapsulated frames, which follow a different match sequence to normal + * frames (Medford only) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 @@ -6356,7 +6746,10 @@ /***********************************/ /* MC_CMD_PARSER_DISP_RW - * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging + * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging. + * Please note that this interface is only of use to debug tools which have + * knowledge of firmware and hardware data structures; nothing here is intended + * for use by normal driver code.
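+ * A worked example of the read-modify-write semantics defined below
+ * (new = (old & mask) ^ value): to force bit B of a DMEM word to a
+ * known value while leaving the other bits intact, a debug tool would
+ * use mask = ~(1 << B) and value = new_bit << B, so the old bit is
+ * masked out and the new one XORed in. (Illustrative only.)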
*/ #define MC_CMD_PARSER_DISP_RW 0xe5 @@ -6374,6 +6767,12 @@ #define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 /* enum: Lookup engine (with requested metadata format) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 +/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */ +#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 +/* enum: RX1 dispatcher CPU (only valid for Medford) */ +#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 +/* enum: Miscellaneous other state (only valid for Medford) */ +#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 /* enum: read a word of DICPU DMEM or a LUE entry */ @@ -6382,8 +6781,12 @@ #define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 /* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */ #define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 -/* data memory address or LUE index */ +/* data memory address (DICPU targets) or LUE index (LUE targets) */ #define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 +/* selector (for MISC_STATE target) */ +#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8 +/* enum: Port to datapath mapping */ +#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 /* value to write (for DMEM writes) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ @@ -6408,6 +6811,12 @@ */ #define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20 #define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32 +/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */ +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4 +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4 +#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ +#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ /***********************************/ @@ -7071,6 +7480,24 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. 
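 * A minimal sketch of how a driver can cache and test these flags
 * (variable names are illustrative; this mirrors the idiom used by the
 * in-tree sfc driver):
 *
 *   u32 caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
 *   if (caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN))
 *           ... enhanced MC_CMD_SET_MAC behaviour is available ...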
*/ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 @@ -7138,6 +7565,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 @@ -7153,6 +7582,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -7227,6 +7658,258 @@ /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 +/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72 +/* First word of flags. 
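+ * Since the V2 response only extends the original one, a caller should
+ * check the actual response length before reading any V2-only fields;
+ * an illustrative sketch (names as in the sfc MCDI helpers):
+ *
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0 && outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ *           flags2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2);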
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* TxDPCPU firmware id. 
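+ * (The DPCPU firmware ids are 16-bit fields; an illustrative decode of
+ * the RX id defined above, using the sfc helper for 16-bit response
+ * fields:
+ *
+ *   u16 rxdp = MCDI_WORD(outbuf, GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID);
+ *   bool low_latency =
+ *           (rxdp == MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY);
+ * )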
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development 
only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 +/* Second word of flags. Not present on older firmware (check the length). */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present + * on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number.
A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 + /***********************************/ /* MC_CMD_V2_EXTN @@ -7474,6 +8157,25 @@ #define MC_CMD_VSWITCH_FREE_OUT_LEN 0 + +/***********************************/ +/* MC_CMD_VSWITCH_QUERY + * read some config of v-switch. For now this command is an empty placeholder. + * It may be used to check if a v-switch is connected to a given EVB port (if + * not, then the command returns ENOENT). + */ +#define MC_CMD_VSWITCH_QUERY 0x63 + +#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VSWITCH_QUERY_IN msgrequest */ +#define MC_CMD_VSWITCH_QUERY_IN_LEN 4 +/* The port to which the v-switch is connected. */ +#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 + +/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */ +#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0 + + /***********************************/ /* MC_CMD_VPORT_ALLOC * allocate a v-port. @@ -7510,6 +8212,8 @@ #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1 /* The number of VLAN tags to insert/remove. An error will be returned if * incompatible with the number of VLAN tags specified for the upstream * v-switch. @@ -7561,6 +8265,8 @@ #define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 /* The number of VLAN tags to strip on receive */ #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 /* The number of VLAN tags to transparently insert/remove. */ @@ -7638,6 +8344,29 @@ #define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6 + +/***********************************/ +/* MC_CMD_VADAPTOR_QUERY + * read some config of v-adaptor.
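+ * An illustrative request/response sequence using the sfc MCDI helpers
+ * (sketch only; error handling and the outlen check are omitted):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
+ *   MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, port_id);
+ *   efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
+ *                outbuf, sizeof(outbuf), &outlen);
+ *   flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);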
+ */ +#define MC_CMD_VADAPTOR_QUERY 0x61 + +#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */ +#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 + +/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */ +#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12 +/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0 +/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */ +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4 +/* The number of VLAN tags that may still be added */ +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8 + + /***********************************/ /* MC_CMD_EVB_PORT_ASSIGN * assign a port to a PCI function. @@ -7875,10 +8604,17 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 -/* Hash control flags. The _EN bits are always supported. The _MODE bits only - * work when the firmware reports ADDITIONAL_RSS_MODES in - * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0. - * See the RSS_MODE structure for the meaning of the mode bits. +/* Hash control flags. The _EN bits are always supported, but new modes are + * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES: + * in this case, the MODE fields may be set to non-zero values, and will take + * effect regardless of the settings of the _EN flags. See the RSS_MODE + * structure for the meaning of the mode bits. Drivers must check the + * capability before trying to set any _MODE fields, as older firmware will + * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In + * the case where all the _MODE flags are zero, the _EN flags take effect, + * providing backward compatibility for existing drivers. (Setting all _MODE + * *and* all _EN flags to zero is valid, to disable RSS spreading for that + * particular packet type.) */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 @@ -7923,11 +8659,18 @@ /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 -/* Hash control flags. If any _MODE bits are non-zero (which will only be true - * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be - * disregarded (but are guaranteed to be consistent with the _MODE bits if - * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was - * allocated). +/* Hash control flags. If all _MODE bits are zero (which will always be true + * for older firmware which does not report the ADDITIONAL_RSS_MODES + * capability), the _EN bits report the state. If any _MODE bits are non-zero + * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES) + * then the _EN bits should be disregarded, although the _MODE flags are + * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS + * context and in the case where the _EN flags were used in the SET. 
This + * provides backward compatibility: old drivers will not be attempting to + * derive any meaning from the _MODE bits (and can never set them to any value + * not representable by the _EN bits); new drivers can always determine the + * mode by looking only at the _MODE bits; the value returned by a GET can + * always be used for a SET regardless of old/new driver vs. old/new firmware. */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 @@ -8154,6 +8897,74 @@ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41 +/***********************************/ +/* MC_CMD_VPORT_RECONFIGURE + * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port + * has already been passed to another function (v-port's user), then that + * function will be reset before applying the changes. + */ +#define MC_CMD_VPORT_RECONFIGURE 0xeb + +#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */ +#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44 +/* The handle of the v-port */ +#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0 +/* Flags requesting what should be changed. */ +#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1 +/* The number of VLAN tags to insert/remove. An error will be returned if + * incompatible with the number of VLAN tags specified for the upstream + * v-switch. + */ +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16 +/* The number of MAC addresses to add */ +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16 +/* MAC addresses to add */ +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20 +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6 +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4 + +/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */ +#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1 + + +/***********************************/ +/* MC_CMD_EVB_PORT_QUERY + * read some config of v-port. + */ +#define MC_CMD_EVB_PORT_QUERY 0x62 + +#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */ +#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4 +/* The handle of the v-port */ +#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0 + +/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */ +#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8 +/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ +#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0 +/* The number of VLAN tags that may be used on a v-adaptor connected to this + * EVB port. + */ +#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4 + + /***********************************/ /* MC_CMD_DUMP_BUFTBL_ENTRIES * Dump buffer table entries, mainly for command client debug use. 
Dumps @@ -8196,6 +9007,14 @@ #define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2 +/* enum: pad to 64 bytes */ +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 +/* enum: pad to 128 bytes (Medford only) */ +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 +/* enum: pad to 256 bytes (Medford only) */ +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 /* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ #define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 @@ -8217,6 +9036,10 @@ #define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2 +/* Enum values, see field(s): */ +/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */ /***********************************/ @@ -8788,32 +9611,38 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: Attenuation (0-15, TBD for Medford) */ +/* enum: Attenuation (0-15, Huntington) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 -/* enum: CTLE Boost (0-15, TBD for Medford) */ +/* enum: CTLE Boost (0-15, Huntington) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 -/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD - * for Medford) +/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max + * positive, Medford - 0-31) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 -/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for - * Medford) +/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-31) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 -/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for - * Medford) +/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 -/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for - * Medford) +/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 -/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for - * Medford) +/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 -/* enum: Edge DFE DLEV (TBD for Medford) */ +/* enum: Edge DFE DLEV (0-128 for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 +/* enum: Variable Gain Amplifier (0-15, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 +/* enum: CTLE EQ Capacitor (0-15, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +/* enum: CTLE EQ Resistor (0-7, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -8885,26 +9714,32 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define 
MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: TX Amplitude */ +/* enum: TX Amplitude (Huntington, Medford) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 -/* enum: De-Emphasis Tap1 Magnitude (0-7) */ +/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 /* enum: De-Emphasis Tap1 Fine */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 -/* enum: De-Emphasis Tap2 Magnitude (0-6) */ +/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 -/* enum: De-Emphasis Tap2 Fine */ +/* enum: De-Emphasis Tap2 Fine (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 -/* enum: Pre-Emphasis Magnitude */ +/* enum: Pre-Emphasis Magnitude (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 -/* enum: Pre-Emphasis Fine */ +/* enum: Pre-Emphasis Fine (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 -/* enum: TX Slew Rate Coarse control */ +/* enum: TX Slew Rate Coarse control (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 -/* enum: TX Slew Rate Fine control */ +/* enum: TX Slew Rate Fine control (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 -/* enum: TX Termination Impedance control */ +/* enum: TX Termination Impedance control (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 +/* enum: TX Amplitude Fine control (Medford) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa +/* enum: Pre-shoot Tap (Medford) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb +/* enum: De-emphasis Tap (Medford) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -9086,8 +9921,16 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 /* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +/* enum: DFE DLev */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 +/* enum: Figure of Merit */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 +/* enum: CTLE EQ Capacitor (HF Gain) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +/* enum: CTLE EQ Resistor (DC Gain) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ @@ -9096,12 +9939,57 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ +#define 
MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 +/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3 +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0 + /* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */ #define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4 /* Requested operation */ @@ -9176,6 +10064,7 @@ /***********************************/ /* MC_CMD_LICENSING * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + * - not used for V3 licensing */ #define MC_CMD_LICENSING 0xf3 @@ -9219,6 +10108,93 @@ #define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1 +/***********************************/ +/* MC_CMD_LICENSING_V3 + * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + * - V3 licensing (Medford) + */ +#define MC_CMD_LICENSING_V3 0xd0 + +#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_V3_IN msgrequest */ +#define MC_CMD_LICENSING_V3_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_LICENSING_V3_IN_OP_OFST 0 +/* enum: re-read and apply licenses after a license key partition update; note + * that this operation returns a zero-length response + */ +#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 +/* enum: report counts of installed licenses */ +#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 + +/* MC_CMD_LICENSING_V3_OUT msgresponse */ +#define MC_CMD_LICENSING_V3_OUT_LEN 88 +/* count of keys which are valid */ +#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0 +/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with + * 
MC_CMD_FC_OP_LICENSE) + */ +#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4 +/* count of keys which are invalid due to being unverifiable */ +#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8 +/* count of keys which are invalid due to being for the wrong node */ +#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12 +/* licensing state (for diagnostics; the exact meaning of the bits in this + * field is private to the firmware) + */ +#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16 +/* licensing subsystem self-test report (for manftest) */ +#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20 +/* enum: licensing subsystem self-test failed */ +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0 +/* enum: licensing subsystem self-test passed */ +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1 +/* bitmask of licensed applications */ +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28 +/* reserved for future use */ +#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32 +#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24 +/* bitmask of licensed features */ +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60 +/* reserved for future use */ +#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64 +#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24 + + +/***********************************/ +/* MC_CMD_LICENSING_GET_ID_V3 + * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license + * partition - V3 licensing (Medford) + */ +#define MC_CMD_LICENSING_GET_ID_V3 0xd1 + +#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */ +#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0 + +/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num)) +/* type of license (e.g. 3) */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0 +/* length of the license ID (in bytes) */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4 +/* the unique license ID of the adapter */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244 + + /***********************************/ /* MC_CMD_MC2MC_PROXY * Execute an arbitrary MCDI command on the slave MC of a dual-core device. @@ -9239,7 +10215,7 @@ /* MC_CMD_GET_LICENSED_APP_STATE * Query the state of an individual licensed application. (Note that the actual * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation - * or a reboot of the MC.) + * or a reboot of the MC.) Not used for V3 licensing */ #define MC_CMD_GET_LICENSED_APP_STATE 0xf5 @@ -9260,9 +10236,69 @@ #define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 + +/***********************************/ +/* MC_CMD_GET_LICENSED_V3_APP_STATE + * Query the state of an individual licensed application. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE + * operation or a reboot of the MC.)
Used for V3 licensing (Medford) + */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2 + +#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8 +/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit + * mask + */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4 + +/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4 +/* state of this application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0 +/* enum: no (or invalid) license is present for the application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 +/* enum: a valid license is present for the application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1 + + +/***********************************/ +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES + * Query the state of one or more licensed features. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE + * operation or a reboot of the MC.) Used for V3 licensing (Medford) + */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3 + +#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8 +/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or + * more bits set + */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4 + +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8 +/* states of these features - bit set for licensed, clear for not licensed */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4 + + /***********************************/ /* MC_CMD_LICENSED_APP_OP - * Perform an action for an individual licensed application. + * Perform an action for an individual licensed application - not used for V3 + * licensing.
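+ * For V3 licensing the feature states are instead read with
+ * MC_CMD_GET_LICENSED_V3_FEATURE_STATES above; an illustrative query of
+ * a single feature bit using the 64-bit MCDI helpers:
+ *
+ *   MCDI_SET_QWORD(inbuf, GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES,
+ *                  1ULL << LICENSED_V3_FEATURES_TX_SNIFF_LBN);
+ *   ...
+ *   states = MCDI_QWORD(outbuf, GET_LICENSED_V3_FEATURE_STATES_OUT_STATES);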
*/ #define MC_CMD_LICENSED_APP_OP 0xf6 @@ -9327,6 +10363,67 @@ #define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 +/***********************************/ +/* MC_CMD_LICENSED_V3_VALIDATE_APP + * Perform validation for an individual licensed application - V3 licensing + * (Medford) + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4 + +#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 72 +/* application ID expressed as a single bit mask */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 4 +/* challenge for validation */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 8 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 64 + +/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 72 +/* application expiry time */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 0 +/* application expiry units */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 4 +/* enum: expiry units are accounting units */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 +/* enum: expiry units are calendar days */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 +/* validation response to challenge */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 8 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 64 + + +/***********************************/ +/* MC_CMD_LICENSED_V3_MASK_FEATURES + * Mask features - V3 licensing (Medford) + */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 + +#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12 +/* mask to be applied to features to be changed */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4 +/* whether to turn on or turn off the masked features */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 +/* enum: turn the features off */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 +/* enum: turn the features back on */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0 + + /***********************************/ /* MC_CMD_SET_PORT_SNIFF_CONFIG * Configure RX port sniffing for the physical port associated with the calling @@ -9696,12 +10793,27 @@ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */ +/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. 
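+ * New code should request or test the two replacement bits instead,
+ * e.g. (illustrative):
+ *
+ *   mask = MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX |
+ *          MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC;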
*/ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 #define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ +/* enum: Allows setting the TX packets' source MAC address to an arbitrary MAC + * address. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800 +/* enum: Privilege that allows a Function to change the MAC address configured + * in its associated vAdapter/vPort. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000 +/* enum: Privilege that allows a Function to install filters that specify VLANs + * that are not in the permit list for the associated vPort. This privilege is + * primarily to support ESX where vPorts are created that restrict traffic to + * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000 /* enum: Set this bit to indicate that a new privilege mask is to be set, * otherwise the command will only read the existing mask. */ @@ -9951,7 +11063,7 @@ /* Sector type */ #define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 /* Enum values, see field(s): */ -/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ +/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ /* Sector size */ #define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 /* Sector data */ @@ -10067,4 +11179,123 @@ #define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0 + +/***********************************/ +/* MC_CMD_EXEC_SIGNED + * Check the CMAC of the contents of IMEM and DMEM against the value supplied + * and if correct begin execution from the start of IMEM. The caller supplies a + * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC + * computation runs from the start of IMEM, and from the start of DMEM + 16k, + * to match flash booting. The command will respond with EINVAL if the CMAC + * does not match, otherwise it will respond with success before it jumps to + * IMEM. + */ +#define MC_CMD_EXEC_SIGNED 0x10c + +#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_EXEC_SIGNED_IN msgrequest */ +#define MC_CMD_EXEC_SIGNED_IN_LEN 28 +/* the length of code to include in the CMAC */ +#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0 +/* the length of data to include in the CMAC */ +#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4 +/* the XPM sector containing the key to use */ +#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8 +/* the expected CMAC value */ +#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12 +#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16 + +/* MC_CMD_EXEC_SIGNED_OUT msgresponse */ +#define MC_CMD_EXEC_SIGNED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PREPARE_SIGNED + * Prepare to upload a signed image. This will scrub the specified length of + * the data region, which must be at least as large as the DATALEN supplied to + * MC_CMD_EXEC_SIGNED.
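+ * The expected sequence is therefore scrub-then-execute; a sketch
+ * (variable names illustrative, upload step elided):
+ *
+ *   MCDI_SET_DWORD(inbuf, PREPARE_SIGNED_IN_DATALEN, datalen);
+ *   ... issue MC_CMD_PREPARE_SIGNED, upload IMEM/DMEM contents ...
+ *   MCDI_SET_DWORD(inbuf2, EXEC_SIGNED_IN_CODELEN, codelen);
+ *   MCDI_SET_DWORD(inbuf2, EXEC_SIGNED_IN_DATALEN, datalen);
+ *   MCDI_SET_DWORD(inbuf2, EXEC_SIGNED_IN_KEYSECTOR, keysector);
+ *   memcpy(MCDI_PTR(inbuf2, EXEC_SIGNED_IN_CMAC), cmac,
+ *          MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
+ *   ... issue MC_CMD_EXEC_SIGNED ...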
+ */ +#define MC_CMD_PREPARE_SIGNED 0x10d + +#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PREPARE_SIGNED_IN msgrequest */ +#define MC_CMD_PREPARE_SIGNED_IN_LEN 4 +/* the length of data area to clear */ +#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0 + +/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */ +#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS + * Configure UDP ports for tunnel encapsulation hardware acceleration. The + * parser-dispatcher will attempt to parse traffic on these ports as tunnel + * encapsulation PDUs and filter them using the tunnel encapsulation filter + * chain rather than the standard filter chain. Note that this command can + * cause all functions to see a reset. (Available on Medford only.) + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117 + +#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num)) +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1 +/* The number of entries in the ENTRIES array */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2 +/* Entries defining the UDP port to protocol mapping, each laid out as a + * TUNNEL_ENCAP_UDP_PORT_ENTRY + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16 + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2 +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 + + +/***********************************/ +/* MC_CMD_RX_BALANCING + * Configure a port upconverter to distribute the packets on both RX engines. + * Packets are distributed based on a table with the destination vFIFO. The + * index of the table is a hash of source and destination of IPV4 and VLAN + * priority. 
+ */ +#define MC_CMD_RX_BALANCING 0x118 + +#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_RX_BALANCING_IN msgrequest */ +#define MC_CMD_RX_BALANCING_IN_LEN 4 +/* The RX port whose upconverter table will be modified */ +#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0 +#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1 +/* The VLAN priority associated to the table index and vFIFO */ +#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1 +/* The resulting bit of SRC^DST for indexing the table */ +#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1 +/* The RX engine to which the vFIFO in the table entry will point to */ +#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3 +#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1 + +/* MC_CMD_RX_BALANCING_OUT msgresponse */ +#define MC_CMD_RX_BALANCING_OUT_LEN 0 + + #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index d13ddf970..9ff062a36 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -868,6 +868,7 @@ struct vfdi_status; * be held to modify it. * @port_initialized: Port initialized? * @net_dev: Operating system network device. Consider holding the rtnl lock + * @fixed_features: Features which cannot be turned off * @stats_buffer: DMA buffer for statistics * @phy_type: PHY type * @phy_op: PHY interface @@ -916,7 +917,6 @@ struct vfdi_status; * @stats_lock: Statistics update lock. Must be held when calling * efx_nic_type::{update,start,stop}_stats. * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb - * @mc_promisc: Whether in multicast promiscuous mode when last changed * * This is stored in the private area of the &struct net_device. */ @@ -1008,6 +1008,8 @@ struct efx_nic { bool port_initialized; struct net_device *net_dev; + netdev_features_t fixed_features; + struct efx_buffer stats_buffer; u64 rx_nodesc_drops_total; u64 rx_nodesc_drops_while_down; @@ -1065,7 +1067,6 @@ struct efx_nic { int last_irq_cpu; spinlock_t stats_lock; atomic_t n_rx_noskb_drops; - bool mc_promisc; }; static inline int efx_dev_registered(struct efx_nic *efx) @@ -1333,6 +1334,8 @@ struct efx_nic_type { int (*ptp_set_ts_config)(struct efx_nic *efx, struct hwtstamp_config *init); int (*sriov_configure)(struct efx_nic *efx, int num_vfs); + int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); int (*sriov_init)(struct efx_nic *efx); void (*sriov_fini)(struct efx_nic *efx); bool (*sriov_wanted)(struct efx_nic *efx); @@ -1521,4 +1524,16 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } +/* Get all supported features. + * If a feature is not fixed, it is present in hw_features. + * If a feature is fixed, it does not present in hw_features, but + * always in features. 
+ */ +static inline netdev_features_t efx_supported_features(const struct efx_nic *efx) +{ + const struct net_device *net_dev = efx->net_dev; + + return net_dev->features | net_dev->hw_features; +} + #endif /* EFX_NET_DRIVER_H */ diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 0b536e27d..96944c3c9 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -519,6 +519,9 @@ enum { #ifdef CONFIG_SFC_SRIOV * @vf: Pointer to VF data structure #endif + * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero + * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock. + * @vlan_lock: Lock to serialize access to vlan_list. */ struct efx_ef10_nic_data { struct efx_buffer mcdi_buf; @@ -550,6 +553,8 @@ struct efx_ef10_nic_data { struct ef10_vf *vf; #endif u8 vport_mac[ETH_ALEN]; + struct list_head vlan_list; + struct mutex vlan_lock; }; int efx_init_sriov(void); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index b69d0e1e8..503a3b6dc 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2195,6 +2195,12 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * } } +static const struct acpi_device_id smc91x_acpi_match[] = { + { "LNRO0003", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, smc91x_acpi_match); + #if IS_BUILTIN(CONFIG_OF) static const struct of_device_id smc91x_match[] = { { .compatible = "smsc,lan91c94", }, @@ -2281,7 +2287,6 @@ static int smc_drv_probe(struct platform_device *pdev) #if IS_BUILTIN(CONFIG_OF) match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev); if (match) { - struct device_node *np = pdev->dev.of_node; u32 val; /* Optional pwrdwn GPIO configured? */ @@ -2307,7 +2312,8 @@ static int smc_drv_probe(struct platform_device *pdev) usleep_range(750, 1000); /* Combination of IO widths supported, default to 16-bit */ - if (!of_property_read_u32(np, "reg-io-width", &val)) { + if (!device_property_read_u32(&pdev->dev, "reg-io-width", + &val)) { if (val & 1) lp->cfg.flags |= SMC91X_USE_8BIT; if ((val == 0) || (val & 2)) @@ -2485,7 +2491,8 @@ static struct platform_driver smc_driver = { .driver = { .name = CARDNAME, .pm = &smc_drv_pm_ops, - .of_match_table = of_match_ptr(smc91x_match), + .of_match_table = of_match_ptr(smc91x_match), + .acpi_match_table = smc91x_acpi_match, }, }; diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index e17671c9d..ea8465467 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -470,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, #endif #if ! 
SMC_CAN_USE_8BIT +#undef SMC_inb #define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) +#undef SMC_outb #define SMC_outb(x, ioaddr, reg) BUG() #define SMC_insb(a, r, p, l) BUG() #define SMC_outsb(a, r, p, l) BUG() diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index b5ab5e120..4f8910b7d 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -114,7 +114,6 @@ struct smsc911x_data { /* spinlock to ensure register accesses are serialised */ spinlock_t dev_lock; - struct phy_device *phy_dev; struct mii_bus *mii_bus; unsigned int using_extphy; int last_duplex; @@ -833,7 +832,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata) static int smsc911x_phy_loopbacktest(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; + struct phy_device *phy_dev = dev->phydev; int result = -EIO; unsigned int i, val; unsigned long flags; @@ -903,7 +902,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) { - struct phy_device *phy_dev = pdata->phy_dev; + struct net_device *ndev = pdata->dev; + struct phy_device *phy_dev = ndev->phydev; u32 afc = smsc911x_reg_read(pdata, AFC_CFG); u32 flow; unsigned long flags; @@ -944,7 +944,7 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) static void smsc911x_phy_adjust_link(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; + struct phy_device *phy_dev = dev->phydev; unsigned long flags; int carrier; @@ -1037,7 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev) SUPPORTED_Asym_Pause); phydev->advertising = phydev->supported; - pdata->phy_dev = phydev; pdata->last_duplex = -1; pdata->last_carrier = -1; @@ -1100,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev, goto err_out_free_bus_2; } - if (smsc911x_mii_probe(dev) < 0) { - SMSC_WARN(pdata, probe, "Error registering mii bus"); - goto err_out_unregister_bus_3; - } - return 0; -err_out_unregister_bus_3: - mdiobus_unregister(pdata->mii_bus); err_out_free_bus_2: mdiobus_free(pdata->mii_bus); err_out_1: @@ -1338,9 +1330,11 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) { + struct net_device *ndev = pdata->dev; + struct phy_device *phy_dev = ndev->phydev; int rc = 0; - if (!pdata->phy_dev) + if (!phy_dev) return rc; /* If the internal PHY is in General Power-Down mode, all, except the @@ -1350,7 +1344,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) * In that case, clear the bit 0.11, so the PHY powers up and we can * access to the phy registers. */ - rc = phy_read(pdata->phy_dev, MII_BMCR); + rc = phy_read(phy_dev, MII_BMCR); if (rc < 0) { SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); return rc; @@ -1360,7 +1354,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) * disable the general power down-mode. 
*/ if (rc & BMCR_PDOWN) { - rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); + rc = phy_write(phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); if (rc < 0) { SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); return rc; @@ -1374,12 +1368,14 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) { + struct net_device *ndev = pdata->dev; + struct phy_device *phy_dev = ndev->phydev; int rc = 0; - if (!pdata->phy_dev) + if (!phy_dev) return rc; - rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); + rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) { SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); @@ -1389,7 +1385,7 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) /* Only disable if energy detect mode is already enabled */ if (rc & MII_LAN83C185_EDPWRDOWN) { /* Disable energy detect mode for this SMSC Transceivers */ - rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, + rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS, rc & (~MII_LAN83C185_EDPWRDOWN)); if (rc < 0) { @@ -1405,12 +1401,14 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) { + struct net_device *ndev = pdata->dev; + struct phy_device *phy_dev = ndev->phydev; int rc = 0; - if (!pdata->phy_dev) + if (!phy_dev) return rc; - rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); + rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) { SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); @@ -1420,7 +1418,7 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) /* Only enable if energy detect mode is already disabled */ if (!(rc & MII_LAN83C185_EDPWRDOWN)) { /* Enable energy detect mode for this SMSC Transceivers */ - rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, + rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS, rc | MII_LAN83C185_EDPWRDOWN); if (rc < 0) { @@ -1509,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev) smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); } +static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smsc911x_data *pdata = netdev_priv(dev); + u32 intsts = smsc911x_reg_read(pdata, INT_STS); + u32 inten = smsc911x_reg_read(pdata, INT_EN); + int serviced = IRQ_NONE; + u32 temp; + + if (unlikely(intsts & inten & INT_STS_SW_INT_)) { + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_SW_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); + pdata->software_irq_signal = 1; + smp_wmb(); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { + /* Called when there is a multicast update scheduled and + * it is now safe to complete the update */ + SMSC_TRACE(pdata, intr, "RX Stop interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); + if (pdata->multicast_update_pending) + smsc911x_rx_multicast_update_workaround(pdata); + serviced = IRQ_HANDLED; + } + + if (intsts & inten & INT_STS_TDFA_) { + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + smsc911x_reg_write(pdata, FIFO_INT, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); + netif_wake_queue(dev); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXE_)) { + SMSC_TRACE(pdata, intr, "RX Error interrupt"); + 
smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); + serviced = IRQ_HANDLED; + } + + if (likely(intsts & inten & INT_STS_RSFL_)) { + if (likely(napi_schedule_prep(&pdata->napi))) { + /* Disable Rx interrupts */ + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_RSFL_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + /* Schedule a NAPI poll */ + __napi_schedule(&pdata->napi); + } else { + SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); + } + serviced = IRQ_HANDLED; + } + + return serviced; +} + static int smsc911x_open(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); unsigned int timeout; unsigned int temp; unsigned int intcfg; + int retval; + int irq_flags; - /* if the phy is not yet registered, retry later*/ - if (!pdata->phy_dev) { - SMSC_WARN(pdata, hw, "phy_dev is NULL"); - return -EAGAIN; + /* find and start the given phy */ + if (!dev->phydev) { + retval = smsc911x_mii_probe(dev); + if (retval < 0) { + SMSC_WARN(pdata, probe, "Error starting phy"); + goto out; + } } /* Reset the LAN911x */ - if (smsc911x_soft_reset(pdata)) { + retval = smsc911x_soft_reset(pdata); + if (retval) { SMSC_WARN(pdata, hw, "soft reset failed"); - return -EIO; + goto mii_free_out; } smsc911x_reg_write(pdata, HW_CFG, 0x00050000); @@ -1581,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev) pdata->software_irq_signal = 0; smp_wmb(); + irq_flags = irq_get_trigger_type(dev->irq); + retval = request_irq(dev->irq, smsc911x_irqhandler, + irq_flags | IRQF_SHARED, dev->name, dev); + if (retval) { + SMSC_WARN(pdata, probe, + "Unable to claim requested irq: %d", dev->irq); + goto mii_free_out; + } + temp = smsc911x_reg_read(pdata, INT_EN); temp |= INT_EN_SW_INT_EN_; smsc911x_reg_write(pdata, INT_EN, temp); @@ -1595,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev) if (!pdata->software_irq_signal) { netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", dev->irq); - return -ENODEV; + retval = -ENODEV; + goto irq_stop_out; } SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", dev->irq); @@ -1608,7 +1683,7 @@ static int smsc911x_open(struct net_device *dev) pdata->last_carrier = -1; /* Bring the PHY up */ - phy_start(pdata->phy_dev); + phy_start(dev->phydev); temp = smsc911x_reg_read(pdata, HW_CFG); /* Preserve TX FIFO size and external PHY configuration */ @@ -1641,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev) netif_start_queue(dev); return 0; + +irq_stop_out: + free_irq(dev->irq, dev); +mii_free_out: + phy_disconnect(dev->phydev); + dev->phydev = NULL; +out: + return retval; } /* Entry point for stopping the interface */ @@ -1662,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev) dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); smsc911x_tx_update_txcounters(dev); + free_irq(dev->irq, dev); + /* Bring the PHY down */ - if (pdata->phy_dev) - phy_stop(pdata->phy_dev); + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); + dev->phydev = NULL; + } + netif_carrier_off(dev); SMSC_TRACE(pdata, ifdown, "Interface stopped"); return 0; @@ -1806,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev) spin_unlock_irqrestore(&pdata->mac_lock, flags); } -static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct smsc911x_data *pdata = netdev_priv(dev); - u32 intsts = smsc911x_reg_read(pdata, INT_STS); - u32 inten = smsc911x_reg_read(pdata, INT_EN); - int serviced = IRQ_NONE; - u32 temp; - - if (unlikely(intsts & inten & 
INT_STS_SW_INT_)) { - temp = smsc911x_reg_read(pdata, INT_EN); - temp &= (~INT_EN_SW_INT_EN_); - smsc911x_reg_write(pdata, INT_EN, temp); - smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); - pdata->software_irq_signal = 1; - smp_wmb(); - serviced = IRQ_HANDLED; - } - - if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { - /* Called when there is a multicast update scheduled and - * it is now safe to complete the update */ - SMSC_TRACE(pdata, intr, "RX Stop interrupt"); - smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); - if (pdata->multicast_update_pending) - smsc911x_rx_multicast_update_workaround(pdata); - serviced = IRQ_HANDLED; - } - - if (intsts & inten & INT_STS_TDFA_) { - temp = smsc911x_reg_read(pdata, FIFO_INT); - temp |= FIFO_INT_TX_AVAIL_LEVEL_; - smsc911x_reg_write(pdata, FIFO_INT, temp); - smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); - netif_wake_queue(dev); - serviced = IRQ_HANDLED; - } - - if (unlikely(intsts & inten & INT_STS_RXE_)) { - SMSC_TRACE(pdata, intr, "RX Error interrupt"); - smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); - serviced = IRQ_HANDLED; - } - - if (likely(intsts & inten & INT_STS_RSFL_)) { - if (likely(napi_schedule_prep(&pdata->napi))) { - /* Disable Rx interrupts */ - temp = smsc911x_reg_read(pdata, INT_EN); - temp &= (~INT_EN_RSFL_EN_); - smsc911x_reg_write(pdata, INT_EN, temp); - /* Schedule a NAPI poll */ - __napi_schedule(&pdata->napi); - } else { - SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); - } - serviced = IRQ_HANDLED; - } - - return serviced; -} - #ifdef CONFIG_NET_POLL_CONTROLLER static void smsc911x_poll_controller(struct net_device *dev) { @@ -1904,30 +1932,10 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p) /* Standard ioctls for mii-tool */ static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct smsc911x_data *pdata = netdev_priv(dev); - - if (!netif_running(dev) || !pdata->phy_dev) + if (!netif_running(dev) || !dev->phydev) return -EINVAL; - return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); -} - -static int -smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - return phy_ethtool_gset(pdata->phy_dev, cmd); -} - -static int -smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - return phy_ethtool_sset(pdata->phy_dev, cmd); + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, @@ -1941,9 +1949,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, static int smsc911x_ethtool_nwayreset(struct net_device *dev) { - struct smsc911x_data *pdata = netdev_priv(dev); - - return phy_start_aneg(pdata->phy_dev); + return phy_start_aneg(dev->phydev); } static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev) @@ -1969,7 +1975,7 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; + struct phy_device *phy_dev = dev->phydev; unsigned long flags; unsigned int i; unsigned int j = 0; @@ -2115,8 +2121,6 @@ static int smsc911x_ethtool_set_eeprom(struct net_device *dev, } static const struct ethtool_ops smsc911x_ethtool_ops = { - .get_settings = smsc911x_ethtool_getsettings, - .set_settings = smsc911x_ethtool_setsettings, .get_link = ethtool_op_get_link, 
.get_drvinfo = smsc911x_ethtool_getdrvinfo, .nway_reset = smsc911x_ethtool_nwayreset, @@ -2128,6 +2132,8 @@ static const struct ethtool_ops smsc911x_ethtool_ops = { .get_eeprom = smsc911x_ethtool_get_eeprom, .set_eeprom = smsc911x_ethtool_set_eeprom, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops smsc911x_netdev_ops = { @@ -2308,17 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev) pdata = netdev_priv(dev); BUG_ON(!pdata); BUG_ON(!pdata->ioaddr); - BUG_ON(!pdata->phy_dev); + WARN_ON(dev->phydev); SMSC_TRACE(pdata, ifdown, "Stopping driver"); - phy_disconnect(pdata->phy_dev); - pdata->phy_dev = NULL; mdiobus_unregister(pdata->mii_bus); mdiobus_free(pdata->mii_bus); unregister_netdev(dev); - free_irq(dev->irq, dev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smsc911x-memory"); if (!res) @@ -2403,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) struct smsc911x_data *pdata; struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); struct resource *res; - unsigned int intcfg = 0; - int res_size, irq, irq_flags; + int res_size, irq; int retval; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -2443,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev) pdata = netdev_priv(dev); dev->irq = irq; - irq_flags = irq_get_trigger_type(irq); pdata->ioaddr = ioremap_nocache(res->start, res_size); pdata->dev = dev; @@ -2490,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev) if (retval < 0) goto out_disable_resources; - /* configure irq polarity and type before connecting isr */ - if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) - intcfg |= INT_CFG_IRQ_POL_; - - if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL) - intcfg |= INT_CFG_IRQ_TYPE_; - - smsc911x_reg_write(pdata, INT_CFG, intcfg); - - /* Ensure interrupts are globally disabled before connecting ISR */ - smsc911x_disable_irq_chip(dev); + netif_carrier_off(dev); - retval = request_irq(dev->irq, smsc911x_irqhandler, - irq_flags | IRQF_SHARED, dev->name, dev); + retval = smsc911x_mii_init(pdev, dev); if (retval) { - SMSC_WARN(pdata, probe, - "Unable to claim requested irq: %d", dev->irq); + SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); goto out_disable_resources; } - netif_carrier_off(dev); - retval = register_netdev(dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i registering device", retval); - goto out_free_irq; + goto out_disable_resources; } else { SMSC_TRACE(pdata, probe, "Network interface: \"%s\"", dev->name); } - retval = smsc911x_mii_init(pdev, dev); - if (retval) { - SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); - goto out_unregister_netdev_5; - } - spin_lock_irq(&pdata->mac_lock); /* Check if mac address has been specified when bringing interface up */ @@ -2562,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev) return 0; -out_unregister_netdev_5: - unregister_netdev(dev); -out_free_irq: - free_irq(dev->irq, dev); out_disable_resources: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 8594b9e8b..b7bfed4bc 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -76,7 +76,6 @@ struct smsc9420_pdata { bool rx_csum; u32 msg_enable; - struct 
phy_device *phy_dev; struct mii_bus *mii_bus; int last_duplex; int last_carrier; @@ -226,36 +225,10 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) /* Standard ioctls for mii-tool */ static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!netif_running(dev) || !pd->phy_dev) + if (!netif_running(dev) || !dev->phydev) return -EINVAL; - return phy_mii_ioctl(pd->phy_dev, ifr, cmd); -} - -static int smsc9420_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!pd->phy_dev) - return -ENODEV; - - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - return phy_ethtool_gset(pd->phy_dev, cmd); -} - -static int smsc9420_ethtool_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!pd->phy_dev) - return -ENODEV; - - return phy_ethtool_sset(pd->phy_dev, cmd); + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev, @@ -283,12 +256,10 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data) static int smsc9420_ethtool_nway_reset(struct net_device *netdev) { - struct smsc9420_pdata *pd = netdev_priv(netdev); - - if (!pd->phy_dev) + if (!netdev->phydev) return -ENODEV; - return phy_start_aneg(pd->phy_dev); + return phy_start_aneg(netdev->phydev); } static int smsc9420_ethtool_getregslen(struct net_device *dev) @@ -302,7 +273,7 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { struct smsc9420_pdata *pd = netdev_priv(dev); - struct phy_device *phy_dev = pd->phy_dev; + struct phy_device *phy_dev = dev->phydev; unsigned int i, j = 0; u32 *data = buf; @@ -443,8 +414,6 @@ static int smsc9420_ethtool_set_eeprom(struct net_device *dev, } static const struct ethtool_ops smsc9420_ethtool_ops = { - .get_settings = smsc9420_ethtool_get_settings, - .set_settings = smsc9420_ethtool_set_settings, .get_drvinfo = smsc9420_ethtool_get_drvinfo, .get_msglevel = smsc9420_ethtool_get_msglevel, .set_msglevel = smsc9420_ethtool_set_msglevel, @@ -456,6 +425,8 @@ static const struct ethtool_ops smsc9420_ethtool_ops = { .get_regs_len = smsc9420_ethtool_getregslen, .get_regs = smsc9420_ethtool_getregs, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /* Sets the device MAC address to dev_addr */ @@ -736,7 +707,7 @@ static int smsc9420_stop(struct net_device *dev) ulong flags; BUG_ON(!pd); - BUG_ON(!pd->phy_dev); + BUG_ON(!dev->phydev); /* disable master interrupt */ spin_lock_irqsave(&pd->int_lock, flags); @@ -757,10 +728,9 @@ static int smsc9420_stop(struct net_device *dev) smsc9420_dmac_soft_reset(pd); - phy_stop(pd->phy_dev); + phy_stop(dev->phydev); - phy_disconnect(pd->phy_dev); - pd->phy_dev = NULL; + phy_disconnect(dev->phydev); mdiobus_unregister(pd->mii_bus); mdiobus_free(pd->mii_bus); @@ -1093,7 +1063,8 @@ static void smsc9420_set_multicast_list(struct net_device *dev) static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) { - struct phy_device *phy_dev = pd->phy_dev; + struct net_device *dev = pd->dev; + struct phy_device *phy_dev = dev->phydev; u32 flow; if (phy_dev->duplex == DUPLEX_FULL) { @@ -1122,7 +1093,7 @@ static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) static void smsc9420_phy_adjust_link(struct net_device *dev) { 
struct smsc9420_pdata *pd = netdev_priv(dev); - struct phy_device *phy_dev = pd->phy_dev; + struct phy_device *phy_dev = dev->phydev; int carrier; if (phy_dev->duplex != pd->last_duplex) { @@ -1155,7 +1126,7 @@ static int smsc9420_mii_probe(struct net_device *dev) struct smsc9420_pdata *pd = netdev_priv(dev); struct phy_device *phydev = NULL; - BUG_ON(pd->phy_dev); + BUG_ON(dev->phydev); /* Device only supports internal PHY at address 1 */ phydev = mdiobus_get_phy(pd->mii_bus, 1); @@ -1179,7 +1150,6 @@ static int smsc9420_mii_probe(struct net_device *dev) phy_attached_info(phydev); - pd->phy_dev = phydev; pd->last_duplex = -1; pd->last_carrier = -1; @@ -1440,7 +1410,7 @@ static int smsc9420_open(struct net_device *dev) } /* Bring the PHY up */ - phy_start(pd->phy_dev); + phy_start(dev->phydev); napi_enable(&pd->napi); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index cec147d1d..8f06a6621 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -40,7 +40,7 @@ config DWMAC_GENERIC config DWMAC_IPQ806X tristate "QCA IPQ806x DWMAC support" default ARCH_QCOM - depends on OF + depends on OF && (ARCH_QCOM || COMPILE_TEST) select MFD_SYSCON help Support for QCA IPQ806X DWMAC Ethernet. @@ -53,7 +53,7 @@ config DWMAC_IPQ806X config DWMAC_LPC18XX tristate "NXP LPC18xx/43xx DWMAC support" default ARCH_LPC18XX - depends on OF + depends on OF && (ARCH_LPC18XX || COMPILE_TEST) select MFD_SYSCON ---help--- Support for NXP LPC18xx/43xx DWMAC Ethernet. @@ -61,7 +61,7 @@ config DWMAC_LPC18XX config DWMAC_MESON tristate "Amlogic Meson dwmac support" default ARCH_MESON - depends on OF + depends on OF && (ARCH_MESON || COMPILE_TEST) help Support for Ethernet controller on Amlogic Meson SoCs. @@ -72,7 +72,7 @@ config DWMAC_MESON config DWMAC_ROCKCHIP tristate "Rockchip dwmac support" default ARCH_ROCKCHIP - depends on OF + depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST) select MFD_SYSCON help Support for Ethernet controller on Rockchip RK3288 SoC. @@ -83,7 +83,7 @@ config DWMAC_ROCKCHIP config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" default ARCH_SOCFPGA - depends on OF + depends on OF && (ARCH_SOCFPGA || COMPILE_TEST) select MFD_SYSCON help Support for ethernet controller on Altera SOCFPGA @@ -95,7 +95,7 @@ config DWMAC_SOCFPGA config DWMAC_STI tristate "STi GMAC support" default ARCH_STI - depends on OF + depends on OF && (ARCH_STI || COMPILE_TEST) select MFD_SYSCON ---help--- Support for ethernet controller on STi SOCs. @@ -107,7 +107,7 @@ config DWMAC_STI config DWMAC_SUNXI tristate "Allwinner GMAC support" default ARCH_SUNXI - depends on OF + depends on OF && (ARCH_SUNXI || COMPILE_TEST) ---help--- Support for Allwinner A20/A31 GMAC ethernet controllers. 
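[Editor's note: the seven stmmac Kconfig hunks above all make the same change, so one illustrative sketch may help. DWMAC_FOO and ARCH_FOO below are hypothetical placeholders, not symbols from this patch; only the dependency pattern is taken from it.]

config DWMAC_FOO
	tristate "Hypothetical FOO DWMAC support"
	default ARCH_FOO
	depends on OF && (ARCH_FOO || COMPILE_TEST)
	select MFD_SYSCON
	help
	  Sketch only. The glue driver still defaults on for its own SoC
	  family, but the added COMPILE_TEST alternative lets allyesconfig
	  and allmodconfig builds on unrelated architectures compile it,
	  catching build breakage early without changing what is enabled
	  on the target hardware.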
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 0fb362d5a..44b630cd1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -11,11 +11,12 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o -obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o +obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o stmmac-platform-objs:= stmmac_platform.o +dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o stmmac-pci-objs:= stmmac_pci.o diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c new file mode 100644 index 000000000..2920e2ee3 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -0,0 +1,274 @@ +/* Copyright Altera Corporation (C) 2016. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * Author: Tien Hock Loh <thloh@altera.com> + */ + +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/stmmac.h> + +#include "stmmac.h" +#include "stmmac_platform.h" +#include "altr_tse_pcs.h" + +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII BIT(1) +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII BIT(2) +#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK GENMASK(1, 0) + +#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) +#define TSE_PCS_CONTROL_REG 0x00 +#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) +#define TSE_PCS_IF_MODE_REG 0x28 +#define TSE_PCS_LINK_TIMER_0_REG 0x24 +#define TSE_PCS_LINK_TIMER_1_REG 0x26 +#define TSE_PCS_SIZE 0x40 +#define TSE_PCS_STATUS_AN_COMPLETED_MASK BIT(5) +#define TSE_PCS_STATUS_LINK_MASK 0x0004 +#define TSE_PCS_STATUS_REG 0x02 +#define TSE_PCS_SGMII_SPEED_1000 BIT(3) +#define TSE_PCS_SGMII_SPEED_100 BIT(2) +#define TSE_PCS_SGMII_SPEED_10 0x0 +#define TSE_PCS_SW_RST_MASK 0x8000 +#define TSE_PCS_PARTNER_ABILITY_REG 0x0A +#define TSE_PCS_PARTNER_DUPLEX_FULL 0x1000 +#define TSE_PCS_PARTNER_DUPLEX_HALF 0x0000 +#define TSE_PCS_PARTNER_DUPLEX_MASK 0x1000 +#define TSE_PCS_PARTNER_SPEED_MASK GENMASK(11, 10) +#define TSE_PCS_PARTNER_SPEED_1000 BIT(11) +#define TSE_PCS_PARTNER_SPEED_100 BIT(10) +#define TSE_PCS_PARTNER_SPEED_10 0x0000 +#define TSE_PCS_SGMII_SPEED_MASK GENMASK(3, 2) +#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 +#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 +#define TSE_PCS_SW_RESET_TIMEOUT 100 +#define TSE_PCS_USE_SGMII_AN_MASK BIT(2) +#define TSE_PCS_USE_SGMII_ENA BIT(1) + +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_DISABLE 0x0001 +#define
SGMII_ADAPTER_ENABLE 0x0000 + +#define AUTONEGO_LINK_TIMER 20 + +static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) +{ + int counter = 0; + u16 val; + + val = readw(base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_SW_RST_MASK; + writew(val, base + TSE_PCS_CONTROL_REG); + + while (counter < TSE_PCS_SW_RESET_TIMEOUT) { + val = readw(base + TSE_PCS_CONTROL_REG); + val &= TSE_PCS_SW_RST_MASK; + if (val == 0) + break; + counter++; + udelay(1); + } + if (counter >= TSE_PCS_SW_RESET_TIMEOUT) { + dev_err(pcs->dev, "PCS could not get out of sw reset\n"); + return -ETIMEDOUT; + } + + return 0; +} + +int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs) +{ + int ret = 0; + + writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); + + writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); + writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); + + ret = tse_pcs_reset(base, pcs); + if (ret == 0) + writew(SGMII_ADAPTER_ENABLE, + pcs->sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + + return ret; +} + +static void pcs_link_timer_callback(unsigned long data) +{ + u16 val = 0; + struct tse_pcs *pcs = (struct tse_pcs *)data; + void __iomem *tse_pcs_base = pcs->tse_pcs_base; + void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + + val = readw(tse_pcs_base + TSE_PCS_STATUS_REG); + val &= TSE_PCS_STATUS_LINK_MASK; + + if (val != 0) { + dev_dbg(pcs->dev, "Adapter: Link is established\n"); + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + } else { + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} + +static void auto_nego_timer_callback(unsigned long data) +{ + u16 val = 0; + u16 speed = 0; + u16 duplex = 0; + struct tse_pcs *pcs = (struct tse_pcs *)data; + void __iomem *tse_pcs_base = pcs->tse_pcs_base; + void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + + val = readw(tse_pcs_base + TSE_PCS_STATUS_REG); + val &= TSE_PCS_STATUS_AN_COMPLETED_MASK; + + if (val != 0) { + dev_dbg(pcs->dev, "Adapter: Auto Negotiation is completed\n"); + val = readw(tse_pcs_base + TSE_PCS_PARTNER_ABILITY_REG); + speed = val & TSE_PCS_PARTNER_SPEED_MASK; + duplex = val & TSE_PCS_PARTNER_DUPLEX_MASK; + + if (speed == TSE_PCS_PARTNER_SPEED_10 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 10/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_100 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 100/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_1000 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 1000/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_10 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_100 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_1000 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else + dev_err(pcs->dev, + "Adapter: Invalid Partner Speed and Duplex\n"); + + if (duplex == TSE_PCS_PARTNER_DUPLEX_FULL && + (speed == TSE_PCS_PARTNER_SPEED_10 || + speed == TSE_PCS_PARTNER_SPEED_100 || + speed == TSE_PCS_PARTNER_SPEED_1000)) + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + } else { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + 
val |= TSE_PCS_CONTROL_RESTART_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + tse_pcs_reset(tse_pcs_base, pcs); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} + +static void aneg_link_timer_callback(unsigned long data) +{ + struct tse_pcs *pcs = (struct tse_pcs *)data; + + if (pcs->autoneg == AUTONEG_ENABLE) + auto_nego_timer_callback(data); + else if (pcs->autoneg == AUTONEG_DISABLE) + pcs_link_timer_callback(data); +} + +void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, + unsigned int speed) +{ + void __iomem *tse_pcs_base = pcs->tse_pcs_base; + void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + u32 val; + + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + + pcs->autoneg = phy_dev->autoneg; + + if (phy_dev->autoneg == AUTONEG_ENABLE) { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_CONTROL_AN_EN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val |= TSE_PCS_USE_SGMII_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_CONTROL_RESTART_AN_MASK; + + tse_pcs_reset(tse_pcs_base, pcs); + + setup_timer(&pcs->aneg_link_timer, + aneg_link_timer_callback, (unsigned long)pcs); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } else if (phy_dev->autoneg == AUTONEG_DISABLE) { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val &= ~TSE_PCS_CONTROL_AN_EN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val &= ~TSE_PCS_USE_SGMII_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val &= ~TSE_PCS_SGMII_SPEED_MASK; + + switch (speed) { + case 1000: + val |= TSE_PCS_SGMII_SPEED_1000; + break; + case 100: + val |= TSE_PCS_SGMII_SPEED_100; + break; + case 10: + val |= TSE_PCS_SGMII_SPEED_10; + break; + default: + return; + } + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + tse_pcs_reset(tse_pcs_base, pcs); + + setup_timer(&pcs->aneg_link_timer, + aneg_link_timer_callback, (unsigned long)pcs); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h new file mode 100644 index 000000000..2f5882450 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h @@ -0,0 +1,36 @@ +/* Copyright Altera Corporation (C) 2016. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * + * Author: Tien Hock Loh <thloh@altera.com> + */ + +#ifndef __TSE_PCS_H__ +#define __TSE_PCS_H__ + +#include <linux/phy.h> +#include <linux/timer.h> + +struct tse_pcs { + struct device *dev; + void __iomem *tse_pcs_base; + void __iomem *sgmii_adapter_base; + struct timer_list aneg_link_timer; + int autoneg; +}; + +int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs); +void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, + unsigned int speed); + +#endif /* __TSE_PCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index fc60368df..2533b91f1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -232,6 +232,11 @@ struct stmmac_extra_stats { #define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ #define DEFAULT_DMA_PBL 8 +/* PCS status and mask defines */ +#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */ +#define PCS_LINK_IRQ BIT(1) /* PCS Link */ +#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */ + /* Max/Min RI Watchdog Timer count value */ #define MAX_DMA_RIWT 0xff #define MIN_DMA_RIWT 0x20 @@ -272,9 +277,6 @@ enum dma_irq_status { #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) -#define CORE_PCS_ANE_COMPLETE (1 << 5) -#define CORE_PCS_LINK_STATUS (1 << 6) -#define CORE_RGMII_IRQ (1 << 7) #define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) /* Physical Coding Sublayer */ @@ -469,9 +471,12 @@ struct stmmac_ops { void (*reset_eee_mode)(struct mac_device_info *hw); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); - void (*ctrl_ane)(struct mac_device_info *hw, bool restart); - void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv); void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); + /* PCS calls */ + void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback); + void (*pcs_rane)(void __iomem *ioaddr, bool restart); + void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); }; /* PTP and HW Timer helpers */ @@ -524,6 +529,9 @@ struct mac_device_info { int unicast_filter_entries; int mcast_bits_log2; unsigned int rx_csum; + unsigned int pcs; + unsigned int pmt; + unsigned int ps; }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, @@ -546,6 +554,7 @@ void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); + extern const struct stmmac_mode_ops ring_mode_ops; extern const struct stmmac_mode_ops chain_mode_ops; extern const struct stmmac_desc_ops dwmac4_desc_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 0cd3ecff7..92105916e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -46,6 +46,7 @@ struct rk_priv_data { struct platform_device *pdev; int phy_iface; struct regulator *regulator; + bool suspended; const struct rk_gmac_ops *ops; bool clk_enabled; @@ -72,6 +73,122 @@ struct rk_priv_data { #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16)) #define GRF_CLR_BIT(nr) (BIT(nr+16)) +#define RK3228_GRF_MAC_CON0 0x0900 +#define RK3228_GRF_MAC_CON1 0x0904 + +/* RK3228_GRF_MAC_CON0 */ +#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) +#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F,
0) + +/* RK3228_GRF_MAC_CON1 */ +#define RK3228_GMAC_PHY_INTF_SEL_RGMII \ + (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) +#define RK3228_GMAC_PHY_INTF_SEL_RMII \ + (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) +#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3) +#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) +#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define RK3228_GMAC_SPEED_100M GRF_BIT(2) +#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7) +#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) +#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9)) +#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9)) +#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9)) +#define RK3228_GMAC_RMII_MODE GRF_BIT(10) +#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10) +#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0) +#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0) +#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) +#define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1) + +static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_PHY_INTF_SEL_RGMII | + RK3228_GMAC_RMII_MODE_CLR | + RK3228_GMAC_RXCLK_DLY_ENABLE | + RK3228_GMAC_TXCLK_DLY_ENABLE); + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0, + RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3228_GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_PHY_INTF_SEL_RMII | + RK3228_GMAC_RMII_MODE); + + /* set MAC to RMII mode */ + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11)); +} + +static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_2_5M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_25M); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_125M); + else + dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); +} + +static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_RMII_CLK_2_5M | + RK3228_GMAC_SPEED_10M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_RMII_CLK_25M | + RK3228_GMAC_SPEED_100M); + else + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); +} + +static const struct rk_gmac_ops rk3228_ops = { + .set_to_rgmii = rk3228_set_to_rgmii, + .set_to_rmii = rk3228_set_to_rmii, + .set_rgmii_speed = rk3228_set_rgmii_speed, + .set_rmii_speed = rk3228_set_rmii_speed, +}; + #define RK3288_GRF_SOC_CON1 0x0248 #define RK3288_GRF_SOC_CON3 0x0250 @@ -529,9 +646,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, return bsp_priv; } -static int rk_gmac_init(struct platform_device *pdev, void *priv) +static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) { - struct rk_priv_data *bsp_priv = priv; int ret; ret = phy_power_on(bsp_priv, true); @@ -545,14 +661,50 @@ static int rk_gmac_init(struct platform_device *pdev, void *priv) return 0; } -static void rk_gmac_exit(struct platform_device *pdev, void *priv) +static void rk_gmac_powerdown(struct rk_priv_data *gmac) { - struct rk_priv_data *gmac = priv; - phy_power_on(gmac, false); gmac_clk_enable(gmac, false); } +static int rk_gmac_init(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + + return rk_gmac_powerup(bsp_priv); +} + +static void rk_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + + rk_gmac_powerdown(bsp_priv); +} + +static void rk_gmac_suspend(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + + /* Keep the PHY up if we use Wake-on-Lan. */ + if (device_may_wakeup(&pdev->dev)) + return; + + rk_gmac_powerdown(bsp_priv); + bsp_priv->suspended = true; +} + +static void rk_gmac_resume(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + + /* The PHY was up for Wake-on-Lan. */ + if (!bsp_priv->suspended) + return; + + rk_gmac_powerup(bsp_priv); + bsp_priv->suspended = false; +} + static void rk_fix_speed(void *priv, unsigned int speed) { struct rk_priv_data *bsp_priv = priv; @@ -591,6 +743,8 @@ static int rk_gmac_probe(struct platform_device *pdev) plat_dat->init = rk_gmac_init; plat_dat->exit = rk_gmac_exit; plat_dat->fix_mac_speed = rk_fix_speed; + plat_dat->suspend = rk_gmac_suspend; + plat_dat->resume = rk_gmac_resume; plat_dat->bsp_priv = rk_gmac_setup(pdev, data); if (IS_ERR(plat_dat->bsp_priv)) @@ -604,6 +758,7 @@ static int rk_gmac_probe(struct platform_device *pdev) } static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index f13499fa1..bec6963ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -27,6 +27,11 @@ #include "stmmac.h" #include "stmmac_platform.h" +#include "altr_tse_pcs.h" + +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_DISABLE 0x0001 + #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 @@ -52,35 +57,46 @@ struct socfpga_dwmac { struct reset_control *stmmac_rst; void __iomem *splitter_base; bool f2h_ptp_ref_clk; + struct tse_pcs pcs; }; static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; + void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base; + 
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; + struct device *dev = dwmac->dev; + struct net_device *ndev = dev_get_drvdata(dev); + struct phy_device *phy_dev = ndev->phydev; u32 val; - if (!splitter_base) - return; - - val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); - val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK; - - switch (speed) { - case 1000: - val |= EMAC_SPLITTER_CTRL_SPEED_1000; - break; - case 100: - val |= EMAC_SPLITTER_CTRL_SPEED_100; - break; - case 10: - val |= EMAC_SPLITTER_CTRL_SPEED_10; - break; - default: - return; + if ((tse_pcs_base) && (sgmii_adapter_base)) + writew(SGMII_ADAPTER_DISABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + + if (splitter_base) { + val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); + val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK; + + switch (speed) { + case 1000: + val |= EMAC_SPLITTER_CTRL_SPEED_1000; + break; + case 100: + val |= EMAC_SPLITTER_CTRL_SPEED_100; + break; + case 10: + val |= EMAC_SPLITTER_CTRL_SPEED_10; + break; + default: + return; + } + writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); + if (tse_pcs_base && sgmii_adapter_base) + tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); } static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) @@ -88,9 +104,12 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * struct device_node *np = dev->of_node; struct regmap *sys_mgr_base_addr; u32 reg_offset, reg_shift; - int ret; - struct device_node *np_splitter; + int ret, index; + struct device_node *np_splitter = NULL; + struct device_node *np_sgmii_adapter = NULL; struct resource res_splitter; + struct resource res_tse_pcs; + struct resource res_sgmii_adapter; dwmac->interface = of_get_phy_mode(np); @@ -116,7 +135,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0); if (np_splitter) { - if (of_address_to_resource(np_splitter, 0, &res_splitter)) { + ret = of_address_to_resource(np_splitter, 0, &res_splitter); + of_node_put(np_splitter); + if (ret) { dev_info(dev, "Missing emac splitter address\n"); return -EINVAL; } @@ -128,12 +149,86 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * } } + np_sgmii_adapter = of_parse_phandle(np, + "altr,gmii-to-sgmii-converter", 0); + if (np_sgmii_adapter) { + index = of_property_match_string(np_sgmii_adapter, "reg-names", + "hps_emac_interface_splitter_avalon_slave"); + + if (index >= 0) { + if (of_address_to_resource(np_sgmii_adapter, index, + &res_splitter)) { + dev_err(dev, + "%s: ERROR: missing emac splitter address\n", + __func__); + ret = -EINVAL; + goto err_node_put; + } + + dwmac->splitter_base = + devm_ioremap_resource(dev, &res_splitter); + + if (IS_ERR(dwmac->splitter_base)) { + ret = PTR_ERR(dwmac->splitter_base); + goto err_node_put; + } + } + + index = of_property_match_string(np_sgmii_adapter, "reg-names", + "gmii_to_sgmii_adapter_avalon_slave"); + + if (index >= 0) { + if (of_address_to_resource(np_sgmii_adapter, index, + &res_sgmii_adapter)) { + dev_err(dev, + "%s: ERROR: failed mapping adapter\n", + __func__); + ret = -EINVAL; + goto err_node_put; + } + + dwmac->pcs.sgmii_adapter_base = + devm_ioremap_resource(dev, &res_sgmii_adapter); + + if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) { + ret = PTR_ERR(dwmac->pcs.sgmii_adapter_base); + goto err_node_put; + } + } + + index = of_property_match_string(np_sgmii_adapter, "reg-names", 
+ "eth_tse_control_port"); + + if (index >= 0) { + if (of_address_to_resource(np_sgmii_adapter, index, + &res_tse_pcs)) { + dev_err(dev, + "%s: ERROR: failed mapping tse control port\n", + __func__); + ret = -EINVAL; + goto err_node_put; + } + + dwmac->pcs.tse_pcs_base = + devm_ioremap_resource(dev, &res_tse_pcs); + + if (IS_ERR(dwmac->pcs.tse_pcs_base)) { + ret = PTR_ERR(dwmac->pcs.tse_pcs_base); + goto err_node_put; + } + } + } dwmac->reg_offset = reg_offset; dwmac->reg_shift = reg_shift; dwmac->sys_mgr_base_addr = sys_mgr_base_addr; dwmac->dev = dev; + of_node_put(np_sgmii_adapter); return 0; + +err_node_put: + of_node_put(np_sgmii_adapter); + return ret; } static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) @@ -151,6 +246,7 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) break; case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_SGMII: val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; break; default: @@ -191,6 +287,12 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) */ if (dwmac->stmmac_rst) reset_control_deassert(dwmac->stmmac_rst); + if (phymode == PHY_INTERFACE_MODE_SGMII) { + if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) { + dev_err(dwmac->dev, "Unable to initialize TSE PCS"); + return -EINVAL; + } + } return 0; } @@ -225,6 +327,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (!ret) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *stpriv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index b0593a426..ff3e5ab39 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -38,19 +38,26 @@ #define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ #define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ -enum dwmac1000_irq_status { - lpiis_irq = 0x400, - time_stamp_irq = 0x0200, - mmc_rx_csum_offload_irq = 0x0080, - mmc_tx_irq = 0x0040, - mmc_rx_irq = 0x0020, - mmc_irq = 0x0010, - pmt_irq = 0x0008, - pcs_ane_irq = 0x0004, - pcs_link_irq = 0x0002, - rgmii_irq = 0x0001, -}; -#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */ +#define GMAC_INT_STATUS_PMT BIT(3) +#define GMAC_INT_STATUS_MMCIS BIT(4) +#define GMAC_INT_STATUS_MMCRIS BIT(5) +#define GMAC_INT_STATUS_MMCTIS BIT(6) +#define GMAC_INT_STATUS_MMCCSUM BIT(7) +#define GMAC_INT_STATUS_TSTAMP BIT(9) +#define GMAC_INT_STATUS_LPIIS BIT(10) + +/* interrupt mask register */ +#define GMAC_INT_MASK 0x0000003c +#define GMAC_INT_DISABLE_RGMII BIT(0) +#define GMAC_INT_DISABLE_PCSLINK BIT(1) +#define GMAC_INT_DISABLE_PCSAN BIT(2) +#define GMAC_INT_DISABLE_PMT BIT(3) +#define GMAC_INT_DISABLE_TIMESTAMP BIT(9) +#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \ + GMAC_INT_DISABLE_PCSLINK | \ + GMAC_INT_DISABLE_PCSAN) +#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \ + GMAC_INT_DISABLE_PCS) /* PMT Control and Status */ #define GMAC_PMT 0x0000002c @@ -90,42 +97,23 @@ enum power_event { (reg * 8)) #define GMAC_MAX_PERFECT_ADDRESSES 1 -/* PCS registers (AN/TBI/SGMII/RGMII) offset */ -#define GMAC_AN_CTRL 0x000000c0 /* AN control */ -#define GMAC_AN_STATUS 0x000000c4 /* AN status */ -#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ -#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. 
link partener ability */ -#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ -#define GMAC_TBI 0x000000d4 /* TBI extend status */ -#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */ - -/* AN Configuration defines */ -#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */ -#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */ -#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */ -#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */ -#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */ -#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */ - -/* AN Status defines */ -#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */ -#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */ -#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */ -#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */ - -/* Register 54 (SGMII/RGMII status register) */ -#define GMAC_S_R_GMII_LINK 0x8 -#define GMAC_S_R_GMII_SPEED 0x5 -#define GMAC_S_R_GMII_SPEED_SHIFT 0x1 -#define GMAC_S_R_GMII_MODE 0x1 -#define GMAC_S_R_GMII_SPEED_125 2 -#define GMAC_S_R_GMII_SPEED_25 1 - -/* Common ADV and LPA defines */ -#define GMAC_ANE_FD (1 << 5) -#define GMAC_ANE_HD (1 << 6) -#define GMAC_ANE_PSE (3 << 7) -#define GMAC_ANE_PSE_SHIFT 7 +#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */ +#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */ + +/* SGMII/RGMII status register */ +#define GMAC_RGSMIIIS_LNKMODE BIT(0) +#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1) +#define GMAC_RGSMIIIS_SPEED_SHIFT 1 +#define GMAC_RGSMIIIS_LNKSTS BIT(3) +#define GMAC_RGSMIIIS_JABTO BIT(4) +#define GMAC_RGSMIIIS_FALSECARDET BIT(5) +#define GMAC_RGSMIIIS_SMIDRXS BIT(16) +/* LNKMOD */ +#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1 +/* LNKSPEED */ +#define GMAC_RGSMIIIS_SPEED_125 0x2 +#define GMAC_RGSMIIIS_SPEED_25 0x1 +#define GMAC_RGSMIIIS_SPEED_2_5 0x0 /* GMAC Configuration defines */ #define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index fb1eb578e..885a5e645 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -30,22 +30,48 @@ #include #include #include +#include "stmmac_pcs.h" #include "dwmac1000.h" static void dwmac1000_core_init(struct mac_device_info *hw, int mtu) { void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONTROL); + + /* Configure GMAC core */ value |= GMAC_CORE_INIT; + if (mtu > 1500) value |= GMAC_CONTROL_2K; if (mtu > 2000) value |= GMAC_CONTROL_JE; + if (hw->ps) { + value |= GMAC_CONTROL_TE; + + if (hw->ps == SPEED_1000) { + value &= ~GMAC_CONTROL_PS; + } else { + value |= GMAC_CONTROL_PS; + + if (hw->ps == SPEED_10) + value &= ~GMAC_CONTROL_FES; + else + value |= GMAC_CONTROL_FES; + } + } + writel(value, ioaddr + GMAC_CONTROL); /* Mask GMAC interrupts */ - writel(0x207, ioaddr + GMAC_INT_MASK); + value = GMAC_INT_DEFAULT_MASK; + + if (hw->pmt) + value &= ~GMAC_INT_DISABLE_PMT; + if (hw->pcs) + value &= ~GMAC_INT_DISABLE_PCS; + + writel(value, ioaddr + GMAC_INT_MASK); #ifdef STMMAC_VLAN_TAG_USED /* Tag detection without filtering */ @@ -235,12 +261,45 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode) } if (mode & WAKE_UCAST) { pr_debug("GMAC: WOL on global unicast\n"); - pmt |= global_unicast; + pmt |= power_down | global_unicast | wake_up_frame_en; } writel(pmt, ioaddr + 
GMAC_PMT); } +/* RGMII or SMII interface */ +static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 status; + + status = readl(ioaddr + GMAC_RGSMIIIS); + x->irq_rgmii_n++; + + /* Check the link status */ + if (status & GMAC_RGSMIIIS_LNKSTS) { + int speed_value; + + x->pcs_link = 1; + + speed_value = ((status & GMAC_RGSMIIIS_SPEED) >> + GMAC_RGSMIIIS_SPEED_SHIFT); + if (speed_value == GMAC_RGSMIIIS_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_RGSMIIIS_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; + + x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK); + + pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, + x->pcs_duplex ? "Full" : "Half"); + } else { + x->pcs_link = 0; + pr_info("Link is Down\n"); + } +} + static int dwmac1000_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { @@ -249,19 +308,20 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, int ret = 0; /* Not used events (e.g. MMC interrupts) are not handled. */ - if ((intr_status & mmc_tx_irq)) + if ((intr_status & GMAC_INT_STATUS_MMCTIS)) x->mmc_tx_irq_n++; - if (unlikely(intr_status & mmc_rx_irq)) + if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS)) x->mmc_rx_irq_n++; - if (unlikely(intr_status & mmc_rx_csum_offload_irq)) + if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM)) x->mmc_rx_csum_offload_irq_n++; - if (unlikely(intr_status & pmt_irq)) { + if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) { /* clear the PMT bits 5 and 6 by reading the PMT status reg */ readl(ioaddr + GMAC_PMT); x->irq_receive_pmt_irq_n++; } - /* MAC trx/rx EEE LPI entry/exit interrupts */ - if (intr_status & lpiis_irq) { + + /* MAC tx/rx EEE LPI entry/exit interrupts */ + if (intr_status & GMAC_INT_STATUS_LPIIS) { /* Clean LPI interrupt by reading the Reg 12 */ ret = readl(ioaddr + LPI_CTRL_STATUS); @@ -275,36 +335,10 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, x->irq_rx_path_exit_lpi_mode_n++; } - if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { - readl(ioaddr + GMAC_AN_STATUS); - x->irq_pcs_ane_n++; - } - if (intr_status & rgmii_irq) { - u32 status = readl(ioaddr + GMAC_S_R_GMII); - x->irq_rgmii_n++; - - /* Save and dump the link status. */ - if (status & GMAC_S_R_GMII_LINK) { - int speed_value = (status & GMAC_S_R_GMII_SPEED) >> - GMAC_S_R_GMII_SPEED_SHIFT; - x->pcs_duplex = (status & GMAC_S_R_GMII_MODE); - - if (speed_value == GMAC_S_R_GMII_SPEED_125) - x->pcs_speed = SPEED_1000; - else if (speed_value == GMAC_S_R_GMII_SPEED_25) - x->pcs_speed = SPEED_100; - else - x->pcs_speed = SPEED_10; + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); - x->pcs_link = 1; - pr_debug("%s: Link is Up - %d/%s\n", __func__, - (int)x->pcs_speed, - x->pcs_duplex ? 
"Full" : "Half"); - } else { - x->pcs_link = 0; - pr_debug("%s: Link is Down\n", __func__); - } - } + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac1000_rgsmii(ioaddr, x); return ret; } @@ -363,38 +397,20 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) writel(value, ioaddr + LPI_TIMER_CTRL); } -static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart) +static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback) { - void __iomem *ioaddr = hw->pcsr; - /* auto negotiation enable and External Loopback enable */ - u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; - - if (restart) - value |= GMAC_AN_CTRL_RAN; - - writel(value, ioaddr + GMAC_AN_CTRL); + dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); } -static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) +static void dwmac1000_rane(void __iomem *ioaddr, bool restart) { - void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_ANE_ADV); - - if (value & GMAC_ANE_FD) - adv->duplex = DUPLEX_FULL; - if (value & GMAC_ANE_HD) - adv->duplex |= DUPLEX_HALF; - - adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; - - value = readl(ioaddr + GMAC_ANE_LPA); - - if (value & GMAC_ANE_FD) - adv->lp_duplex = DUPLEX_FULL; - if (value & GMAC_ANE_HD) - adv->lp_duplex = DUPLEX_HALF; + dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); +} - adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; +static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); } static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) @@ -485,9 +501,10 @@ static const struct stmmac_ops dwmac1000_ops = { .reset_eee_mode = dwmac1000_reset_eee_mode, .set_eee_timer = dwmac1000_set_eee_timer, .set_eee_pls = dwmac1000_set_eee_pls, - .ctrl_ane = dwmac1000_ctrl_ane, - .get_adv = dwmac1000_get_adv, .debug = dwmac1000_debug, + .pcs_ctrl_ane = dwmac1000_ctrl_ane, + .pcs_rane = dwmac1000_rane, + .pcs_get_adv_lp = dwmac1000_get_adv_lp, }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index bc50952a1..6f4f5ce25 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -24,10 +24,8 @@ #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) #define GMAC_INT_STATUS 0x000000b0 #define GMAC_INT_EN 0x000000b4 -#define GMAC_AN_CTRL 0x000000e0 -#define GMAC_AN_STATUS 0x000000e4 -#define GMAC_AN_ADV 0x000000e8 -#define GMAC_AN_LPA 0x000000ec +#define GMAC_PCS_BASE 0x000000e0 +#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 #define GMAC_PMT 0x000000c0 #define GMAC_VERSION 0x00000110 #define GMAC_DEBUG 0x00000114 @@ -54,9 +52,18 @@ #define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 /* MAC Interrupt bitmap*/ +#define GMAC_INT_RGSMIIS BIT(0) +#define GMAC_INT_PCS_LINK BIT(1) +#define GMAC_INT_PCS_ANE BIT(2) +#define GMAC_INT_PCS_PHYIS BIT(3) #define GMAC_INT_PMT_EN BIT(4) #define GMAC_INT_LPI_EN BIT(5) +#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ + GMAC_INT_PCS_ANE) + +#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN + enum dwmac4_irq_status { time_stamp_irq = 0x00001000, mmc_rx_csum_offload_irq = 0x00000800, @@ -64,19 +71,8 @@ enum dwmac4_irq_status { mmc_rx_irq = 0x00000200, mmc_irq = 0x00000100, pmt_irq = 0x00000010, - pcs_ane_irq = 0x00000004, - pcs_link_irq = 0x00000002, }; -/* MAC Auto-Neg 
bitmap*/ -#define GMAC_AN_CTRL_RAN BIT(9) -#define GMAC_AN_CTRL_ANE BIT(12) -#define GMAC_AN_CTRL_ELE BIT(14) -#define GMAC_AN_FD BIT(5) -#define GMAC_AN_HD BIT(6) -#define GMAC_AN_PSE_MASK GENMASK(8, 7) -#define GMAC_AN_PSE_SHIFT 7 - /* MAC PMT bitmap */ enum power_event { pointer_reset = 0x80000000, @@ -250,6 +246,23 @@ enum power_event { #define MTL_DEBUG_RRCSTS_FLUSH 3 #define MTL_DEBUG_RWCSTS BIT(0) +/* SGMII/RGMII status register */ +#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0) +#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1) +#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4) +#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16) +#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17) +#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17 +#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19) +#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20) +#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21) +/* LNKMOD */ +#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1 +/* LNKSPEED */ +#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2 +#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1 +#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0 + extern const struct stmmac_dma_ops dwmac4_dma_ops; extern const struct stmmac_dma_ops dwmac410_dma_ops; #endif /* __DWMAC4_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 44da877d2..51019b794 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -17,6 +17,7 @@ #include #include #include +#include "stmmac_pcs.h" #include "dwmac4.h" static void dwmac4_core_init(struct mac_device_info *hw, int mtu) @@ -31,10 +32,31 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu) if (mtu > 2000) value |= GMAC_CONFIG_JE; + if (hw->ps) { + value |= GMAC_CONFIG_TE; + + if (hw->ps == SPEED_1000) { + value &= ~GMAC_CONFIG_PS; + } else { + value |= GMAC_CONFIG_PS; + + if (hw->ps == SPEED_10) + value &= ~GMAC_CONFIG_FES; + else + value |= GMAC_CONFIG_FES; + } + } + writel(value, ioaddr + GMAC_CONFIG); /* Mask GMAC interrupts */ - writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN); + value = GMAC_INT_DEFAULT_MASK; + if (hw->pmt) + value |= GMAC_INT_PMT_EN; + if (hw->pcs) + value |= GMAC_PCS_IRQ_DEFAULT; + + writel(value, ioaddr + GMAC_INT_EN); } static void dwmac4_dump_regs(struct mac_device_info *hw) @@ -80,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) } if (mode & WAKE_UCAST) { pr_debug("GMAC: WOL on global unicast\n"); - pmt |= global_unicast; + pmt |= power_down | global_unicast | wake_up_frame_en; } writel(pmt, ioaddr + GMAC_PMT); @@ -190,39 +212,53 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, } } -static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart) +static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback) { - void __iomem *ioaddr = hw->pcsr; - - /* auto negotiation enable and External Loopback enable */ - u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; + dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); +} - if (restart) - value |= GMAC_AN_CTRL_RAN; +static void dwmac4_rane(void __iomem *ioaddr, bool restart) +{ + dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); +} - writel(value, ioaddr + GMAC_AN_CTRL); +static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); } -static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) +/* RGMII or SMII interface */ +static void 
dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x) { - void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_AN_ADV); + u32 status; - if (value & GMAC_AN_FD) - adv->duplex = DUPLEX_FULL; - if (value & GMAC_AN_HD) - adv->duplex |= DUPLEX_HALF; + status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS); + x->irq_rgmii_n++; - adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT; + /* Check the link status */ + if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) { + int speed_value; - value = readl(ioaddr + GMAC_AN_LPA); + x->pcs_link = 1; + + speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >> + GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT); + if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; - if (value & GMAC_AN_FD) - adv->lp_duplex = DUPLEX_FULL; - if (value & GMAC_AN_HD) - adv->lp_duplex = DUPLEX_HALF; + x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK); - adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT; + pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, + x->pcs_duplex ? "Full" : "Half"); + } else { + x->pcs_link = 0; + pr_info("Link is Down\n"); + } } static int dwmac4_irq_status(struct mac_device_info *hw, @@ -248,11 +284,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw, x->irq_receive_pmt_irq_n++; } - if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { - readl(ioaddr + GMAC_AN_STATUS); - x->irq_pcs_ane_n++; - } - mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); /* Check MTL Interrupt: Currently only one queue is used: Q0. */ if (mtl_int_qx_status & MTL_INT_Q0) { @@ -267,6 +298,10 @@ static int dwmac4_irq_status(struct mac_device_info *hw, } } + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac4_phystatus(ioaddr, x); + return ret; } @@ -363,8 +398,9 @@ static const struct stmmac_ops dwmac4_ops = { .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, .get_umac_addr = dwmac4_get_umac_addr, - .ctrl_ane = dwmac4_ctrl_ane, - .get_adv = dwmac4_get_adv, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 59ae6088c..8dc9056c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -117,7 +117,6 @@ struct stmmac_priv { int eee_enabled; int eee_active; int tx_lpi_timer; - int pcs; unsigned int mode; int extend_desc; struct ptp_clock *ptp_clock; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index e2b98b016..1e06173fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -276,7 +276,8 @@ static int stmmac_ethtool_getsettings(struct net_device *dev, struct phy_device *phy = priv->phydev; int rc; - if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) { + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { struct rgmii_adv adv; if (!priv->xstats.pcs_link) { @@ -289,10 +290,10 @@ static int stmmac_ethtool_getsettings(struct net_device *dev, ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); /* Get and convert ADV/LP_ADV from the HW AN registers */ - if 
(!priv->hw->mac->get_adv) + if (!priv->hw->mac->pcs_get_adv_lp) return -EOPNOTSUPP; /* should never happen indeed */ - priv->hw->mac->get_adv(priv->hw, &adv); + priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv); /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ @@ -361,7 +362,8 @@ static int stmmac_ethtool_setsettings(struct net_device *dev, struct phy_device *phy = priv->phydev; int rc; - if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) { + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; /* Only support ANE */ @@ -376,8 +378,11 @@ static int stmmac_ethtool_setsettings(struct net_device *dev, ADVERTISED_10baseT_Full); spin_lock(&priv->lock); - if (priv->hw->mac->ctrl_ane) - priv->hw->mac->ctrl_ane(priv->hw, 1); + + if (priv->hw->mac->pcs_ctrl_ane) + priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, + priv->hw->ps, 0); + spin_unlock(&priv->lock); return 0; @@ -452,11 +457,22 @@ stmmac_get_pauseparam(struct net_device *netdev, { struct stmmac_priv *priv = netdev_priv(netdev); - if (priv->pcs) /* FIXME */ - return; - pause->rx_pause = 0; pause->tx_pause = 0; + + if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { + struct rgmii_adv adv_lp; + + pause->autoneg = 1; + priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); + if (!adv_lp.pause) + return; + } else { + if (!(priv->phydev->supported & SUPPORTED_Pause) || + !(priv->phydev->supported & SUPPORTED_Asym_Pause)) + return; + } + pause->autoneg = priv->phydev->autoneg; if (priv->flow_ctrl & FLOW_RX) @@ -473,10 +489,19 @@ stmmac_set_pauseparam(struct net_device *netdev, struct stmmac_priv *priv = netdev_priv(netdev); struct phy_device *phy = priv->phydev; int new_pause = FLOW_OFF; - int ret = 0; - if (priv->pcs) /* FIXME */ - return -EOPNOTSUPP; + if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { + struct rgmii_adv adv_lp; + + pause->autoneg = 1; + priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); + if (!adv_lp.pause) + return -EOPNOTSUPP; + } else { + if (!(phy->supported & SUPPORTED_Pause) || + !(phy->supported & SUPPORTED_Asym_Pause)) + return -EOPNOTSUPP; + } if (pause->rx_pause) new_pause |= FLOW_RX; @@ -488,11 +513,12 @@ stmmac_set_pauseparam(struct net_device *netdev, if (phy->autoneg) { if (netif_running(netdev)) - ret = phy_start_aneg(phy); - } else - priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, - priv->flow_ctrl, priv->pause); - return ret; + return phy_start_aneg(phy); + } + + priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, + priv->pause); + return 0; } static void stmmac_get_ethtool_stats(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e4071265b..4c8c60af7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -285,8 +285,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv) /* Using PCS we cannot dial with the phy registers at this stage * so we do not support extra feature like EEE. */ - if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) || - (priv->pcs == STMMAC_PCS_RTBI)) + if ((priv->hw->pcs == STMMAC_PCS_RGMII) || + (priv->hw->pcs == STMMAC_PCS_TBI) || + (priv->hw->pcs == STMMAC_PCS_RTBI)) goto out; /* MAC core supports the EEE feature. 
*/ @@ -799,10 +800,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) (interface == PHY_INTERFACE_MODE_RGMII_RXID) || (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { pr_debug("STMMAC: PCS RGMII support enable\n"); - priv->pcs = STMMAC_PCS_RGMII; + priv->hw->pcs = STMMAC_PCS_RGMII; } else if (interface == PHY_INTERFACE_MODE_SGMII) { pr_debug("STMMAC: PCS SGMII support enable\n"); - priv->pcs = STMMAC_PCS_SGMII; + priv->hw->pcs = STMMAC_PCS_SGMII; } } } @@ -1665,6 +1666,19 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (priv->plat->bus_setup) priv->plat->bus_setup(priv->ioaddr); + /* PS and related bits will be programmed according to the speed */ + if (priv->hw->pcs) { + int speed = priv->plat->mac_port_sel_speed; + + if ((speed == SPEED_10) || (speed == SPEED_100) || + (speed == SPEED_1000)) { + priv->hw->ps = speed; + } else { + dev_warn(priv->device, "invalid port speed\n"); + priv->hw->ps = 0; + } + } + /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); @@ -1714,8 +1728,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); } - if (priv->pcs && priv->hw->mac->ctrl_ane) - priv->hw->mac->ctrl_ane(priv->hw, 0); + if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) + priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); /* set TX ring length */ if (priv->hw->dma->set_tx_ring_len) @@ -1748,8 +1762,9 @@ static int stmmac_open(struct net_device *dev) stmmac_check_ether_addr(priv); - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) { + if (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) { ret = stmmac_init_phy(dev); if (ret) { pr_err("%s: Cannot attach to PHY (error: %d)\n", @@ -2809,6 +2824,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) priv->rx_tail_addr, STMMAC_CHAN0); } + + /* PCS link status */ + if (priv->hw->pcs) { + if (priv->xstats.pcs_link) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + } } /* To handle DMA interrupts */ @@ -3130,6 +3153,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv) */ priv->plat->enh_desc = priv->dma_cap.enh_desc; priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; + priv->hw->pmt = priv->plat->pmt; /* TXCOE doesn't work in thresh DMA mode */ if (priv->plat->force_thresh_dma_mode) @@ -3325,8 +3349,9 @@ int stmmac_dvr_probe(struct device *device, stmmac_check_pcs_mode(priv); - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) { + if (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) { /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { @@ -3372,12 +3397,14 @@ int stmmac_dvr_remove(struct device *dev) stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); + of_node_put(priv->plat->phy_node); if (priv->stmmac_rst) reset_control_assert(priv->stmmac_rst); clk_disable_unprepare(priv->pclk); clk_disable_unprepare(priv->stmmac_clk); - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) + if (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); free_netdev(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h new file mode 
100644 index 000000000..eba41c24b --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h @@ -0,0 +1,159 @@ +/* + * stmmac_pcs.h: Physical Coding Sublayer Header File + * + * Copyright (C) 2016 STMicroelectronics (R&D) Limited + * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __STMMAC_PCS_H__ +#define __STMMAC_PCS_H__ + +#include <linux/slab.h> +#include <linux/io.h> +#include "common.h" + +/* PCS registers (AN/TBI/SGMII/RGMII) offsets */ +#define GMAC_AN_CTRL(x) (x) /* AN control */ +#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */ +#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */ +#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partner ability */ +#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */ +#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */ + +/* AN Configuration defines */ +#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */ +#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */ +#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */ +#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */ +#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */ +#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */ + +/* AN Status defines */ +#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */ +#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */ +#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */ +#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */ + +/* ADV and LPA defines */ +#define GMAC_ANE_FD BIT(5) +#define GMAC_ANE_HD BIT(6) +#define GMAC_ANE_PSE GENMASK(8, 7) +#define GMAC_ANE_PSE_SHIFT 7 +#define GMAC_ANE_RFE GENMASK(13, 12) +#define GMAC_ANE_RFE_SHIFT 12 +#define GMAC_ANE_ACK BIT(14) + +/** + * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @intr_status: GMAC core interrupt status + * @x: pointer to log these events as stats + * Description: it is the ISR for PCS events: Auto-Negotiation Completed and + * Link status. + */ +static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg, + unsigned int intr_status, + struct stmmac_extra_stats *x) +{ + u32 val = readl(ioaddr + GMAC_AN_STATUS(reg)); + + if (intr_status & PCS_ANE_IRQ) { + x->irq_pcs_ane_n++; + if (val & GMAC_AN_STATUS_ANC) + pr_info("stmmac_pcs: ANE process completed\n"); + } + + if (intr_status & PCS_LINK_IRQ) { + x->irq_pcs_link_n++; + if (val & GMAC_AN_STATUS_LS) + pr_info("stmmac_pcs: Link Up\n"); + else + pr_info("stmmac_pcs: Link Down\n"); + } +} + +/** + * dwmac_rane - To restart ANE + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @restart: to restart ANE + * Description: this is to just restart the Auto-Negotiation. + */ +static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart) +{ + u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); + + if (restart) + value |= GMAC_AN_CTRL_RAN; + + writel(value, ioaddr + GMAC_AN_CTRL(reg)); } + +/** + * dwmac_ctrl_ane - To program the AN Control Register. + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @ane: to enable the auto-negotiation + * @srgmi_ral: to manage MAC-2-MAC SGMII connections. + * @loopback: to cause the PHY to loopback tx data into rx path.
+ * Description: this is the main function to configure the AN control register + * and init the ANE, select loopback (usually for debugging purpose) and + * configure SGMII RAL. + */ +static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, + bool srgmi_ral, bool loopback) +{ + u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); + + /* Enable and restart the Auto-Negotiation */ + if (ane) + value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN; + + /* In case of MAC-2-MAC connection, block is configured to operate + * according to MAC conf register. + */ + if (srgmi_ral) + value |= GMAC_AN_CTRL_SGMRAL; + + if (loopback) + value |= GMAC_AN_CTRL_ELE; + + writel(value, ioaddr + GMAC_AN_CTRL(reg)); +} + +/** + * dwmac_get_adv_lp - Get ADV and LP cap + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @adv_lp: structure to store the adv,lp status + * Description: this is to expose the ANE advertisement and Link partner ability + * status to ethtool support. + */ +static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg, + struct rgmii_adv *adv_lp) +{ + u32 value = readl(ioaddr + GMAC_ANE_ADV(reg)); + + if (value & GMAC_ANE_FD) + adv_lp->duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv_lp->duplex |= DUPLEX_HALF; + + adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; + + value = readl(ioaddr + GMAC_ANE_LPA(reg)); + + if (value & GMAC_ANE_FD) + adv_lp->lp_duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv_lp->lp_duplex = DUPLEX_HALF; + + adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; +} +#endif /* __STMMAC_PCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 409db913b..756bb548e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -113,8 +113,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) return NULL; axi = kzalloc(sizeof(*axi), GFP_KERNEL); - if (!axi) + if (!axi) { + of_node_put(np); return ERR_PTR(-ENOMEM); + } axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); @@ -127,6 +129,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt); of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt); of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); + of_node_put(np); return axi; } @@ -302,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (!dma_cfg) { - of_node_put(np); + of_node_put(plat->phy_node); return ERR_PTR(-ENOMEM); } plat->dma_cfg = dma_cfg; @@ -319,6 +322,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); } + of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); + plat->axi = stmmac_axi_setup(pdev); return plat; @@ -411,7 +416,9 @@ static int stmmac_pltfr_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); ret = stmmac_suspend(dev); - if (priv->plat->exit) + if (priv->plat->suspend) + priv->plat->suspend(pdev, priv->plat->bsp_priv); + else if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv); return ret; @@ -430,7 +437,9 @@ static int stmmac_pltfr_resume(struct device 
*dev) struct stmmac_priv *priv = netdev_priv(ndev); struct platform_device *pdev = to_platform_device(dev); - if (priv->plat->init) + if (priv->plat->resume) + priv->plat->resume(pdev, priv->plat->bsp_priv); + else if (priv->plat->init) priv->plat->init(pdev, priv->plat->bsp_priv); return stmmac_resume(dev); diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 158213cd6..4490ebaed 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include @@ -598,7 +597,6 @@ struct net_local { struct work_struct txtimeout_reinit; phy_interface_t phy_interface; - struct phy_device *phy_dev; struct mii_bus *mii_bus; unsigned int link; @@ -816,7 +814,7 @@ static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; @@ -850,6 +848,7 @@ static void dwceqos_link_down(struct net_local *lp) static void dwceqos_link_up(struct net_local *lp) { + struct net_device *ndev = lp->ndev; u32 regval; unsigned long flags; @@ -860,7 +859,7 @@ static void dwceqos_link_up(struct net_local *lp) dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); spin_unlock_irqrestore(&lp->hw_lock, flags); - lp->eee_active = !phy_init_eee(lp->phy_dev, 0); + lp->eee_active = !phy_init_eee(ndev->phydev, 0); /* Check for changed EEE capability */ if (!lp->eee_active && lp->eee_enabled) { @@ -876,7 +875,8 @@ static void dwceqos_link_up(struct net_local *lp) static void dwceqos_set_speed(struct net_local *lp) { - struct phy_device *phydev = lp->phy_dev; + struct net_device *ndev = lp->ndev; + struct phy_device *phydev = ndev->phydev; u32 regval; regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); @@ -903,7 +903,7 @@ static void dwceqos_set_speed(struct net_local *lp) static void dwceqos_adjust_link(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; + struct phy_device *phydev = ndev->phydev; int status_change = 0; if (lp->phy_defer) @@ -987,7 +987,6 @@ static int dwceqos_mii_probe(struct net_device *ndev) lp->link = 0; lp->speed = 0; lp->duplex = DUPLEX_UNKNOWN; - lp->phy_dev = phydev; return 0; } @@ -1247,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp) lp->mii_bus->read = &dwceqos_mdio_read; lp->mii_bus->write = &dwceqos_mdio_write; lp->mii_bus->priv = lp; - lp->mii_bus->parent = &lp->ndev->dev; + lp->mii_bus->parent = &lp->pdev->dev; of_address_to_resource(lp->pdev->dev.of_node, 0, &res); snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", @@ -1531,6 +1530,7 @@ static void dwceqos_configure_bus(struct net_local *lp) static void dwceqos_init_hw(struct net_local *lp) { + struct net_device *ndev = lp->ndev; u32 regval; u32 buswidth; u32 dma_skip; @@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp) DWCEQOS_MMC_CTRL_RSTONRD); dwceqos_enable_mmc_interrupt(lp); - /* Enable Interrupts */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, - DWCEQOS_DMA_CH0_IE_NIE | - DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | - DWCEQOS_DMA_CH0_IE_AIE | - DWCEQOS_DMA_CH0_IE_FBEE); - + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0); dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | @@ -1645,10 
+1639,10 @@ static void dwceqos_init_hw(struct net_local *lp) regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); lp->phy_defer = false; - mutex_lock(&lp->phy_dev->lock); - phy_read_status(lp->phy_dev); + mutex_lock(&ndev->phydev->lock); + phy_read_status(ndev->phydev); dwceqos_adjust_link(lp->ndev); - mutex_unlock(&lp->phy_dev->lock); + mutex_unlock(&ndev->phydev->lock); } static void dwceqos_tx_reclaim(unsigned long data) @@ -1898,13 +1892,22 @@ static int dwceqos_open(struct net_device *ndev) * hence the unusual init order with phy_start first. */ lp->phy_defer = true; - phy_start(lp->phy_dev); + phy_start(ndev->phydev); dwceqos_init_hw(lp); napi_enable(&lp->napi); netif_start_queue(ndev); tasklet_enable(&lp->tx_bdreclaim_tasklet); + /* Enable Interrupts -- do this only after we enable NAPI and the + * tasklet. + */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, + DWCEQOS_DMA_CH0_IE_NIE | + DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | + DWCEQOS_DMA_CH0_IE_AIE | + DWCEQOS_DMA_CH0_IE_FBEE); + return 0; } @@ -1943,7 +1946,7 @@ static int dwceqos_stop(struct net_device *ndev) dwceqos_drain_dma(lp); dwceqos_reset_hw(lp); - phy_stop(lp->phy_dev); + phy_stop(ndev->phydev); dwceqos_descriptor_free(lp); @@ -2523,30 +2526,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) return s; } -static int -dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, ecmd); -} - -static int -dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, ecmd); -} - static void dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) { @@ -2574,17 +2553,17 @@ static int dwceqos_set_pauseparam(struct net_device *ndev, lp->flowcontrol.autoneg = pp->autoneg; if (pp->autoneg) { - lp->phy_dev->advertising |= ADVERTISED_Pause; - lp->phy_dev->advertising |= ADVERTISED_Asym_Pause; + ndev->phydev->advertising |= ADVERTISED_Pause; + ndev->phydev->advertising |= ADVERTISED_Asym_Pause; } else { - lp->phy_dev->advertising &= ~ADVERTISED_Pause; - lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause; + ndev->phydev->advertising &= ~ADVERTISED_Pause; + ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause; lp->flowcontrol.rx = pp->rx_pause; lp->flowcontrol.tx = pp->tx_pause; } if (netif_running(ndev)) - ret = phy_start_aneg(lp->phy_dev); + ret = phy_start_aneg(ndev->phydev); return ret; } @@ -2705,7 +2684,7 @@ static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) dwceqos_get_tx_lpi_state(regval)); } - return phy_ethtool_get_eee(lp->phy_dev, edata); + return phy_ethtool_get_eee(ndev->phydev, edata); } static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) @@ -2747,7 +2726,7 @@ static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) spin_unlock_irqrestore(&lp->hw_lock, flags); } - return phy_ethtool_set_eee(lp->phy_dev, edata); + return phy_ethtool_set_eee(ndev->phydev, edata); } static u32 dwceqos_get_msglevel(struct net_device *ndev) @@ -2765,8 +2744,6 @@ static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) } static struct ethtool_ops dwceqos_ethtool_ops = { - .get_settings = dwceqos_get_settings, - .set_settings = dwceqos_set_settings, .get_drvinfo = 
dwceqos_get_drvinfo, .get_link = ethtool_op_get_link, .get_pauseparam = dwceqos_get_pauseparam, @@ -2780,6 +2757,8 @@ static struct ethtool_ops dwceqos_ethtool_ops = { .set_eee = dwceqos_set_eee, .get_msglevel = dwceqos_get_msglevel, .set_msglevel = dwceqos_set_msglevel, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static struct net_device_ops netdev_ops = { @@ -2874,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev) ndev->features = ndev->hw_features; - netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); - - ret = register_netdev(ndev); - if (ret) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_clk_dis_aper; - } - lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); if (IS_ERR(lp->phy_ref_clk)) { dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); ret = PTR_ERR(lp->phy_ref_clk); - goto err_out_unregister_netdev; + goto err_out_clk_dis_aper; } ret = clk_prepare_enable(lp->phy_ref_clk); if (ret) { dev_err(&pdev->dev, "Unable to enable device clock.\n"); - goto err_out_unregister_netdev; + goto err_out_clk_dis_aper; } lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, @@ -2901,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev) ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); if (ret < 0) { dev_err(&pdev->dev, "invalid fixed-link"); - goto err_out_unregister_netdev; + goto err_out_clk_dis_phy; } lp->phy_node = of_node_get(lp->pdev->dev.of_node); @@ -2910,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev) ret = of_get_phy_mode(lp->pdev->dev.of_node); if (ret < 0) { dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } lp->phy_interface = ret; @@ -2918,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev) ret = dwceqos_mii_init(lp); if (ret) { dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } ret = dwceqos_mii_probe(ndev); if (ret != 0) { netdev_err(ndev, "mii_probe fail.\n"); ret = -ENXIO; - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); @@ -2934,7 +2905,8 @@ static int dwceqos_probe(struct platform_device *pdev) (unsigned long)ndev); tasklet_disable(&lp->tx_bdreclaim_tasklet); - lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME); + lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME, + WQ_MEM_RECLAIM, 0); INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout); platform_set_drvdata(pdev, ndev); @@ -2942,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev) if (ret) { dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", ret); - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", pdev->id, ndev->base_addr, ndev->irq); @@ -2952,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev) if (ret) { dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", ndev->irq, ret); - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } if (netif_msg_probe(lp)) netdev_dbg(ndev, "net_local@%p\n", lp); + netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_clk_dis_phy; + } + return 0; 
-err_out_unregister_clk_notifier: +err_out_clk_dis_phy: clk_disable_unprepare(lp->phy_ref_clk); -err_out_unregister_netdev: - unregister_netdev(ndev); err_out_clk_dis_aper: clk_disable_unprepare(lp->apb_pclk); err_out_free_netdev: @@ -2981,8 +2959,8 @@ static int dwceqos_remove(struct platform_device *pdev) if (ndev) { lp = netdev_priv(ndev); - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); + if (ndev->phydev) + phy_disconnect(ndev->phydev); mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index f7540dd6b..f5e931e50 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { err = pci_enable_msi(pdev); if (err) - pr_err("Can't eneble msi. error is %d\n", err); + pr_err("Can't enable msi. error is %d\n", err); else nic->irq_type = IRQ_MSI; } else diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index e7f0b7d95..9904d740d 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -48,8 +48,7 @@ config TI_DAVINCI_CPDMA will be called davinci_cpdma. This is recommended. config TI_CPSW_PHY_SEL - bool "TI CPSW Switch Phy sel Support" - depends on TI_CPSW + bool ---help--- This driver supports configuring of the phy mode connected to the CPSW. diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 7eef45e6d..d300d536d 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -205,7 +205,6 @@ struct cpmac_priv { dma_addr_t dma_ring; void __iomem *regs; struct mii_bus *mii_bus; - struct phy_device *phy; char phy_name[MII_BUS_ID_SIZE + 3]; int oldlink, oldspeed, oldduplex; u32 msg_enable; @@ -830,37 +829,12 @@ static void cpmac_tx_timeout(struct net_device *dev) static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct cpmac_priv *priv = netdev_priv(dev); - if (!(netif_running(dev))) return -EINVAL; - if (!priv->phy) + if (!dev->phydev) return -EINVAL; - return phy_mii_ioctl(priv->phy, ifr, cmd); -} - -static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - if (priv->phy) - return phy_ethtool_gset(priv->phy, cmd); - - return -EINVAL; -} - -static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (priv->phy) - return phy_ethtool_sset(priv->phy, cmd); - - return -EINVAL; + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void cpmac_get_ringparam(struct net_device *dev, @@ -900,12 +874,12 @@ static void cpmac_get_drvinfo(struct net_device *dev, } static const struct ethtool_ops cpmac_ethtool_ops = { - .get_settings = cpmac_get_settings, - .set_settings = cpmac_set_settings, .get_drvinfo = cpmac_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = cpmac_get_ringparam, .set_ringparam = cpmac_set_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static void cpmac_adjust_link(struct net_device *dev) @@ -914,16 +888,16 @@ static void cpmac_adjust_link(struct net_device *dev) int new_state = 0; spin_lock(&priv->lock); - if (priv->phy->link) { + if (dev->phydev->link) { netif_tx_start_all_queues(dev); - if 
(priv->phy->duplex != priv->oldduplex) { + if (dev->phydev->duplex != priv->oldduplex) { new_state = 1; - priv->oldduplex = priv->phy->duplex; + priv->oldduplex = dev->phydev->duplex; } - if (priv->phy->speed != priv->oldspeed) { + if (dev->phydev->speed != priv->oldspeed) { new_state = 1; - priv->oldspeed = priv->phy->speed; + priv->oldspeed = dev->phydev->speed; } if (!priv->oldlink) { @@ -938,7 +912,7 @@ static void cpmac_adjust_link(struct net_device *dev) } if (new_state && netif_msg_link(priv) && net_ratelimit()) - phy_print_status(priv->phy); + phy_print_status(dev->phydev); spin_unlock(&priv->lock); } @@ -1016,8 +990,8 @@ static int cpmac_open(struct net_device *dev) cpmac_hw_start(dev); napi_enable(&priv->napi); - priv->phy->state = PHY_CHANGELINK; - phy_start(priv->phy); + dev->phydev->state = PHY_CHANGELINK; + phy_start(dev->phydev); return 0; @@ -1032,8 +1006,10 @@ fail_desc: kfree_skb(priv->rx_head[i].skb); } } + dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size, + priv->desc_ring, priv->dma_ring); + fail_alloc: - kfree(priv->desc_ring); iounmap(priv->regs); fail_remap: @@ -1053,7 +1029,7 @@ static int cpmac_stop(struct net_device *dev) cancel_work_sync(&priv->reset_work); napi_disable(&priv->napi); - phy_stop(priv->phy); + phy_stop(dev->phydev); cpmac_hw_stop(dev); @@ -1106,6 +1082,7 @@ static int cpmac_probe(struct platform_device *pdev) struct cpmac_priv *priv; struct net_device *dev; struct plat_cpmac_data *pdata; + struct phy_device *phydev = NULL; pdata = dev_get_platdata(&pdev->dev); @@ -1142,7 +1119,7 @@ static int cpmac_probe(struct platform_device *pdev) mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!mem) { rc = -ENODEV; - goto out; + goto fail; } dev->irq = platform_get_irq_byname(pdev, "irq"); @@ -1162,15 +1139,15 @@ static int cpmac_probe(struct platform_device *pdev) snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); - priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, - PHY_INTERFACE_MODE_MII); + phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link, + PHY_INTERFACE_MODE_MII); - if (IS_ERR(priv->phy)) { + if (IS_ERR(phydev)) { if (netif_msg_drv(priv)) dev_err(&pdev->dev, "Could not attach to PHY\n"); - rc = PTR_ERR(priv->phy); - goto out; + rc = PTR_ERR(phydev); + goto fail; } rc = register_netdev(dev); @@ -1189,7 +1166,6 @@ static int cpmac_probe(struct platform_device *pdev) fail: free_netdev(dev); -out: return rc; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 53190894f..f85d605e4 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -364,7 +364,6 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) } struct cpsw_priv { - spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; struct napi_struct napi_rx; @@ -735,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status) netif_receive_skb(skb); ndev->stats.rx_bytes += len; ndev->stats.rx_packets++; + kmemleak_not_leak(new_skb); } else { ndev->stats.rx_dropped++; new_skb = skb; @@ -1244,6 +1244,7 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->phy = NULL; cpsw_ale_control_set(priv->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + soft_reset_slave(slave); } static int cpsw_ndo_open(struct net_device *ndev) @@ -1252,7 +1253,11 @@ static int cpsw_ndo_open(struct net_device *ndev) int i, ret; u32 reg; - pm_runtime_get_sync(&priv->pdev->dev); + ret = 
pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } if (!cpsw_common_res_usage_state(priv)) cpsw_intr_disable(priv); @@ -1278,6 +1283,7 @@ static int cpsw_ndo_open(struct net_device *ndev) if (!cpsw_common_res_usage_state(priv)) { struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); + int buf_num; /* setup tx dma to fixed prio and zero offset */ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); @@ -1305,10 +1311,8 @@ static int cpsw_ndo_open(struct net_device *ndev) enable_irq(priv->irqs_table[0]); } - if (WARN_ON(!priv->data.rx_descs)) - priv->data.rx_descs = 128; - - for (i = 0; i < priv->data.rx_descs; i++) { + buf_num = cpdma_chan_get_rx_buf_num(priv->dma); + for (i = 0; i < buf_num; i++) { struct sk_buff *skb; ret = -ENOMEM; @@ -1322,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev) kfree_skb(skb); goto err_cleanup; } + kmemleak_not_leak(skb); } /* continue even if we didn't manage to submit all * receive descs @@ -1611,10 +1616,17 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) struct sockaddr *addr = (struct sockaddr *)p; int flags = 0; u16 vid = 0; + int ret; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { vid = priv->slaves[priv->emac_port].port_vlan; flags = ALE_VLAN; @@ -1629,6 +1641,8 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); for_each_slave(priv, cpsw_set_slave_mac, priv); + pm_runtime_put(&priv->pdev->dev); + return 0; } @@ -1693,10 +1707,17 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); + int ret; if (vid == priv->data.default_vlan) return 0; + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual @@ -1711,7 +1732,10 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, } dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); - return cpsw_add_vlan_ale_entry(priv, vid); + ret = cpsw_add_vlan_ale_entry(priv, vid); + + pm_runtime_put(&priv->pdev->dev); + return ret; } static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, @@ -1723,6 +1747,12 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (vid == priv->data.default_vlan) return 0; + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { int i; @@ -1742,8 +1772,10 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (ret != 0) return ret; - return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, - 0, ALE_VLAN, vid); + ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); + pm_runtime_put(&priv->pdev->dev); + return ret; } static const struct net_device_ops cpsw_netdev_ops = { @@ -1902,10 +1934,33 @@ static int cpsw_set_pauseparam(struct net_device *ndev, priv->tx_pause = pause->tx_pause ? 
true : false; for_each_slave(priv, _cpsw_adjust_link, priv, &link); - return 0; } +static int cpsw_ethtool_op_begin(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); + pm_runtime_put_noidle(&priv->pdev->dev); + } + + return ret; +} + +static void cpsw_ethtool_op_complete(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_put(&priv->pdev->dev); + if (ret < 0) + cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -1925,6 +1980,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .set_wol = cpsw_set_wol, .get_regs_len = cpsw_get_regs_len, .get_regs = cpsw_get_regs, + .begin = cpsw_ethtool_op_begin, + .complete = cpsw_ethtool_op_complete, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@ -1999,12 +2056,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->bd_ram_size = prop; - if (of_property_read_u32(node, "rx_descs", &prop)) { - dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n"); - return -EINVAL; - } - data->rx_descs = prop; - if (of_property_read_u32(node, "mac_control", &prop)) { dev_err(&pdev->dev, "Missing mac_control property in the DT.\n"); return -EINVAL; @@ -2022,7 +2073,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (ret) dev_warn(&pdev->dev, "Doesn't have any child node\n"); - for_each_child_of_node(node, slave_node) { + for_each_available_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; int lenp; @@ -2124,7 +2175,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, } priv_sl2 = netdev_priv(ndev); - spin_lock_init(&priv_sl2->lock); priv_sl2->data = *data; priv_sl2->pdev = pdev; priv_sl2->ndev = ndev; @@ -2243,7 +2293,6 @@ static int cpsw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); - spin_lock_init(&priv->lock); priv->pdev = pdev; priv->ndev = ndev; priv->dev = &ndev->dev; @@ -2321,7 +2370,11 @@ static int cpsw_probe(struct platform_device *pdev) /* Need to enable clocks with runtime PM api to access module * registers */ - pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + goto clean_runtime_disable_ret; + } priv->version = readl(&priv->regs->id_ver); pm_runtime_put_sync(&pdev->dev); @@ -2513,19 +2566,17 @@ clean_ndev_ret: return ret; } -static int cpsw_remove_child_device(struct device *dev, void *c) -{ - struct platform_device *pdev = to_platform_device(dev); - - of_device_unregister(pdev); - - return 0; -} - static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + return ret; + } if (priv->data.dual_emac) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); @@ -2533,8 +2584,9 @@ static int cpsw_remove(struct platform_device *pdev) cpsw_ale_destroy(priv->ale); cpdma_ctlr_destroy(priv->dma); + of_platform_depopulate(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - device_for_each_child(&pdev->dev, NULL, 
cpsw_remove_child_device); if (priv->data.dual_emac) free_netdev(cpsw_get_slave_ndev(priv, 1)); free_netdev(ndev); @@ -2554,16 +2606,12 @@ static int cpsw_suspend(struct device *dev) for (i = 0; i < priv->data.slaves; i++) { if (netif_running(priv->slaves[i].ndev)) cpsw_ndo_stop(priv->slaves[i].ndev); - soft_reset_slave(priv->slaves + i); } } else { if (netif_running(ndev)) cpsw_ndo_stop(ndev); - for_each_slave(priv, soft_reset_slave); } - pm_runtime_put_sync(&pdev->dev); - /* Select sleep pin state */ pinctrl_pm_select_sleep_state(&pdev->dev); @@ -2576,8 +2624,6 @@ static int cpsw_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); - pm_runtime_get_sync(&pdev->dev); - /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index e50afd1b2..16b54c6f3 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h @@ -35,7 +35,6 @@ struct cpsw_platform_data { u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ u32 ale_entries; /* ale table size */ u32 bd_ram_size; /*buffer descriptor ram size */ - u32 rx_descs; /* Number of Rx Descriptios */ u32 mac_control; /* Mac control register */ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ bool dual_emac; /* Enable Dual EMAC mode */ diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 18bf3a8fd..19e5f32a8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -21,7 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/delay.h> - +#include <linux/genalloc.h> #include "davinci_cpdma.h" /* DMA Registers */ @@ -87,9 +87,8 @@ struct cpdma_desc_pool { void *cpumap; /* dma_alloc map */ int desc_size, mem_size; int num_desc, used_desc; - unsigned long *bitmap; struct device *dev; - spinlock_t lock; + struct gen_pool *gen_pool; }; enum cpdma_state { @@ -98,8 +97,6 @@ CPDMA_STATE_TEARDOWN, }; -static const char *cpdma_state_str[] = { "idle", "active", "teardown" }; - struct cpdma_ctlr { enum cpdma_state state; struct cpdma_params params; @@ -117,6 +114,7 @@ struct cpdma_chan { int chan_num; spinlock_t lock; int count; + u32 desc_num; u32 mask; cpdma_handler_fn handler; enum dma_data_direction dir; @@ -145,6 +143,19 @@ struct cpdma_chan { (directed << CPDMA_TO_PORT_SHIFT)); \ } while (0) +static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) +{ + if (!pool) + return; + + WARN_ON(pool->used_desc); + if (pool->cpumap) + dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, + pool->phys); + else + iounmap(pool->iomap); +} + /* * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci * emac) have dedicated on-chip memory for these descriptors.
Some other @@ -155,24 +166,25 @@ static struct cpdma_desc_pool * cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, int size, int align) { - int bitmap_size; struct cpdma_desc_pool *pool; + int ret; pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); if (!pool) - goto fail; - - spin_lock_init(&pool->lock); + goto gen_pool_create_fail; pool->dev = dev; pool->mem_size = size; pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); pool->num_desc = size / pool->desc_size; - bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); - pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); - if (!pool->bitmap) - goto fail; + pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1, + "cpdma"); + if (IS_ERR(pool->gen_pool)) { + dev_err(dev, "pool create failed %ld\n", + PTR_ERR(pool->gen_pool)); + goto gen_pool_create_fail; + } if (phys) { pool->phys = phys; @@ -185,24 +197,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ } - if (pool->iomap) - return pool; -fail: - return NULL; -} + if (!pool->iomap) + goto gen_pool_create_fail; -static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) -{ - if (!pool) - return; - - WARN_ON(pool->used_desc); - if (pool->cpumap) { - dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, - pool->phys); - } else { - iounmap(pool->iomap); + ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap, + pool->phys, pool->mem_size, -1); + if (ret < 0) { + dev_err(dev, "pool add failed %d\n", ret); + goto gen_pool_add_virt_fail; } + + return pool; + +gen_pool_add_virt_fail: + cpdma_desc_pool_destroy(pool); +gen_pool_create_fail: + return NULL; } static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, @@ -220,47 +230,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) } static struct cpdma_desc __iomem * -cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) +cpdma_desc_alloc(struct cpdma_desc_pool *pool) { - unsigned long flags; - int index; - int desc_start; - int desc_end; struct cpdma_desc __iomem *desc = NULL; - spin_lock_irqsave(&pool->lock, flags); - - if (is_rx) { - desc_start = 0; - desc_end = pool->num_desc/2; - } else { - desc_start = pool->num_desc/2; - desc_end = pool->num_desc; - } - - index = bitmap_find_next_zero_area(pool->bitmap, - desc_end, desc_start, num_desc, 0); - if (index < desc_end) { - bitmap_set(pool->bitmap, index, num_desc); - desc = pool->iomap + pool->desc_size * index; + desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool, + pool->desc_size); + if (desc) pool->used_desc++; - } - spin_unlock_irqrestore(&pool->lock, flags); return desc; } static void cpdma_desc_free(struct cpdma_desc_pool *pool, struct cpdma_desc __iomem *desc, int num_desc) { - unsigned long flags, index; - - index = ((unsigned long)desc - (unsigned long)pool->iomap) / - pool->desc_size; - spin_lock_irqsave(&pool->lock, flags); - bitmap_clear(pool->bitmap, index, num_desc); + gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size); pool->used_desc--; - spin_unlock_irqrestore(&pool->lock, flags); } struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) @@ -369,86 +355,13 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) } EXPORT_SYMBOL_GPL(cpdma_ctlr_stop); -int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) -{ - struct device *dev = ctlr->dev; - unsigned long flags; - int i; - - spin_lock_irqsave(&ctlr->lock, flags); - - dev_info(dev, 
"CPDMA: state: %s", cpdma_state_str[ctlr->state]); - - dev_info(dev, "CPDMA: txidver: %x", - dma_reg_read(ctlr, CPDMA_TXIDVER)); - dev_info(dev, "CPDMA: txcontrol: %x", - dma_reg_read(ctlr, CPDMA_TXCONTROL)); - dev_info(dev, "CPDMA: txteardown: %x", - dma_reg_read(ctlr, CPDMA_TXTEARDOWN)); - dev_info(dev, "CPDMA: rxidver: %x", - dma_reg_read(ctlr, CPDMA_RXIDVER)); - dev_info(dev, "CPDMA: rxcontrol: %x", - dma_reg_read(ctlr, CPDMA_RXCONTROL)); - dev_info(dev, "CPDMA: softreset: %x", - dma_reg_read(ctlr, CPDMA_SOFTRESET)); - dev_info(dev, "CPDMA: rxteardown: %x", - dma_reg_read(ctlr, CPDMA_RXTEARDOWN)); - dev_info(dev, "CPDMA: txintstatraw: %x", - dma_reg_read(ctlr, CPDMA_TXINTSTATRAW)); - dev_info(dev, "CPDMA: txintstatmasked: %x", - dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED)); - dev_info(dev, "CPDMA: txintmaskset: %x", - dma_reg_read(ctlr, CPDMA_TXINTMASKSET)); - dev_info(dev, "CPDMA: txintmaskclear: %x", - dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR)); - dev_info(dev, "CPDMA: macinvector: %x", - dma_reg_read(ctlr, CPDMA_MACINVECTOR)); - dev_info(dev, "CPDMA: maceoivector: %x", - dma_reg_read(ctlr, CPDMA_MACEOIVECTOR)); - dev_info(dev, "CPDMA: rxintstatraw: %x", - dma_reg_read(ctlr, CPDMA_RXINTSTATRAW)); - dev_info(dev, "CPDMA: rxintstatmasked: %x", - dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED)); - dev_info(dev, "CPDMA: rxintmaskset: %x", - dma_reg_read(ctlr, CPDMA_RXINTMASKSET)); - dev_info(dev, "CPDMA: rxintmaskclear: %x", - dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR)); - dev_info(dev, "CPDMA: dmaintstatraw: %x", - dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW)); - dev_info(dev, "CPDMA: dmaintstatmasked: %x", - dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED)); - dev_info(dev, "CPDMA: dmaintmaskset: %x", - dma_reg_read(ctlr, CPDMA_DMAINTMASKSET)); - dev_info(dev, "CPDMA: dmaintmaskclear: %x", - dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR)); - - if (!ctlr->params.has_ext_regs) { - dev_info(dev, "CPDMA: dmacontrol: %x", - dma_reg_read(ctlr, CPDMA_DMACONTROL)); - dev_info(dev, "CPDMA: dmastatus: %x", - dma_reg_read(ctlr, CPDMA_DMASTATUS)); - dev_info(dev, "CPDMA: rxbuffofs: %x", - dma_reg_read(ctlr, CPDMA_RXBUFFOFS)); - } - - for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) - if (ctlr->channels[i]) - cpdma_chan_dump(ctlr->channels[i]); - - spin_unlock_irqrestore(&ctlr->lock, flags); - return 0; -} -EXPORT_SYMBOL_GPL(cpdma_ctlr_dump); - int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) { - unsigned long flags; int ret = 0, i; if (!ctlr) return -EINVAL; - spin_lock_irqsave(&ctlr->lock, flags); if (ctlr->state != CPDMA_STATE_IDLE) cpdma_ctlr_stop(ctlr); @@ -456,7 +369,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) cpdma_chan_destroy(ctlr->channels[i]); cpdma_desc_pool_destroy(ctlr->pool); - spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); @@ -516,6 +428,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, chan->state = CPDMA_STATE_IDLE; chan->chan_num = chan_num; chan->handler = handler; + chan->desc_num = ctlr->pool->num_desc / 2; if (is_rx_chan(chan)) { chan->hdp = ctlr->params.rxhdp + offset; @@ -543,6 +456,12 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, } EXPORT_SYMBOL_GPL(cpdma_chan_create); +int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr) +{ + return ctlr->pool->num_desc / 2; +} +EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num); + int cpdma_chan_destroy(struct cpdma_chan *chan) { struct cpdma_ctlr *ctlr; @@ -574,54 +493,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan, } 
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats); -int cpdma_chan_dump(struct cpdma_chan *chan) -{ - unsigned long flags; - struct device *dev = chan->ctlr->dev; - - spin_lock_irqsave(&chan->lock, flags); - - dev_info(dev, "channel %d (%s %d) state %s", - chan->chan_num, is_rx_chan(chan) ? "rx" : "tx", - chan_linear(chan), cpdma_state_str[chan->state]); - dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp)); - dev_info(dev, "\tcp: %x\n", chan_read(chan, cp)); - if (chan->rxfree) { - dev_info(dev, "\trxfree: %x\n", - chan_read(chan, rxfree)); - } - - dev_info(dev, "\tstats head_enqueue: %d\n", - chan->stats.head_enqueue); - dev_info(dev, "\tstats tail_enqueue: %d\n", - chan->stats.tail_enqueue); - dev_info(dev, "\tstats pad_enqueue: %d\n", - chan->stats.pad_enqueue); - dev_info(dev, "\tstats misqueued: %d\n", - chan->stats.misqueued); - dev_info(dev, "\tstats desc_alloc_fail: %d\n", - chan->stats.desc_alloc_fail); - dev_info(dev, "\tstats pad_alloc_fail: %d\n", - chan->stats.pad_alloc_fail); - dev_info(dev, "\tstats runt_receive_buff: %d\n", - chan->stats.runt_receive_buff); - dev_info(dev, "\tstats runt_transmit_buff: %d\n", - chan->stats.runt_transmit_buff); - dev_info(dev, "\tstats empty_dequeue: %d\n", - chan->stats.empty_dequeue); - dev_info(dev, "\tstats busy_dequeue: %d\n", - chan->stats.busy_dequeue); - dev_info(dev, "\tstats good_dequeue: %d\n", - chan->stats.good_dequeue); - dev_info(dev, "\tstats requeue: %d\n", - chan->stats.requeue); - dev_info(dev, "\tstats teardown_dequeue: %d\n", - chan->stats.teardown_dequeue); - - spin_unlock_irqrestore(&chan->lock, flags); - return 0; -} - static void __cpdma_chan_submit(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc) { @@ -675,7 +546,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, goto unlock_ret; } - desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); + if (chan->count >= chan->desc_num) { + chan->stats.desc_alloc_fail++; + ret = -ENOMEM; + goto unlock_ret; + } + + desc = cpdma_desc_alloc(ctlr->pool); if (!desc) { chan->stats.desc_alloc_fail++; ret = -ENOMEM; @@ -721,24 +598,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit); bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) { - unsigned long flags; - int index; - bool ret; struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc_pool *pool = ctlr->pool; + bool free_tx_desc; + unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - - index = bitmap_find_next_zero_area(pool->bitmap, - pool->num_desc, pool->num_desc/2, 1, 0); - - if (index < pool->num_desc) - ret = true; - else - ret = false; - - spin_unlock_irqrestore(&pool->lock, flags); - return ret; + spin_lock_irqsave(&chan->lock, flags); + free_tx_desc = (chan->count < chan->desc_num) && + gen_pool_avail(pool->gen_pool); + spin_unlock_irqrestore(&chan->lock, flags); + return free_tx_desc; } EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index 86dee487f..4b46cd6e9 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -77,14 +77,13 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params); int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr); int cpdma_ctlr_start(struct cpdma_ctlr *ctlr); int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr); -int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, cpdma_handler_fn handler); +int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr); int 
cpdma_chan_destroy(struct cpdma_chan *chan); int cpdma_chan_start(struct cpdma_chan *chan); int cpdma_chan_stop(struct cpdma_chan *chan); -int cpdma_chan_dump(struct cpdma_chan *chan); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index f56d66e6e..727a79f3c 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -348,7 +348,6 @@ struct emac_priv { u32 rx_addr_type; const char *phy_id; struct device_node *phy_node; - struct phy_device *phydev; spinlock_t lock; /*platform specific members*/ void (*int_enable) (void); @@ -379,97 +378,6 @@ static char *emac_rxhost_errcodes[16] = { #define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg))) #define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) -/** - * emac_dump_regs - Dump important EMAC registers to debug terminal - * @priv: The DaVinci EMAC private adapter structure - * - * Executes ethtool set cmd & sets phy mode - * - */ -static void emac_dump_regs(struct emac_priv *priv) -{ - struct device *emac_dev = &priv->ndev->dev; - - /* Print important registers in EMAC */ - dev_info(emac_dev, "EMAC Basic registers\n"); - if (priv->version == EMAC_VERSION_1) { - dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n", - emac_ctrl_read(EMAC_CTRL_EWCTL), - emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); - } - dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n", - emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL)); - dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\ - "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE), - emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN)); - dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\ - "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL), - emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG)); - dev_info(emac_dev, "EMAC Statistics\n"); - dev_info(emac_dev, "EMAC: rx_good_frames:%d\n", - emac_read(EMAC_RXGOODFRAMES)); - dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n", - emac_read(EMAC_RXBCASTFRAMES)); - dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n", - emac_read(EMAC_RXMCASTFRAMES)); - dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n", - emac_read(EMAC_RXPAUSEFRAMES)); - dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n", - emac_read(EMAC_RXCRCERRORS)); - dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n", - emac_read(EMAC_RXALIGNCODEERRORS)); - dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n", - emac_read(EMAC_RXOVERSIZED)); - dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n", - emac_read(EMAC_RXJABBER)); - dev_info(emac_dev, "EMAC: rx_undersized_frames:%d\n", - emac_read(EMAC_RXUNDERSIZED)); - dev_info(emac_dev, "EMAC: rx_fragments:%d\n", - emac_read(EMAC_RXFRAGMENTS)); - dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n", - emac_read(EMAC_RXFILTERED)); - dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n", - emac_read(EMAC_RXQOSFILTERED)); - dev_info(emac_dev, "EMAC: rx_octets:%d\n", - emac_read(EMAC_RXOCTETS)); - dev_info(emac_dev, "EMAC: tx_goodframes:%d\n", - emac_read(EMAC_TXGOODFRAMES)); - dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n", - emac_read(EMAC_TXBCASTFRAMES)); - dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n", - emac_read(EMAC_TXMCASTFRAMES)); - dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n", - emac_read(EMAC_TXPAUSEFRAMES)); - dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n", - emac_read(EMAC_TXDEFERRED)); - dev_info(emac_dev, "EMAC: 
tx_collision_frames:%d\n", - emac_read(EMAC_TXCOLLISION)); - dev_info(emac_dev, "EMAC: tx_single_coll_frames:%d\n", - emac_read(EMAC_TXSINGLECOLL)); - dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n", - emac_read(EMAC_TXMULTICOLL)); - dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n", - emac_read(EMAC_TXEXCESSIVECOLL)); - dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n", - emac_read(EMAC_TXLATECOLL)); - dev_info(emac_dev, "EMAC: tx_underrun:%d\n", - emac_read(EMAC_TXUNDERRUN)); - dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n", - emac_read(EMAC_TXCARRIERSENSE)); - dev_info(emac_dev, "EMAC: tx_octets:%d\n", - emac_read(EMAC_TXOCTETS)); - dev_info(emac_dev, "EMAC: net_octets:%d\n", - emac_read(EMAC_NETOCTETS)); - dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n", - emac_read(EMAC_RXSOFOVERRUNS)); - dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n", - emac_read(EMAC_RXMOFOVERRUNS)); - dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n", - emac_read(EMAC_RXDMAOVERRUNS)); - - cpdma_ctlr_dump(priv->dma); -} - /** * emac_get_drvinfo - Get EMAC driver information * @ndev: The DaVinci EMAC network adapter @@ -485,43 +393,6 @@ static void emac_get_drvinfo(struct net_device *ndev, strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version)); } -/** - * emac_get_settings - Get EMAC settings - * @ndev: The DaVinci EMAC network adapter - * @ecmd: ethtool command - * - * Executes ethool get command - * - */ -static int emac_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct emac_priv *priv = netdev_priv(ndev); - if (priv->phydev) - return phy_ethtool_gset(priv->phydev, ecmd); - else - return -EOPNOTSUPP; - -} - -/** - * emac_set_settings - Set EMAC settings - * @ndev: The DaVinci EMAC network adapter - * @ecmd: ethtool command - * - * Executes ethool set command - * - */ -static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct emac_priv *priv = netdev_priv(ndev); - if (priv->phydev) - return phy_ethtool_sset(priv->phydev, ecmd); - else - return -EOPNOTSUPP; - -} - /** * emac_get_coalesce - Get interrupt coalesce settings for this device * @ndev : The DaVinci EMAC network adapter @@ -625,12 +496,12 @@ static int emac_set_coalesce(struct net_device *ndev, */ static const struct ethtool_ops ethtool_ops = { .get_drvinfo = emac_get_drvinfo, - .get_settings = emac_get_settings, - .set_settings = emac_set_settings, .get_link = ethtool_op_get_link, .get_coalesce = emac_get_coalesce, .set_coalesce = emac_set_coalesce, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /** @@ -651,8 +522,8 @@ static void emac_update_phystatus(struct emac_priv *priv) mac_control = emac_read(EMAC_MACCONTROL); cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ? 
DUPLEX_FULL : DUPLEX_HALF; - if (priv->phydev) - new_duplex = priv->phydev->duplex; + if (ndev->phydev) + new_duplex = ndev->phydev->duplex; else new_duplex = DUPLEX_FULL; @@ -1134,8 +1005,6 @@ static void emac_dev_tx_timeout(struct net_device *ndev) if (netif_msg_tx_err(priv)) dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX"); - emac_dump_regs(priv); - ndev->stats.tx_errors++; emac_int_disable(priv); cpdma_chan_stop(priv->txchan); @@ -1454,7 +1323,7 @@ static void emac_poll_controller(struct net_device *ndev) static void emac_adjust_link(struct net_device *ndev) { struct emac_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = ndev->phydev; unsigned long flags; int new_state = 0; @@ -1483,7 +1352,7 @@ static void emac_adjust_link(struct net_device *ndev) } if (new_state) { emac_update_phystatus(priv); - phy_print_status(priv->phydev); + phy_print_status(ndev->phydev); } spin_unlock_irqrestore(&priv->lock, flags); @@ -1505,15 +1374,13 @@ static void emac_adjust_link(struct net_device *ndev) */ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) { - struct emac_priv *priv = netdev_priv(ndev); - if (!(netif_running(ndev))) return -EINVAL; /* TODO: Add phy read and write and private statistics get feature */ - if (priv->phydev) - return phy_mii_ioctl(priv->phydev, ifrq, cmd); + if (ndev->phydev) + return phy_mii_ioctl(ndev->phydev, ifrq, cmd); else return -EOPNOTSUPP; } @@ -1542,6 +1409,7 @@ static int emac_dev_open(struct net_device *ndev) int res_num = 0, irq_num = 0; int i = 0; struct emac_priv *priv = netdev_priv(ndev); + struct phy_device *phydev = NULL; ret = pm_runtime_get_sync(&priv->pdev->dev); if (ret < 0) { @@ -1607,12 +1475,10 @@ static int emac_dev_open(struct net_device *ndev) cpdma_ctlr_start(priv->dma); - priv->phydev = NULL; - if (priv->phy_node) { - priv->phydev = of_phy_connect(ndev, priv->phy_node, - &emac_adjust_link, 0, 0); - if (!priv->phydev) { + phydev = of_phy_connect(ndev, priv->phy_node, + &emac_adjust_link, 0, 0); + if (!phydev) { dev_err(emac_dev, "could not connect to phy %s\n", priv->phy_node->full_name); ret = -ENODEV; @@ -1621,7 +1487,7 @@ static int emac_dev_open(struct net_device *ndev) } /* use the first phy on the bus if pdata did not give us a phy id */ - if (!priv->phydev && !priv->phy_id) { + if (!phydev && !priv->phy_id) { struct device *phy; phy = bus_find_device(&mdio_bus_type, NULL, NULL, @@ -1630,16 +1496,15 @@ static int emac_dev_open(struct net_device *ndev) priv->phy_id = dev_name(phy); } - if (!priv->phydev && priv->phy_id && *priv->phy_id) { - priv->phydev = phy_connect(ndev, priv->phy_id, - &emac_adjust_link, - PHY_INTERFACE_MODE_MII); + if (!phydev && priv->phy_id && *priv->phy_id) { + phydev = phy_connect(ndev, priv->phy_id, + &emac_adjust_link, + PHY_INTERFACE_MODE_MII); - if (IS_ERR(priv->phydev)) { + if (IS_ERR(phydev)) { dev_err(emac_dev, "could not connect to phy %s\n", priv->phy_id); - ret = PTR_ERR(priv->phydev); - priv->phydev = NULL; + ret = PTR_ERR(phydev); goto err; } @@ -1647,10 +1512,10 @@ static int emac_dev_open(struct net_device *ndev) priv->speed = 0; priv->duplex = ~0; - phy_attached_info(priv->phydev); + phy_attached_info(phydev); } - if (!priv->phydev) { + if (!phydev) { /* No PHY , fix the link, speed and duplex settings */ dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); priv->link = 1; @@ -1659,14 +1524,11 @@ static int emac_dev_open(struct net_device *ndev) emac_update_phystatus(priv); } - if (!netif_running(ndev)) 
/* debug only - to avoid compiler warning */ - emac_dump_regs(priv); - if (netif_msg_drv(priv)) dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name); - if (priv->phydev) - phy_start(priv->phydev); + if (phydev) + phy_start(phydev); return 0; @@ -1717,8 +1579,8 @@ static int emac_dev_stop(struct net_device *ndev) cpdma_ctlr_stop(priv->dma); emac_write(EMAC_SOFTRESET, 1); - if (priv->phydev) - phy_disconnect(priv->phydev); + if (ndev->phydev) + phy_disconnect(ndev->phydev); /* Free IRQ */ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { @@ -2102,6 +1964,7 @@ static int davinci_emac_remove(struct platform_device *pdev) cpdma_ctlr_destroy(priv->dma); unregister_netdev(ndev); + of_node_put(priv->phy_node); pm_runtime_disable(&pdev->dev); free_netdev(ndev); diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 4e7c9b9b0..33df340db 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -53,6 +53,10 @@ #define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ +struct davinci_mdio_of_param { + int autosuspend_delay_ms; +}; + struct davinci_mdio_regs { u32 version; u32 control; @@ -90,19 +94,19 @@ static const struct mdio_platform_data default_pdata = { struct davinci_mdio_data { struct mdio_platform_data pdata; struct davinci_mdio_regs __iomem *regs; - spinlock_t lock; struct clk *clk; struct device *dev; struct mii_bus *bus; - bool suspended; + bool active_in_suspend; unsigned long access_time; /* jiffies */ /* Indicates that driver shouldn't modify phy_mask in case * if MDIO bus is registered from DT. */ bool skip_scan; + u32 clk_div; }; -static void __davinci_mdio_reset(struct davinci_mdio_data *data) +static void davinci_mdio_init_clk(struct davinci_mdio_data *data) { u32 mdio_in, div, mdio_out_khz, access_time; @@ -111,9 +115,7 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data) if (div > CONTROL_MAX_DIV) div = CONTROL_MAX_DIV; - /* set enable and clock divider */ - __raw_writel(div | CONTROL_ENABLE, &data->regs->control); - + data->clk_div = div; /* * One mdio transaction consists of: * 32 bits of preamble @@ -134,12 +136,23 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data) data->access_time = 1; } +static void davinci_mdio_enable(struct davinci_mdio_data *data) +{ + /* set enable and clock divider */ + __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control); +} + static int davinci_mdio_reset(struct mii_bus *bus) { struct davinci_mdio_data *data = bus->priv; u32 phy_mask, ver; + int ret; - __davinci_mdio_reset(data); + ret = pm_runtime_get_sync(data->dev); + if (ret < 0) { + pm_runtime_put_noidle(data->dev); + return ret; + } /* wait for scan logic to settle */ msleep(PHY_MAX_ADDR * data->access_time); @@ -150,7 +163,7 @@ static int davinci_mdio_reset(struct mii_bus *bus) (ver >> 8) & 0xff, ver & 0xff); if (data->skip_scan) - return 0; + goto done; /* get phy mask from the alive register */ phy_mask = __raw_readl(&data->regs->alive); @@ -165,6 +178,10 @@ static int davinci_mdio_reset(struct mii_bus *bus) } data->bus->phy_mask = phy_mask; +done: + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); + return 0; } @@ -190,7 +207,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) * operation */ dev_warn(data->dev, "resetting idled controller\n"); - __davinci_mdio_reset(data); + davinci_mdio_enable(data); return -EAGAIN; } @@ -225,11 +242,10 @@ static int davinci_mdio_read(struct mii_bus *bus, int 
phy_id, int phy_reg) if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; - spin_lock(&data->lock); - - if (data->suspended) { - spin_unlock(&data->lock); - return -ENODEV; + ret = pm_runtime_get_sync(data->dev); + if (ret < 0) { + pm_runtime_put_noidle(data->dev); + return ret; } reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | @@ -255,8 +271,8 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) break; } - spin_unlock(&data->lock); - + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); return ret; } @@ -270,11 +286,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; - spin_lock(&data->lock); - - if (data->suspended) { - spin_unlock(&data->lock); - return -ENODEV; + ret = pm_runtime_get_sync(data->dev); + if (ret < 0) { + pm_runtime_put_noidle(data->dev); + return ret; } reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | @@ -295,9 +310,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, break; } - spin_unlock(&data->lock); + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); - return 0; + return ret; } #if IS_ENABLED(CONFIG_OF) @@ -320,6 +336,19 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, } #endif +#if IS_ENABLED(CONFIG_OF) +static const struct davinci_mdio_of_param of_cpsw_mdio_data = { + .autosuspend_delay_ms = 100, +}; + +static const struct of_device_id davinci_mdio_of_mtable[] = { + { .compatible = "ti,davinci_mdio", }, + { .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data}, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); +#endif + static int davinci_mdio_probe(struct platform_device *pdev) { struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -328,6 +357,7 @@ static int davinci_mdio_probe(struct platform_device *pdev) struct resource *res; struct phy_device *phy; int ret, addr; + int autosuspend_delay_ms = -1; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -340,9 +370,22 @@ static int davinci_mdio_probe(struct platform_device *pdev) } if (dev->of_node) { - if (davinci_mdio_probe_dt(&data->pdata, pdev)) - data->pdata = default_pdata; + const struct of_device_id *of_id; + + ret = davinci_mdio_probe_dt(&data->pdata, pdev); + if (ret) + return ret; snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + + of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev); + if (of_id) { + const struct davinci_mdio_of_param *of_mdio_data; + + of_mdio_data = of_id->data; + if (of_mdio_data) + autosuspend_delay_ms = + of_mdio_data->autosuspend_delay_ms; + } } else { data->pdata = pdata ? 
(*pdata) : default_pdata; snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", @@ -356,26 +399,25 @@ static int davinci_mdio_probe(struct platform_device *pdev) data->bus->parent = dev; data->bus->priv = data; - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); data->clk = devm_clk_get(dev, "fck"); if (IS_ERR(data->clk)) { dev_err(dev, "failed to get device clock\n"); - ret = PTR_ERR(data->clk); - data->clk = NULL; - goto bail_out; + return PTR_ERR(data->clk); } dev_set_drvdata(dev, data); data->dev = dev; - spin_lock_init(&data->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(data->regs)) { - ret = PTR_ERR(data->regs); - goto bail_out; - } + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + + davinci_mdio_init_clk(data); + + pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); /* register the mii bus * Create PHYs from DT only in case if PHY child nodes are explicitly @@ -404,9 +446,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) return 0; bail_out: - pm_runtime_put_sync(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); - return ret; } @@ -417,29 +458,47 @@ static int davinci_mdio_remove(struct platform_device *pdev) if (data->bus) mdiobus_unregister(data->bus); - pm_runtime_put_sync(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } -#ifdef CONFIG_PM_SLEEP -static int davinci_mdio_suspend(struct device *dev) +#ifdef CONFIG_PM +static int davinci_mdio_runtime_suspend(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); u32 ctrl; - spin_lock(&data->lock); - /* shutdown the scan state machine */ ctrl = __raw_readl(&data->regs->control); ctrl &= ~CONTROL_ENABLE; __raw_writel(ctrl, &data->regs->control); wait_for_idle(data); - data->suspended = true; - spin_unlock(&data->lock); - pm_runtime_put_sync(data->dev); + return 0; +} + +static int davinci_mdio_runtime_resume(struct device *dev) +{ + struct davinci_mdio_data *data = dev_get_drvdata(dev); + + davinci_mdio_enable(data); + return 0; +} +#endif + +#ifdef CONFIG_PM_SLEEP +static int davinci_mdio_suspend(struct device *dev) +{ + struct davinci_mdio_data *data = dev_get_drvdata(dev); + int ret = 0; + + data->active_in_suspend = !pm_runtime_status_suspended(dev); + if (data->active_in_suspend) + ret = pm_runtime_force_suspend(dev); + if (ret < 0) + return ret; /* Select sleep pin state */ pinctrl_pm_select_sleep_state(dev); @@ -454,31 +513,19 @@ static int davinci_mdio_resume(struct device *dev) /* Select default pin state */ pinctrl_pm_select_default_state(dev); - pm_runtime_get_sync(data->dev); - - spin_lock(&data->lock); - /* restart the scan state machine */ - __davinci_mdio_reset(data); - - data->suspended = false; - spin_unlock(&data->lock); + if (data->active_in_suspend) + pm_runtime_force_resume(dev); return 0; } #endif static const struct dev_pm_ops davinci_mdio_pm_ops = { + SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend, + davinci_mdio_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) }; -#if IS_ENABLED(CONFIG_OF) -static const struct of_device_id davinci_mdio_of_mtable[] = { - { .compatible = "ti,davinci_mdio", }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); -#endif - static struct platform_driver davinci_mdio_driver = { .driver = { .name = 
"davinci_mdio", diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 561703317..ece0ea0f6 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -1651,7 +1651,6 @@ static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int) dma_addr_t head_list_phys; u32 ack = 1; - host_int = 0; if (priv->tlan_rev < 0x30) { TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n", diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 922a443e3..4ef605a90 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all) static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) { if (!info->egress_timer_scheduled) { - mod_timer_pinned(&info->egress_timer, jiffies + 1); + mod_timer(&info->egress_timer, jiffies + 1); info->egress_timer_scheduled = true; } } @@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr) BUG(); /* Initialize the egress timer. */ - init_timer(&info->egress_timer); + init_timer_pinned(&info->egress_timer); info->egress_timer.data = (long)info; info->egress_timer.function = tile_net_handle_egress_timer; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 548747834..5b01b3fa9 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -280,7 +280,7 @@ struct tc35815_regs { * Descriptors */ -/* Frame descripter */ +/* Frame descriptor */ struct FDesc { volatile __u32 FDNext; volatile __u32 FDSystem; @@ -288,7 +288,7 @@ struct FDesc { volatile __u32 FDCtl; }; -/* Buffer descripter */ +/* Buffer descriptor */ struct BDesc { volatile __u32 BuffData; volatile __u32 BDCtl; @@ -296,7 +296,7 @@ struct BDesc { #define FD_ALIGN 16 -/* Frame Descripter bit assign ---------------------------------------------- */ +/* Frame Descriptor bit assign ---------------------------------------------- */ #define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ #define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ #define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ @@ -309,7 +309,7 @@ struct BDesc { #define FD_Next_EOL 0x00000001 /* FD EOL indicator */ #define FD_BDCnt_SHIFT 16 -/* Buffer Descripter bit assign --------------------------------------------- */ +/* Buffer Descriptor bit assign --------------------------------------------- */ #define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */ #define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ #define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ @@ -405,7 +405,6 @@ struct tc35815_local { spinlock_t rx_lock; struct mii_bus *mii_bus; - struct phy_device *phy_dev; int duplex; int speed; int link; @@ -539,7 +538,7 @@ static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val) static void tc_handle_link_change(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); - struct phy_device *phydev = lp->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int status_change = 0; @@ -645,7 +644,6 @@ static int tc_mii_probe(struct net_device *dev) lp->link = 0; lp->speed = 0; lp->duplex = -1; - lp->phy_dev = phydev; return 0; } @@ -853,7 +851,7 @@ static void tc35815_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct tc35815_local *lp = netdev_priv(dev); - phy_disconnect(lp->phy_dev); + 
phy_disconnect(dev->phydev); mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); unregister_netdev(dev); @@ -1143,8 +1141,8 @@ static void tc35815_restart(struct net_device *dev) struct tc35815_local *lp = netdev_priv(dev); int ret; - if (lp->phy_dev) { - ret = phy_init_hw(lp->phy_dev); + if (dev->phydev) { + ret = phy_init_hw(dev->phydev); if (ret) printk(KERN_ERR "%s: PHY init failed.\n", dev->name); } @@ -1236,7 +1234,7 @@ tc35815_open(struct net_device *dev) netif_carrier_off(dev); /* schedule a link state check */ - phy_start(lp->phy_dev); + phy_start(dev->phydev); /* We are now ready to accept transmit requeusts from * the queueing layer of the networking. @@ -1387,7 +1385,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) if (status & Int_IntExBD) { if (netif_msg_rx_err(lp)) dev_warn(&dev->dev, - "Excessive Buffer Descriptiors (%#x).\n", + "Excessive Buffer Descriptors (%#x).\n", status); dev->stats.rx_length_errors++; ret = 0; @@ -1819,8 +1817,8 @@ tc35815_close(struct net_device *dev) netif_stop_queue(dev); napi_disable(&lp->napi); - if (lp->phy_dev) - phy_stop(lp->phy_dev); + if (dev->phydev) + phy_stop(dev->phydev); cancel_work_sync(&lp->restart_work); /* Flush the Tx and disable Rx here. */ @@ -1946,24 +1944,6 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info)); } -static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct tc35815_local *lp = netdev_priv(dev); - - if (!lp->phy_dev) - return -ENODEV; - return phy_ethtool_gset(lp->phy_dev, cmd); -} - -static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct tc35815_local *lp = netdev_priv(dev); - - if (!lp->phy_dev) - return -ENODEV; - return phy_ethtool_sset(lp->phy_dev, cmd); -} - static u32 tc35815_get_msglevel(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); @@ -2013,25 +1993,23 @@ static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data) static const struct ethtool_ops tc35815_ethtool_ops = { .get_drvinfo = tc35815_get_drvinfo, - .get_settings = tc35815_get_settings, - .set_settings = tc35815_set_settings, .get_link = ethtool_op_get_link, .get_msglevel = tc35815_get_msglevel, .set_msglevel = tc35815_set_msglevel, .get_strings = tc35815_get_strings, .get_sset_count = tc35815_get_sset_count, .get_ethtool_stats = tc35815_get_ethtool_stats, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct tc35815_local *lp = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!lp->phy_dev) + if (!dev->phydev) return -ENODEV; - return phy_mii_ioctl(lp->phy_dev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static void tc35815_chip_reset(struct net_device *dev) @@ -2116,7 +2094,7 @@ static void tc35815_chip_init(struct net_device *dev) if (lp->chiptype == TC35815_TX4939) txctl &= ~Tx_EnLCarr; /* WORKAROUND: ignore LostCrS in full duplex operation */ - if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) + if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL) txctl &= ~Tx_EnLCarr; tc_writel(txctl, &tr->Tx_Ctl); } @@ -2132,8 +2110,8 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) if (!netif_running(dev)) return 0; netif_device_detach(dev); - if (lp->phy_dev) - phy_stop(lp->phy_dev); 
+ if (dev->phydev) + phy_stop(dev->phydev); spin_lock_irqsave(&lp->lock, flags); tc35815_chip_reset(dev); spin_unlock_irqrestore(&lp->lock, flags); @@ -2144,7 +2122,6 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) static int tc35815_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); - struct tc35815_local *lp = netdev_priv(dev); pci_restore_state(pdev); if (!netif_running(dev)) @@ -2152,8 +2129,8 @@ static int tc35815_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); tc35815_restart(dev); netif_carrier_off(dev); - if (lp->phy_dev) - phy_start(lp->phy_dev); + if (dev->phydev) + phy_start(dev->phydev); netif_device_attach(dev); return 0; } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01a77145a..8fd131207 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = { static void tsi108_timed_checker(unsigned long dev_ptr); +#ifdef DEBUG static void dump_eth_one(struct net_device *dev) { struct tsi108_prv_data *data = netdev_priv(dev); @@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev) TSI_READ(TSI108_EC_RXESTAT), TSI_READ(TSI108_EC_RXERR), data->rxpending); } +#endif /* Synchronization is needed between the thread and up/down events. * Note that the PHY is accessed through the same registers for both diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 4f6255cf6..37ab46cdb 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1154,7 +1154,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, if (err < 0) goto err_register; - priv->xfer_wq = create_workqueue(netdev_name(ndev)); + priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0); if (!priv->xfer_wq) { err = -ENOMEM; goto err_wq; @@ -1233,7 +1233,6 @@ int w5100_remove(struct device *dev) flush_work(&priv->setrx_work); flush_work(&priv->restart_work); - flush_workqueue(priv->xfer_wq); destroy_workqueue(priv->xfer_wq); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 902457e43..7d06e3e1a 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -332,7 +332,6 @@ struct temac_local { struct device *dev; /* Connection to PHY device */ - struct phy_device *phy_dev; /* Pointer to PHY device */ struct device_node *phy_node; /* MDIO bus data */ diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 739708712..a9bd665fd 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -590,7 +590,7 @@ static void temac_device_reset(struct net_device *ndev) static void temac_adjust_link(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); - struct phy_device *phy = lp->phy_dev; + struct phy_device *phy = ndev->phydev; u32 mii_speed; int link_state; @@ -843,19 +843,20 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev) static int temac_open(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); + struct phy_device *phydev = NULL; int rc; dev_dbg(&ndev->dev, "temac_open()\n"); if (lp->phy_node) { - lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, - temac_adjust_link, 0, 0); - if (!lp->phy_dev) { + phydev = of_phy_connect(lp->ndev, 
lp->phy_node, + temac_adjust_link, 0, 0); + if (!phydev) { dev_err(lp->dev, "of_phy_connect() failed\n"); return -ENODEV; } - phy_start(lp->phy_dev); + phy_start(phydev); } temac_device_reset(ndev); @@ -872,9 +873,8 @@ static int temac_open(struct net_device *ndev) err_rx_irq: free_irq(lp->tx_irq, ndev); err_tx_irq: - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - lp->phy_dev = NULL; + if (phydev) + phy_disconnect(phydev); dev_err(lp->dev, "request_irq() failed\n"); return rc; } @@ -882,15 +882,15 @@ static int temac_open(struct net_device *ndev) static int temac_stop(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; dev_dbg(&ndev->dev, "temac_close()\n"); free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - lp->phy_dev = NULL; + if (phydev) + phy_disconnect(phydev); temac_dma_bd_release(ndev); @@ -916,15 +916,13 @@ temac_poll_controller(struct net_device *ndev) static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { - struct temac_local *lp = netdev_priv(ndev); - if (!netif_running(ndev)) return -EINVAL; - if (!lp->phy_dev) + if (!ndev->phydev) return -EINVAL; - return phy_mii_ioctl(lp->phy_dev, rq, cmd); + return phy_mii_ioctl(ndev->phydev, rq, cmd); } static const struct net_device_ops temac_netdev_ops = { @@ -969,30 +967,17 @@ static const struct attribute_group temac_attr_group = { }; /* ethtool support */ -static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) -{ - struct temac_local *lp = netdev_priv(ndev); - return phy_ethtool_gset(lp->phy_dev, cmd); -} - -static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) -{ - struct temac_local *lp = netdev_priv(ndev); - return phy_ethtool_sset(lp->phy_dev, cmd); -} - static int temac_nway_reset(struct net_device *ndev) { - struct temac_local *lp = netdev_priv(ndev); - return phy_start_aneg(lp->phy_dev); + return phy_start_aneg(ndev->phydev); } static const struct ethtool_ops temac_ethtool_ops = { - .get_settings = temac_get_settings, - .set_settings = temac_set_settings, .nway_reset = temac_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int temac_of_probe(struct platform_device *op) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 9ead4e269..af27f7d1c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -382,7 +382,6 @@ struct axidma_bd { * struct axienet_local - axienet private per device data * @ndev: Pointer for net_device to which it will be attached. 
* @dev: Pointer to device structure - * @phy_dev: Pointer to PHY device structure attached to the axienet_local * @phy_node: Pointer to device node structure * @mii_bus: Pointer to MII bus structure * @regs: Base address for the axienet_local device address space @@ -420,7 +419,6 @@ struct axienet_local { struct device *dev; /* Connection to PHY device */ - struct phy_device *phy_dev; /* Pointer to PHY device */ struct device_node *phy_node; /* MDIO bus data */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 8c7f5be51..36ee7ab30 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -525,7 +525,7 @@ static void axienet_adjust_link(struct net_device *ndev) u32 link_state; u32 setspeed = 1; struct axienet_local *lp = netdev_priv(ndev); - struct phy_device *phy = lp->phy_dev; + struct phy_device *phy = ndev->phydev; link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { @@ -911,6 +911,7 @@ static int axienet_open(struct net_device *ndev) { int ret, mdio_mcreg; struct axienet_local *lp = netdev_priv(ndev); + struct phy_device *phydev = NULL; dev_dbg(&ndev->dev, "axienet_open()\n"); @@ -934,19 +935,19 @@ static int axienet_open(struct net_device *ndev) if (lp->phy_node) { if (lp->phy_type == XAE_PHY_TYPE_GMII) { - lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); + phydev = of_phy_connect(lp->ndev, lp->phy_node, + axienet_adjust_link, 0, + PHY_INTERFACE_MODE_GMII); } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) { - lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_RGMII_ID); + phydev = of_phy_connect(lp->ndev, lp->phy_node, + axienet_adjust_link, 0, + PHY_INTERFACE_MODE_RGMII_ID); } - if (!lp->phy_dev) + if (!phydev) dev_err(lp->dev, "of_phy_connect() failed\n"); else - phy_start(lp->phy_dev); + phy_start(phydev); } /* Enable tasklets for Axi DMA error handling */ @@ -967,9 +968,8 @@ static int axienet_open(struct net_device *ndev) err_rx_irq: free_irq(lp->tx_irq, ndev); err_tx_irq: - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - lp->phy_dev = NULL; + if (phydev) + phy_disconnect(phydev); tasklet_kill(&lp->dma_err_tasklet); dev_err(lp->dev, "request_irq() failed\n"); return ret; @@ -1006,9 +1006,8 @@ static int axienet_stop(struct net_device *ndev) free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - lp->phy_dev = NULL; + if (ndev->phydev) + phy_disconnect(ndev->phydev); axienet_dma_bd_release(ndev); return 0; @@ -1077,51 +1076,6 @@ static const struct net_device_ops axienet_netdev_ops = { #endif }; -/** - * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY. - * @ndev: Pointer to net_device structure - * @ecmd: Pointer to ethtool_cmd structure - * - * This implements ethtool command for getting PHY settings. If PHY could - * not be found, the function returns -ENODEV. This function calls the - * relevant PHY ethtool API to get the PHY settings. - * Issue "ethtool ethX" under linux prompt to execute this function. 
- * - * Return: 0 on success, -ENODEV if PHY doesn't exist - */ -static int axienet_ethtools_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct axienet_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; - if (!phydev) - return -ENODEV; - return phy_ethtool_gset(phydev, ecmd); -} - -/** - * axienet_ethtools_set_settings - Set PHY settings as passed in the argument. - * @ndev: Pointer to net_device structure - * @ecmd: Pointer to ethtool_cmd structure - * - * This implements ethtool command for setting various PHY settings. If PHY - * could not be found, the function returns -ENODEV. This function calls the - * relevant PHY ethtool API to set the PHY. - * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this - * function. - * - * Return: 0 on success, -ENODEV if PHY doesn't exist - */ -static int axienet_ethtools_set_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct axienet_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; - if (!phydev) - return -ENODEV; - return phy_ethtool_sset(phydev, ecmd); -} - /** * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. * @ndev: Pointer to net_device structure @@ -1344,8 +1298,6 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev, } static struct ethtool_ops axienet_ethtool_ops = { - .get_settings = axienet_ethtools_get_settings, - .set_settings = axienet_ethtools_set_settings, .get_drvinfo = axienet_ethtools_get_drvinfo, .get_regs_len = axienet_ethtools_get_regs_len, .get_regs = axienet_ethtools_get_regs, @@ -1354,6 +1306,8 @@ static struct ethtool_ops axienet_ethtool_ops = { .set_pauseparam = axienet_ethtools_set_pauseparam, .get_coalesce = axienet_ethtools_get_coalesce, .set_coalesce = axienet_ethtools_set_coalesce, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /** @@ -1587,9 +1541,9 @@ static int axienet_probe(struct platform_device *pdev) /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); - if (IS_ERR(np)) { + if (!np) { dev_err(&pdev->dev, "could not find DMA node\n"); - ret = PTR_ERR(np); + ret = -ENODEV; goto free_netdev; } ret = of_address_to_resource(np, 0, &dmares); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 3cee84a24..93dc10b10 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev) lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); mac_address = of_get_mac_address(ofdev->dev.of_node); - if (mac_address) + if (mac_address) { /* Set the MAC address. 
*/ memcpy(ndev->dev_addr, mac_address, ETH_ALEN); - else - dev_warn(dev, "No MAC address found\n"); + } else { + dev_warn(dev, "No MAC address found, using random\n"); + eth_hw_addr_random(ndev); + } /* Clear the Tx CSR's in case this is a restart */ __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 7b44968e0..ddced28e8 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1144,8 +1144,8 @@ xirc2ps_interrupt(int irq, void *dev_id) dev->stats.tx_packets += lp->last_ptr_value - n; netif_wake_queue(dev); } - if (tx_status & 0x0002) { /* Execessive collissions */ - pr_debug("tx restarted due to execssive collissions\n"); + if (tx_status & 0x0002) { /* Excessive collisions */ + pr_debug("tx restarted due to excessive collisions\n"); PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ } if (tx_status & 0x0040) diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 513840794..7f127dc1b 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -171,7 +171,6 @@ struct port { struct npe *npe; struct net_device *netdev; struct napi_struct napi; - struct phy_device *phydev; struct eth_plat_info *plat; buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; struct desc *desc_tab; /* coherent */ @@ -562,7 +561,7 @@ static void ixp4xx_mdio_remove(void) static void ixp4xx_adjust_link(struct net_device *dev) { struct port *port = netdev_priv(dev); - struct phy_device *phydev = port->phydev; + struct phy_device *phydev = dev->phydev; if (!phydev->link) { if (port->speed) { @@ -976,8 +975,6 @@ static void eth_set_mcast_list(struct net_device *dev) static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { - struct port *port = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; @@ -988,7 +985,7 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) return hwtstamp_get(dev, req); } - return phy_mii_ioctl(port->phydev, req, cmd); + return phy_mii_ioctl(dev->phydev, req, cmd); } /* ethtool support */ @@ -1005,22 +1002,9 @@ static void ixp4xx_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); } -static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct port *port = netdev_priv(dev); - return phy_ethtool_gset(port->phydev, cmd); -} - -static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct port *port = netdev_priv(dev); - return phy_ethtool_sset(port->phydev, cmd); -} - static int ixp4xx_nway_reset(struct net_device *dev) { - struct port *port = netdev_priv(dev); - return phy_start_aneg(port->phydev); + return phy_start_aneg(dev->phydev); } int ixp46x_phc_index = -1; @@ -1054,11 +1038,11 @@ static int ixp4xx_get_ts_info(struct net_device *dev, static const struct ethtool_ops ixp4xx_ethtool_ops = { .get_drvinfo = ixp4xx_get_drvinfo, - .get_settings = ixp4xx_get_settings, - .set_settings = ixp4xx_set_settings, .nway_reset = ixp4xx_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = ixp4xx_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; @@ -1259,7 +1243,7 @@ static int eth_open(struct net_device *dev) } port->speed = 0; /* force "link up" message */ - phy_start(port->phydev); + phy_start(dev->phydev); for (i = 0; 
i < ETH_ALEN; i++) __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]); @@ -1380,7 +1364,7 @@ static int eth_close(struct net_device *dev) printk(KERN_CRIT "%s: unable to disable loopback\n", dev->name); - phy_stop(port->phydev); + phy_stop(dev->phydev); if (!ports_open) qmgr_disable_irq(TXDONE_QUEUE); @@ -1405,6 +1389,7 @@ static int eth_init_one(struct platform_device *pdev) struct port *port; struct net_device *dev; struct eth_plat_info *plat = dev_get_platdata(&pdev->dev); + struct phy_device *phydev = NULL; u32 regs_phys; char phy_id[MII_BUS_ID_SIZE + 3]; int err; @@ -1466,14 +1451,14 @@ static int eth_init_one(struct platform_device *pdev) snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, mdio_bus->id, plat->phy); - port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, - PHY_INTERFACE_MODE_MII); - if (IS_ERR(port->phydev)) { - err = PTR_ERR(port->phydev); + phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(phydev)) { + err = PTR_ERR(phydev); goto err_free_mem; } - port->phydev->irq = PHY_POLL; + phydev->irq = PHY_POLL; if ((err = register_netdev(dev))) goto err_phy_dis; @@ -1484,7 +1469,7 @@ static int eth_init_one(struct platform_device *pdev) return 0; err_phy_dis: - phy_disconnect(port->phydev); + phy_disconnect(phydev); err_free_mem: npe_port_tab[NPE_ID(port->id)] = NULL; release_resource(port->mem_res); @@ -1498,10 +1483,11 @@ err_free: static int eth_remove_one(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct phy_device *phydev = dev->phydev; struct port *port = netdev_priv(dev); unregister_netdev(dev); - phy_disconnect(port->phydev); + phy_disconnect(phydev); npe_port_tab[NPE_ID(port->id)] = NULL; npe_release(port->npe); release_resource(port->mem_res); -- cgit v1.2.3-54-g00ecf
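The davinci_cpdma changes above replace the driver's hand-rolled descriptor
bitmap (and the spinlock guarding it) with a gen_pool covering the same
descriptor memory. A condensed sketch of the genalloc sequence follows; the
gen_pool_* calls are the real API, while the foo_* names and the trimmed
error handling are illustrative only.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/log2.h>

struct foo_desc_pool {
	struct gen_pool *gen_pool;
	void __iomem *iomap;	/* descriptor region, CPU side */
	dma_addr_t phys;	/* same region, bus address */
	int mem_size;
	int desc_size;
};

static int foo_pool_init(struct device *dev, struct foo_desc_pool *pool)
{
	/* One allocation unit per descriptor: order = ilog2(desc_size). */
	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size),
					      -1, "foo");
	if (IS_ERR(pool->gen_pool))
		return PTR_ERR(pool->gen_pool);

	/* Hand the whole virt/phys region to the pool as one chunk. */
	return gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				 pool->phys, pool->mem_size, -1);
}

static void __iomem *foo_desc_alloc(struct foo_desc_pool *pool)
{
	/* genalloc serializes internally; no driver lock required. */
	return (void __iomem *)gen_pool_alloc(pool->gen_pool,
					      pool->desc_size);
}

static void foo_desc_free(struct foo_desc_pool *pool, void __iomem *desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

The old rx/tx halves of the bitmap are not re-created inside the pool;
fairness between the two directions is instead enforced by the new
per-channel desc_num budget that cpdma_chan_submit() checks before
allocating a descriptor.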
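In the davinci_mdio portion above, the suspended flag and bus-wide spinlock
give way to runtime PM with autosuspend: each MDIO transaction takes a
reference, and the release is deferred so bursts of reads and writes do not
bounce the controller clock. A sketch of that flow, with foo_* as
placeholder names; the 100 ms delay mirrors the of_cpsw_mdio_data value in
the patch.

#include <linux/pm_runtime.h>

static int foo_mdio_xfer(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... the USERACCESS register transaction would go here ... */

	/* Defer the actual suspend so back-to-back accesses stay cheap. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}

static void foo_mdio_setup_pm(struct device *dev)
{
	/* Idle the controller 100 ms after the last bus access. */
	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

The runtime_suspend callback then becomes the natural place to stop the
scan state machine and runtime_resume the place to re-enable it, which is
how davinci_mdio_runtime_suspend()/_resume() are wired above.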
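Several drivers in this patch (davinci_emac, tc35815, ll_temac, axienet,
ixp4xx) drop their private struct phy_device pointer in favour of the one
that phy_connect()/of_phy_connect() already stores in struct net_device,
which in turn lets their hand-written get_settings/set_settings ethtool
hooks be replaced by the generic link_ksettings helpers. A sketch of the
resulting shape; foo_* and the "foo-mdio:00" bus id are placeholders.

#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static void foo_adjust_link(struct net_device *ndev)
{
	/* ndev->phydev was filled in by phy_connect() below. */
	phy_print_status(ndev->phydev);
}

static int foo_open(struct net_device *ndev)
{
	struct phy_device *phydev;

	phydev = phy_connect(ndev, "foo-mdio:00", foo_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	phy_start(phydev);
	return 0;
}

static const struct ethtool_ops foo_phy_ethtool_ops = {
	/* The generic helpers operate on ndev->phydev directly. */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

Dropping the duplicate pointer also removes a class of stale-pointer bugs:
there is no driver-private field left to forget to clear on disconnect.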
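The w5100 hunk above swaps create_workqueue() for alloc_workqueue() with
WQ_MEM_RECLAIM: a queue servicing packet transfers sits on the memory
reclaim path, so it needs a rescuer thread to guarantee forward progress
under pressure. A sketch, with a placeholder name:

#include <linux/workqueue.h>

static struct workqueue_struct *foo_create_xfer_wq(const char *name)
{
	/* WQ_MEM_RECLAIM adds a rescuer; 0 means default max_active. */
	return alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, name);
}

The companion hunk drops the flush_workqueue() call in the remove path,
since destroy_workqueue() already drains the queue before tearing it down.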
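Finally, the xilinx_emaclite hunk above turns a missing MAC address from a
bare warning into a recoverable condition by falling back to a random
locally-administered address. A sketch of the fallback; foo_set_mac() is a
placeholder wrapper, while eth_hw_addr_random() is the real helper.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void foo_set_mac(struct net_device *ndev, const void *mac)
{
	if (mac)
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(ndev); /* also sets NET_ADDR_RANDOM */
}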