From 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Tue, 15 Dec 2015 14:52:16 -0300
Subject: Linux-libre 4.3.2-gnu

---
 drivers/net/Kconfig | 19 +-
 drivers/net/Makefile | 3 +
 drivers/net/arcnet/arcnet.c | 2 +-
 drivers/net/bonding/bond_3ad.c | 2 -
 drivers/net/bonding/bond_main.c | 10 +-
 drivers/net/bonding/bond_netlink.c | 17 +-
 drivers/net/bonding/bond_options.c | 20 +-
 drivers/net/bonding/bond_sysfs.c | 20 +-
 drivers/net/caif/caif_hsi.c | 2 +-
 drivers/net/caif/caif_serial.c | 2 +-
 drivers/net/caif/caif_spi.c | 2 +-
 drivers/net/can/dev.c | 2 +-
 drivers/net/can/flexcan.c | 2 +-
 drivers/net/can/sja1000/peak_pci.c | 1 +
 drivers/net/can/sja1000/sja1000.c | 3 +
 drivers/net/can/usb/gs_usb.c | 8 +-
 drivers/net/dsa/Kconfig | 6 +-
 drivers/net/dsa/mv88e6123_61_65.c | 1 +
 drivers/net/dsa/mv88e6131.c | 1 +
 drivers/net/dsa/mv88e6171.c | 12 +-
 drivers/net/dsa/mv88e6352.c | 115 +-
 drivers/net/dsa/mv88e6xxx.c | 1121 ++++++-
 drivers/net/dsa/mv88e6xxx.h | 91 +-
 drivers/net/dummy.c | 3 +-
 drivers/net/ethernet/3com/3c59x.c | 23 +-
 drivers/net/ethernet/Kconfig | 1 +
 drivers/net/ethernet/Makefile | 1 +
 drivers/net/ethernet/allwinner/sun4i-emac.c | 31 +-
 drivers/net/ethernet/altera/altera_sgdma.c | 8 +-
 drivers/net/ethernet/altera/altera_sgdmahw.h | 1 +
 drivers/net/ethernet/altera/altera_tse.h | 1 -
 drivers/net/ethernet/altera/altera_tse_main.c | 1 +
 drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 4 +
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2 +-
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 8 +-
 drivers/net/ethernet/amd/xgbe/xgbe.h | 2 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 24 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 16 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 312 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 12 +
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 8 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 2 +
 drivers/net/ethernet/arc/emac_arc.c | 1 +
 drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 10 +
 drivers/net/ethernet/broadcom/Kconfig | 10 +
 drivers/net/ethernet/broadcom/bcm63xx_enet.c | 33 +-
 drivers/net/ethernet/broadcom/bcmsysport.c | 19 +
 drivers/net/ethernet/broadcom/bgmac.c | 30 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 64 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 100 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 71 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 12 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 10 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 10 +-
 .../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 29 +-
 .../net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 6 +-
 .../ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 2 +
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 200 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 4 +-
 .../net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 254 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 10 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 595 +++-
 .../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h | 4 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 79 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 337 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 77 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 358 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 58 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 4 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 4 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 212 +-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 37 +-
 drivers/net/ethernet/broadcom/cnic.c | 36 +-
 drivers/net/ethernet/broadcom/cnic_if.h | 21 +-
 drivers/net/ethernet/broadcom/genet/bcmgenet.c | 112 +-
 drivers/net/ethernet/broadcom/genet/bcmgenet.h | 4 +-
 drivers/net/ethernet/broadcom/genet/bcmmii.c | 117 +-
 drivers/net/ethernet/brocade/bna/bfa_ioc.c | 13 +-
 drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 2 +
 drivers/net/ethernet/brocade/bna/bna_types.h | 1 +
 drivers/net/ethernet/brocade/bna/bnad.c | 29 +-
 drivers/net/ethernet/brocade/bna/bnad.h | 2 +
 drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 4 +
 drivers/net/ethernet/cadence/macb.c | 6 +-
 drivers/net/ethernet/cadence/macb.h | 2 +-
 drivers/net/ethernet/cavium/Kconfig | 4 +-
 drivers/net/ethernet/cavium/liquidio/lio_main.c | 3 +-
 drivers/net/ethernet/cavium/thunder/nic.h | 93 +-
 drivers/net/ethernet/cavium/thunder/nic_main.c | 240 +-
 drivers/net/ethernet/cavium/thunder/nic_reg.h | 4 +
 .../net/ethernet/cavium/thunder/nicvf_ethtool.c | 182 +-
 drivers/net/ethernet/cavium/thunder/nicvf_main.c | 543 ++-
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 144 +-
 drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 44 +-
 drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 169 +-
 drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 4 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 10 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 42 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 775 ++++-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 14 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 89 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 10 +-
 drivers/net/ethernet/chelsio/cxgb4/l2t.c | 94 +-
 drivers/net/ethernet/chelsio/cxgb4/l2t.h | 18 +-
 drivers/net/ethernet/chelsio/cxgb4/sge.c | 25 +-
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 346 +-
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1 -
 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 3 +
 drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 23 +
 drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 197 +-
 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 33 +-
 drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 23 +-
 drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 30 +-
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 3 +-
 drivers/net/ethernet/cisco/enic/enic.h | 21 +-
 drivers/net/ethernet/cisco/enic/enic_clsf.c | 2 +-
 drivers/net/ethernet/cisco/enic/enic_ethtool.c | 113 +-
 drivers/net/ethernet/cisco/enic/enic_main.c | 142 +-
 drivers/net/ethernet/cisco/enic/vnic_cq.c | 3 +-
 drivers/net/ethernet/cisco/enic/vnic_dev.c | 277 +-
 drivers/net/ethernet/cisco/enic/vnic_dev.h | 44 +-
 drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 28 +
 drivers/net/ethernet/cisco/enic/vnic_intr.c | 3 +-
 drivers/net/ethernet/cisco/enic/vnic_resource.h | 7 +
 drivers/net/ethernet/cisco/enic/vnic_rq.c | 6 +-
 drivers/net/ethernet/cisco/enic/vnic_wq.c | 33 +-
 drivers/net/ethernet/cisco/enic/vnic_wq.h | 18 +
 drivers/net/ethernet/davicom/dm9000.c | 2 +-
 drivers/net/ethernet/ec_bhf.c | 14 +-
 drivers/net/ethernet/emulex/benet/be.h | 9 +-
 drivers/net/ethernet/emulex/benet/be_cmds.c | 100 +-
 drivers/net/ethernet/emulex/benet/be_cmds.h | 21 +-
 drivers/net/ethernet/emulex/benet/be_ethtool.c | 17 +-
 drivers/net/ethernet/emulex/benet/be_main.c | 107 +-
 drivers/net/ethernet/ethoc.c | 7 +-
 drivers/net/ethernet/ezchip/nps_enet.c | 37 +-
 drivers/net/ethernet/ezchip/nps_enet.h | 20 -
 drivers/net/ethernet/freescale/fec_main.c | 53 +-
 drivers/net/ethernet/freescale/fec_ptp.c | 6 -
 drivers/net/ethernet/freescale/fsl_pq_mdio.c | 34 +-
 drivers/net/ethernet/freescale/gianfar.c | 525 +--
 drivers/net/ethernet/freescale/gianfar.h | 77 +-
 drivers/net/ethernet/freescale/gianfar_ethtool.c | 8 +-
 drivers/net/ethernet/freescale/gianfar_ptp.c | 1 +
 drivers/net/ethernet/freescale/ucc_geth.c | 8 +-
 drivers/net/ethernet/hisilicon/hip04_eth.c | 3 +-
 drivers/net/ethernet/hisilicon/hip04_mdio.c | 1 -
 drivers/net/ethernet/ibm/ibmveth.c | 145 +-
 drivers/net/ethernet/ibm/ibmveth.h | 18 +-
 drivers/net/ethernet/intel/e100.c | 12 +-
 drivers/net/ethernet/intel/e1000e/ich8lan.h | 4 +-
 drivers/net/ethernet/intel/e1000e/netdev.c | 58 +-
 drivers/net/ethernet/intel/e1000e/regs.h | 5 +-
 drivers/net/ethernet/intel/i40e/i40e.h | 74 +-
 drivers/net/ethernet/intel/i40e/i40e_adminq.c | 13 +-
 drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 72 +-
 drivers/net/ethernet/intel/i40e/i40e_common.c | 407 ++-
 drivers/net/ethernet/intel/i40e/i40e_dcb.c | 4 +
 drivers/net/ethernet/intel/i40e/i40e_dcb.h | 8 +-
 drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2 +-
 drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 10 +-
 drivers/net/ethernet/intel/i40e/i40e_diag.c | 11 +-
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 157 +-
 drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 12 +-
 drivers/net/ethernet/intel/i40e/i40e_fcoe.h | 4 +-
 drivers/net/ethernet/intel/i40e/i40e_hmc.c | 67 +-
 drivers/net/ethernet/intel/i40e/i40e_hmc.h | 10 +-
 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 18 +-
 drivers/net/ethernet/intel/i40e/i40e_main.c | 790 +++--
 drivers/net/ethernet/intel/i40e/i40e_nvm.c | 135 +-
 drivers/net/ethernet/intel/i40e/i40e_prototype.h | 13 +
 drivers/net/ethernet/intel/i40e/i40e_ptp.c | 7 +-
 drivers/net/ethernet/intel/i40e/i40e_register.h | 1938 ++++++++++-
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 259 +-
 drivers/net/ethernet/intel/i40e/i40e_txrx.h | 60 +-
 drivers/net/ethernet/intel/i40e/i40e_type.h | 85 +-
 drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 17 +-
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 91 +-
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 5 +
 drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 30 +-
 .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 67 +-
 drivers/net/ethernet/intel/i40evf/i40e_common.c | 380 ++-
 drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 10 +-
 drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 13 +
 drivers/net/ethernet/intel/i40evf/i40e_register.h | 3155 +-----------------
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 199 +-
 drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 58 +-
 drivers/net/ethernet/intel/i40evf/i40e_type.h | 81 +-
 drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 17 +-
 drivers/net/ethernet/intel/i40evf/i40evf.h | 61 +-
 drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 44 +-
 drivers/net/ethernet/intel/i40evf/i40evf_main.c | 350 +-
 .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 51 +-
 drivers/net/ethernet/intel/igb/e1000_82575.c | 38 +-
 drivers/net/ethernet/intel/igb/e1000_defines.h | 5 +
 drivers/net/ethernet/intel/igb/e1000_phy.c | 109 +-
 drivers/net/ethernet/intel/igb/e1000_phy.h | 1 +
 drivers/net/ethernet/intel/igb/e1000_regs.h | 2 +
 drivers/net/ethernet/intel/igb/igb_ethtool.c | 25 +-
 drivers/net/ethernet/intel/igb/igb_main.c | 118 +-
 drivers/net/ethernet/intel/igb/igb_ptp.c | 72 +-
 drivers/net/ethernet/intel/igbvf/netdev.c | 1 +
 drivers/net/ethernet/intel/ixgbe/ixgbe.h | 7 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 91 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 15 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 62 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 277 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 75 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 73 +-
 drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 5 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 182 +-
 drivers/net/ethernet/intel/ixgbevf/defines.h | 12 +
 drivers/net/ethernet/intel/ixgbevf/ethtool.c | 51 +-
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 9 +-
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 114 +-
 drivers/net/ethernet/jme.c | 8 +-
 drivers/net/ethernet/marvell/mv643xx_eth.c | 57 +-
 drivers/net/ethernet/marvell/mvneta.c | 12 +-
 drivers/net/ethernet/mellanox/Kconfig | 1 +
 drivers/net/ethernet/mellanox/Makefile | 1 +
 drivers/net/ethernet/mellanox/mlx4/cmd.c | 2 +-
 drivers/net/ethernet/mellanox/mlx4/en_cq.c | 5 +-
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 51 +-
 drivers/net/ethernet/mellanox/mlx4/en_main.c | 36 +-
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 46 +
 drivers/net/ethernet/mellanox/mlx4/en_rx.c | 36 +-
 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 15 +-
 drivers/net/ethernet/mellanox/mlx4/eq.c | 6 +-
 drivers/net/ethernet/mellanox/mlx4/fw.c | 82 +
 drivers/net/ethernet/mellanox/mlx4/fw.h | 1 +
 drivers/net/ethernet/mellanox/mlx4/intf.c | 3 +
 drivers/net/ethernet/mellanox/mlx4/main.c | 45 +-
 drivers/net/ethernet/mellanox/mlx4/mcg.c | 7 +-
 drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3 +
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1 +
 drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 48 +-
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 172 +-
 .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 259 +-
 .../ethernet/mellanox/mlx5/core/en_flow_table.c | 387 ++-
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1017 ++++--
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 46 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 36 +-
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 32 +-
 .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 7 +-
 drivers/net/ethernet/mellanox/mlx5/core/port.c | 58 +-
 drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 53 +
 drivers/net/ethernet/mellanox/mlx5/core/transobj.h | 8 +
 drivers/net/ethernet/mellanox/mlx5/core/uar.c | 6 +
 drivers/net/ethernet/mellanox/mlx5/core/wq.c | 12 +-
 drivers/net/ethernet/mellanox/mlx5/core/wq.h | 3 +-
 drivers/net/ethernet/mellanox/mlxsw/Kconfig | 32 +
 drivers/net/ethernet/mellanox/mlxsw/Makefile | 6 +
 drivers/net/ethernet/mellanox/mlxsw/cmd.h | 1090 ++++++
 drivers/net/ethernet/mellanox/mlxsw/core.c | 1300 ++++++++
 drivers/net/ethernet/mellanox/mlxsw/core.h | 207 ++
 drivers/net/ethernet/mellanox/mlxsw/emad.h | 127 +
 drivers/net/ethernet/mellanox/mlxsw/item.h | 407 +++
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 1826 ++++++++++
 drivers/net/ethernet/mellanox/mlxsw/pci.h | 227 ++
 drivers/net/ethernet/mellanox/mlxsw/port.h | 75 +
 drivers/net/ethernet/mellanox/mlxsw/reg.h | 1349 ++++++++
 drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1568 +++++++++
 drivers/net/ethernet/mellanox/mlxsw/trap.h | 66 +
 drivers/net/ethernet/mellanox/mlxsw/txheader.h | 80 +
 drivers/net/ethernet/micrel/ks8851.c | 1 +
 drivers/net/ethernet/moxa/moxart_ether.c | 1 +
 drivers/net/ethernet/neterion/s2io.c | 26 +-
 drivers/net/ethernet/neterion/s2io.h | 2 -
 drivers/net/ethernet/nvidia/forcedeth.c | 24 +-
 drivers/net/ethernet/nxp/lpc_eth.c | 13 +-
 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 20 +-
 .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 33 +-
 .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 2 +
 .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4 +-
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 2 -
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 6 +-
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 1 +
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 33 +-
 .../net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 41 +-
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 3 +-
 .../ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 5 +-
 .../net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3 +-
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 7 +-
 drivers/net/ethernet/realtek/8139cp.c | 111 +-
 drivers/net/ethernet/realtek/r8169.c | 175 +-
 drivers/net/ethernet/renesas/ravb.h | 5 +-
 drivers/net/ethernet/renesas/ravb_main.c | 118 +-
 drivers/net/ethernet/renesas/sh_eth.c | 18 +-
 drivers/net/ethernet/rocker/rocker.c | 212 +-
 drivers/net/ethernet/rocker/rocker.h | 2 +
 drivers/net/ethernet/sfc/ef10.c | 562 +++-
 drivers/net/ethernet/sfc/efx.c | 57 +-
 drivers/net/ethernet/sfc/efx.h | 1 +
 drivers/net/ethernet/sfc/falcon.c | 1 +
 drivers/net/ethernet/sfc/farch.c | 4 +-
 drivers/net/ethernet/sfc/mcdi.c | 28 +-
 drivers/net/ethernet/sfc/mcdi.h | 3 +-
 drivers/net/ethernet/sfc/mcdi_pcol.h | 3463 ++++++++++++++-----
 drivers/net/ethernet/sfc/net_driver.h | 5 +
 drivers/net/ethernet/sfc/nic.h | 2 +
 drivers/net/ethernet/sfc/selftest.c | 14 +-
 drivers/net/ethernet/sfc/siena.c | 7 +-
 drivers/net/ethernet/sfc/tx.c | 30 +-
 drivers/net/ethernet/smsc/smc9194.c | 32 +-
 drivers/net/ethernet/smsc/smsc911x.c | 65 +-
 .../net/ethernet/stmicro/stmmac/dwmac-generic.c | 42 +-
 .../net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 50 +-
 .../net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 59 +-
 drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 31 +-
 drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 73 +-
 .../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 78 +-
 drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 83 +-
 drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 95 +-
 .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 7 +-
 drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 11 +-
 .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 138 +-
 .../net/ethernet/stmicro/stmmac/stmmac_platform.h | 9 +-
 drivers/net/ethernet/sun/sunvnet.c | 17 +-
 drivers/net/ethernet/synopsys/Kconfig | 27 +
 drivers/net/ethernet/synopsys/Makefile | 5 +
 drivers/net/ethernet/synopsys/dwc_eth_qos.c | 3019 +++++++++++++++++
 drivers/net/ethernet/ti/cpmac.c | 2 +
 drivers/net/ethernet/ti/cpsw.c | 171 +-
 drivers/net/ethernet/ti/davinci_emac.c | 4 +-
 drivers/net/ethernet/ti/netcp_core.c | 90 +-
 drivers/net/ethernet/ti/netcp_ethss.c | 456 ++-
 drivers/net/ethernet/tile/tilegx.c | 4 +-
 drivers/net/ethernet/via/via-rhine.c | 3 +-
 drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2 +
 drivers/net/fddi/skfp/h/hwmtm.h | 9 -
 drivers/net/fjes/Makefile | 30 +
 drivers/net/fjes/fjes.h | 77 +
 drivers/net/fjes/fjes_ethtool.c | 137 +
 drivers/net/fjes/fjes_hw.c | 1125 +++++++
 drivers/net/fjes/fjes_hw.h | 334 ++
 drivers/net/fjes/fjes_main.c | 1383 ++++++++
 drivers/net/fjes/fjes_regs.h | 142 +
 drivers/net/geneve.c | 785 ++++-
 drivers/net/hamradio/baycom_epp.c | 2 +-
 drivers/net/hyperv/hyperv_net.h | 33 +
 drivers/net/hyperv/netvsc.c | 43 +-
 drivers/net/hyperv/netvsc_drv.c | 166 +-
 drivers/net/hyperv/rndis_filter.c | 37 +-
 drivers/net/ieee802154/at86rf230.c | 56 +-
 drivers/net/ieee802154/cc2520.c | 2 +-
 drivers/net/ieee802154/mrf24j40.c | 1 -
 drivers/net/ifb.c | 207 +-
 drivers/net/ipvlan/ipvlan_main.c | 3 +-
 drivers/net/irda/ali-ircc.c | 6 -
 drivers/net/loopback.c | 3 +-
 drivers/net/macvlan.c | 1 +
 drivers/net/macvtap.c | 2 +-
 drivers/net/nlmon.c | 2 +-
 drivers/net/ntb_netdev.c | 77 +
 drivers/net/phy/Kconfig | 32 +-
 drivers/net/phy/Makefile | 4 +
 drivers/net/phy/aquantia.c | 201 ++
 drivers/net/phy/dp83640.c | 10 +-
 drivers/net/phy/dp83848.c | 99 +
 drivers/net/phy/dp83867.c | 6 +-
 drivers/net/phy/fixed_phy.c | 112 +-
 drivers/net/phy/marvell.c | 62 +-
 drivers/net/phy/mdio-bcm-unimac.c | 1 +
 drivers/net/phy/mdio-gpio.c | 1 +
 drivers/net/phy/mdio-mux-mmioreg.c | 2 +
 drivers/net/phy/mdio-mux.c | 20 +-
 drivers/net/phy/mdio-octeon.c | 136 +-
 drivers/net/phy/mdio_bus.c | 31 +-
 drivers/net/phy/micrel.c | 23 +-
 drivers/net/phy/microchip.c | 148 +
 drivers/net/phy/phy.c | 7 +-
 drivers/net/phy/phy_device.c | 70 +-
 drivers/net/phy/realtek.c | 14 +
 drivers/net/phy/smsc.c | 19 +-
 drivers/net/phy/spi_ks8995.c | 22 -
 drivers/net/phy/teranetics.c | 135 +
 drivers/net/phy/vitesse.c | 14 -
 drivers/net/ppp/ppp_generic.c | 17 +-
 drivers/net/ppp/pppoe.c | 2 +-
 drivers/net/rionet.c | 4 +-
 drivers/net/team/team.c | 2 +-
 drivers/net/tun.c | 1 +
 drivers/net/usb/Kconfig | 22 +
 drivers/net/usb/Makefile | 3 +-
 drivers/net/usb/asix_common.c | 4 +-
 drivers/net/usb/asix_devices.c | 4 +
 drivers/net/usb/ch9200.c | 432 +++
 drivers/net/usb/lan78xx.c | 3494 ++++++++++++++++++++
 drivers/net/usb/lan78xx.h | 1069 ++++++
 drivers/net/usb/qmi_wwan.c | 9 +-
 drivers/net/usb/r8152.c | 89 +-
 drivers/net/usb/usbnet.c | 39 +-
 drivers/net/veth.c | 2 +
 drivers/net/virtio_net.c | 30 +-
 drivers/net/vrf.c | 711 ++++
 drivers/net/vxlan.c | 775 +++--
 drivers/net/wan/hdlc_fr.c | 2 +-
 drivers/net/wan/sbni.c | 2 +
 drivers/net/wireless/ath/ath10k/Makefile | 3 +-
 drivers/net/wireless/ath/ath10k/bmi.h | 2 +-
 drivers/net/wireless/ath/ath10k/ce.c | 1 +
 drivers/net/wireless/ath/ath10k/ce.h | 17 +-
 drivers/net/wireless/ath/ath10k/core.c | 188 +-
 drivers/net/wireless/ath/ath10k/core.h | 65 +-
 drivers/net/wireless/ath/ath10k/debug.c | 30 +-
 drivers/net/wireless/ath/ath10k/htt.c | 66 +-
 drivers/net/wireless/ath/ath10k/htt.h | 89 +-
 drivers/net/wireless/ath/ath10k/htt_rx.c | 49 +-
 drivers/net/wireless/ath/ath10k/htt_tx.c | 137 +-
 drivers/net/wireless/ath/ath10k/hw.c | 90 +-
 drivers/net/wireless/ath/ath10k/hw.h | 137 +-
 drivers/net/wireless/ath/ath10k/mac.c | 300 +-
 drivers/net/wireless/ath/ath10k/pci.c | 228 +-
 drivers/net/wireless/ath/ath10k/pci.h | 13 +-
 drivers/net/wireless/ath/ath10k/rx_desc.h | 173 +-
 drivers/net/wireless/ath/ath10k/spectral.c | 18 +-
 drivers/net/wireless/ath/ath10k/spectral.h | 4 +-
 drivers/net/wireless/ath/ath10k/swap.c | 208 ++
 drivers/net/wireless/ath/ath10k/swap.h | 72 +
 drivers/net/wireless/ath/ath10k/targaddrs.h | 3 +
 drivers/net/wireless/ath/ath10k/txrx.c | 23 +-
 drivers/net/wireless/ath/ath10k/wmi-ops.h | 32 +-
 drivers/net/wireless/ath/ath10k/wmi-tlv.c | 135 +-
 drivers/net/wireless/ath/ath10k/wmi.c | 1349 +++++++-
 drivers/net/wireless/ath/ath10k/wmi.h | 1024 +++++-
 drivers/net/wireless/ath/ath10k/wow.c | 20 +-
 drivers/net/wireless/ath/ath5k/Kconfig | 1 -
 drivers/net/wireless/ath/ath5k/ani.c | 4 +-
 drivers/net/wireless/ath/ath5k/ath5k.h | 4 +-
 drivers/net/wireless/ath/ath5k/base.c | 4 +-
 drivers/net/wireless/ath/ath5k/debug.c | 2 +-
 drivers/net/wireless/ath/ath6kl/htc.h | 2 +-
 drivers/net/wireless/ath/ath6kl/init.c | 1 +
 drivers/net/wireless/ath/ath6kl/wmi.c | 4 +-
 drivers/net/wireless/ath/ath9k/ar9003_phy.h | 25 +-
 drivers/net/wireless/ath/ath9k/ath9k.h | 22 +-
 drivers/net/wireless/ath/ath9k/channel.c | 23 +-
 drivers/net/wireless/ath/ath9k/debug.c | 2 +
 drivers/net/wireless/ath/ath9k/debug.h | 2 +
 drivers/net/wireless/ath/ath9k/debug_sta.c | 20 +-
 drivers/net/wireless/ath/ath9k/dfs.c | 170 +-
 drivers/net/wireless/ath/ath9k/htc_drv_init.c | 2 +-
 drivers/net/wireless/ath/ath9k/htc_drv_main.c | 13 +-
 drivers/net/wireless/ath/ath9k/htc_hst.c | 9 +-
 drivers/net/wireless/ath/ath9k/hw.c | 1 +
 drivers/net/wireless/ath/ath9k/init.c | 9 +-
 drivers/net/wireless/ath/ath9k/link.c | 2 +-
 drivers/net/wireless/ath/ath9k/main.c | 11 +-
 drivers/net/wireless/ath/ath9k/recv.c | 7 +-
 drivers/net/wireless/ath/ath9k/wmi.c | 3 +-
 drivers/net/wireless/ath/ath9k/xmit.c | 156 +-
 drivers/net/wireless/ath/debug.c | 2 +
 drivers/net/wireless/ath/dfs_pri_detector.c | 2 +-
 drivers/net/wireless/ath/wil6210/Makefile | 1 +
 drivers/net/wireless/ath/wil6210/boot_loader.h | 61 +
 drivers/net/wireless/ath/wil6210/cfg80211.c | 244 +-
 drivers/net/wireless/ath/wil6210/debugfs.c | 51 +-
 drivers/net/wireless/ath/wil6210/ethtool.c | 14 +-
 drivers/net/wireless/ath/wil6210/fw.c | 10 -
 drivers/net/wireless/ath/wil6210/fw_inc.c | 16 +-
 drivers/net/wireless/ath/wil6210/interrupt.c | 165 +-
 drivers/net/wireless/ath/wil6210/ioctl.c | 4 +-
 drivers/net/wireless/ath/wil6210/main.c | 198 +-
 drivers/net/wireless/ath/wil6210/netdev.c | 5 +-
 drivers/net/wireless/ath/wil6210/pcie_bus.c | 127 +-
 drivers/net/wireless/ath/wil6210/pm.c | 98 +
 drivers/net/wireless/ath/wil6210/rx_reorder.c | 6 +
 drivers/net/wireless/ath/wil6210/txrx.c | 383 ++-
 drivers/net/wireless/ath/wil6210/txrx.h | 8 +
 drivers/net/wireless/ath/wil6210/wil6210.h | 64 +-
 drivers/net/wireless/ath/wil6210/wil_platform.c | 2 +-
 drivers/net/wireless/ath/wil6210/wmi.c | 132 +-
 drivers/net/wireless/b43/lo.c | 4 +-
 drivers/net/wireless/b43/lo.h | 2 +-
 drivers/net/wireless/b43/main.c | 1 +
 drivers/net/wireless/b43/phy_g.c | 2 +-
 drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c | 216 +-
 drivers/net/wireless/brcm80211/brcmfmac/core.h | 3 +
 drivers/net/wireless/brcm80211/brcmfmac/firmware.c | 39 +-
 drivers/net/wireless/brcm80211/brcmfmac/flowring.c | 10 +-
 drivers/net/wireless/brcm80211/brcmfmac/fweh.h | 10 +-
 drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 56 +
 drivers/net/wireless/brcm80211/brcmfmac/sdio.c | 13 +-
 .../net/wireless/brcm80211/brcmsmac/mac80211_if.c | 4 +-
 drivers/net/wireless/cw1200/cw1200_spi.c | 1 -
 drivers/net/wireless/hostap/hostap_main.c | 4 +-
 drivers/net/wireless/ipw2x00/ipw2100.c | 2 +-
 drivers/net/wireless/ipw2x00/ipw2100.h | 2 +-
 drivers/net/wireless/iwlegacy/3945-mac.c | 2 +-
 drivers/net/wireless/iwlegacy/debug.c | 8 +-
 drivers/net/wireless/iwlwifi/dvm/agn.h | 21 +-
 drivers/net/wireless/iwlwifi/dvm/debugfs.c | 8 +-
 drivers/net/wireless/iwlwifi/dvm/dev.h | 7 +-
 drivers/net/wireless/iwlwifi/dvm/lib.c | 10 +-
 drivers/net/wireless/iwlwifi/dvm/mac80211.c | 14 +-
 drivers/net/wireless/iwlwifi/dvm/main.c | 12 -
 drivers/net/wireless/iwlwifi/dvm/rs.c | 51 +-
 drivers/net/wireless/iwlwifi/dvm/rx.c | 109 +-
 drivers/net/wireless/iwlwifi/dvm/rxon.c | 3 +-
 drivers/net/wireless/iwlwifi/dvm/scan.c | 25 +-
 drivers/net/wireless/iwlwifi/dvm/sta.c | 111 +-
 drivers/net/wireless/iwlwifi/dvm/tx.c | 18 +-
 drivers/net/wireless/iwlwifi/dvm/ucode.c | 5 +-
 drivers/net/wireless/iwlwifi/iwl-7000.c | 4 +-
 drivers/net/wireless/iwlwifi/iwl-8000.c | 12 +-
 drivers/net/wireless/iwlwifi/iwl-config.h | 2 +
 drivers/net/wireless/iwlwifi/iwl-csr.h | 3 +
 drivers/net/wireless/iwlwifi/iwl-devtrace-data.h | 7 +-
 .../net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h | 14 +-
 drivers/net/wireless/iwlwifi/iwl-drv.c | 72 +-
 drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c | 4 +-
 drivers/net/wireless/iwlwifi/iwl-fh.h | 6 -
 drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h | 17 +
 drivers/net/wireless/iwlwifi/iwl-fw-file.h | 53 +-
 drivers/net/wireless/iwlwifi/iwl-fw.h | 68 +
 drivers/net/wireless/iwlwifi/iwl-notif-wait.c | 8 +-
 drivers/net/wireless/iwlwifi/iwl-notif-wait.h | 5 +-
 drivers/net/wireless/iwlwifi/iwl-op-mode.h | 32 +-
 drivers/net/wireless/iwlwifi/iwl-prph.h | 12 +
 drivers/net/wireless/iwlwifi/iwl-trans.h | 125 +-
 drivers/net/wireless/iwlwifi/mvm/Makefile | 1 +
 drivers/net/wireless/iwlwifi/mvm/coex.c | 44 +-
 drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 31 +-
 drivers/net/wireless/iwlwifi/mvm/constants.h | 1 +
 drivers/net/wireless/iwlwifi/mvm/d3.c | 101 +-
 drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | 751 +++++
 drivers/net/wireless/iwlwifi/mvm/debugfs.c | 14 +-
 drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 7 +-
 drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 31 +-
 drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 147 -
 drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h | 4 +-
 drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h | 386 +++
 drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h | 12 +
 drivers/net/wireless/iwlwifi/mvm/fw-api.h | 86 +-
 drivers/net/wireless/iwlwifi/mvm/fw.c | 393 ++-
 drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 13 +-
 drivers/net/wireless/iwlwifi/mvm/mac80211.c | 79 +-
 drivers/net/wireless/iwlwifi/mvm/mvm.h | 159 +-
 drivers/net/wireless/iwlwifi/mvm/nvm.c | 23 +-
 drivers/net/wireless/iwlwifi/mvm/ops.c | 115 +-
 drivers/net/wireless/iwlwifi/mvm/power.c | 46 +-
 drivers/net/wireless/iwlwifi/mvm/rs.c | 160 +-
 drivers/net/wireless/iwlwifi/mvm/rs.h | 10 +
 drivers/net/wireless/iwlwifi/mvm/rx.c | 43 +-
 drivers/net/wireless/iwlwifi/mvm/scan.c | 319 +-
 drivers/net/wireless/iwlwifi/mvm/sta.c | 43 +-
 drivers/net/wireless/iwlwifi/mvm/sta.h | 5 +-
 drivers/net/wireless/iwlwifi/mvm/tdls.c | 33 +-
 drivers/net/wireless/iwlwifi/mvm/time-event.c | 14 +-
 drivers/net/wireless/iwlwifi/mvm/time-event.h | 5 +-
 drivers/net/wireless/iwlwifi/mvm/tof.c | 304 ++
 drivers/net/wireless/iwlwifi/mvm/tof.h | 94 +
 drivers/net/wireless/iwlwifi/mvm/tt.c | 13 +-
 drivers/net/wireless/iwlwifi/mvm/tx.c | 94 +-
 drivers/net/wireless/iwlwifi/mvm/utils.c | 13 +-
 drivers/net/wireless/iwlwifi/pcie/drv.c | 41 +-
 drivers/net/wireless/iwlwifi/pcie/internal.h | 64 +-
 drivers/net/wireless/iwlwifi/pcie/rx.c | 496 ++-
 drivers/net/wireless/iwlwifi/pcie/trans.c | 424 ++-
 drivers/net/wireless/iwlwifi/pcie/tx.c | 135 +-
 drivers/net/wireless/mac80211_hwsim.c | 7 +-
 drivers/net/wireless/mediatek/mt7601u/dma.c | 34 +-
 drivers/net/wireless/mediatek/mt7601u/init.c | 2 +
 drivers/net/wireless/mediatek/mt7601u/mac.c | 4 +
 drivers/net/wireless/mediatek/mt7601u/mt7601u.h | 10 +-
 drivers/net/wireless/mediatek/mt7601u/tx.c | 3 +
 drivers/net/wireless/mediatek/mt7601u/usb.c | 63 +-
 drivers/net/wireless/mediatek/mt7601u/usb.h | 2 +
 drivers/net/wireless/mwifiex/Kconfig | 12 +-
 drivers/net/wireless/mwifiex/cfg80211.c | 130 +-
 drivers/net/wireless/mwifiex/cmdevt.c | 39 +-
 drivers/net/wireless/mwifiex/debugfs.c | 14 +-
 drivers/net/wireless/mwifiex/decl.h | 3 +
 drivers/net/wireless/mwifiex/fw.h | 95 +-
 drivers/net/wireless/mwifiex/ie.c | 3 +
 drivers/net/wireless/mwifiex/init.c | 10 +-
 drivers/net/wireless/mwifiex/join.c | 2 +
 drivers/net/wireless/mwifiex/main.c | 63 +-
 drivers/net/wireless/mwifiex/main.h | 40 +
 drivers/net/wireless/mwifiex/pcie.c | 12 +-
 drivers/net/wireless/mwifiex/pcie.h | 45 +-
 drivers/net/wireless/mwifiex/scan.c | 157 +-
 drivers/net/wireless/mwifiex/sdio.c | 206 +-
 drivers/net/wireless/mwifiex/sdio.h | 77 +
 drivers/net/wireless/mwifiex/sta_cmd.c | 90 +-
 drivers/net/wireless/mwifiex/sta_cmdresp.c | 7 +-
 drivers/net/wireless/mwifiex/sta_event.c | 207 +-
 drivers/net/wireless/mwifiex/sta_ioctl.c | 4 +-
 drivers/net/wireless/mwifiex/tdls.c | 80 +-
 drivers/net/wireless/mwifiex/txrx.c | 22 +-
 drivers/net/wireless/mwifiex/uap_cmd.c | 7 +-
 drivers/net/wireless/mwifiex/uap_event.c | 15 +
 drivers/net/wireless/mwifiex/usb.c | 23 +-
 drivers/net/wireless/mwifiex/usb.h | 3 +
 drivers/net/wireless/mwifiex/util.c | 75 +-
 drivers/net/wireless/mwifiex/wmm.c | 156 +-
 drivers/net/wireless/mwifiex/wmm.h | 8 +
 drivers/net/wireless/mwl8k.c | 49 +-
 drivers/net/wireless/orinoco/main.c | 2 -
 drivers/net/wireless/orinoco/orinoco_cs.c | 1 +
 drivers/net/wireless/orinoco/orinoco_nortel.c | 5 +-
 drivers/net/wireless/orinoco/orinoco_pci.c | 5 +-
 drivers/net/wireless/orinoco/orinoco_plx.c | 5 +-
 drivers/net/wireless/orinoco/orinoco_usb.c | 2 +
 drivers/net/wireless/rt2x00/Kconfig | 1 -
 drivers/net/wireless/rt2x00/rt2500usb.h | 2 +-
 drivers/net/wireless/rt2x00/rt2800usb.c | 1 +
 drivers/net/wireless/rt2x00/rt2x00.h | 6 +-
 drivers/net/wireless/rt2x00/rt2x00link.c | 18 +-
 drivers/net/wireless/rtlwifi/pci.h | 2 +
 drivers/net/wireless/rtlwifi/rtl8188ee/fw.c | 10 +-
 drivers/net/wireless/rtlwifi/rtl8188ee/fw.h | 21 +-
 drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c | 11 +-
 drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c | 12 +-
 drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h | 19 -
 drivers/net/wireless/rtlwifi/rtl8192cu/def.h | 9 -
 drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 110 +-
 drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 105 +-
 drivers/net/wireless/rtlwifi/rtl8192cu/mac.h | 10 -
 drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 2 +-
 drivers/net/wireless/rtlwifi/rtl8192de/fw.h | 22 -
 drivers/net/wireless/rtlwifi/rtl8192de/phy.c | 4 +-
 drivers/net/wireless/rtlwifi/rtl8192ee/fw.c | 12 +-
 drivers/net/wireless/rtlwifi/rtl8192ee/fw.h | 21 +-
 drivers/net/wireless/rtlwifi/rtl8192ee/phy.c | 6 +-
 drivers/net/wireless/rtlwifi/rtl8723ae/sw.c | 4 +-
 drivers/net/wireless/rtlwifi/rtl8723be/sw.c | 4 +-
 .../net/wireless/rtlwifi/rtl8723com/fw_common.c | 10 +-
 .../net/wireless/rtlwifi/rtl8723com/fw_common.h | 19 -
 drivers/net/wireless/rtlwifi/rtl8821ae/fw.c | 14 +-
 drivers/net/wireless/rtlwifi/rtl8821ae/fw.h | 23 +-
 drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 17 +
 drivers/net/wireless/rtlwifi/rtl8821ae/sw.c | 5 +
 drivers/net/wireless/rtlwifi/wifi.h | 28 +-
 drivers/net/wireless/ti/wl12xx/scan.c | 6 +-
 drivers/net/wireless/ti/wl18xx/acx.c | 27 +
 drivers/net/wireless/ti/wl18xx/acx.h | 138 +-
 drivers/net/wireless/ti/wl18xx/debugfs.c | 230 +-
 drivers/net/wireless/ti/wl18xx/event.c | 13 +
 drivers/net/wireless/ti/wl18xx/event.h | 12 +-
 drivers/net/wireless/ti/wl18xx/main.c | 59 +-
 drivers/net/wireless/ti/wl18xx/scan.c | 23 +-
 drivers/net/wireless/ti/wl18xx/scan.h | 4 +-
 drivers/net/wireless/ti/wlcore/cmd.c | 56 +-
 drivers/net/wireless/ti/wlcore/cmd.h | 15 +
 drivers/net/wireless/ti/wlcore/conf.h | 11 +-
 drivers/net/wireless/ti/wlcore/init.c | 2 +-
 drivers/net/wireless/ti/wlcore/init.h | 1 +
 drivers/net/wireless/ti/wlcore/main.c | 69 +-
 drivers/net/wireless/ti/wlcore/rx.c | 9 +-
 drivers/net/wireless/ti/wlcore/rx.h | 3 +
 drivers/net/wireless/ti/wlcore/scan.h | 6 +
 drivers/net/wireless/ti/wlcore/sdio.c | 3 +-
 drivers/net/wireless/ti/wlcore/wlcore.h | 3 +
 drivers/net/wireless/ti/wlcore/wlcore_i.h | 5 +
 drivers/net/xen-netback/common.h | 28 +-
 drivers/net/xen-netback/interface.c | 10 +
 drivers/net/xen-netback/netback.c | 133 +-
 drivers/net/xen-netback/xenbus.c | 19 +
 drivers/net/xen-netfront.c | 38 +-
 662 files changed, 55683 insertions(+), 13391 deletions(-)
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/Kconfig
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/Makefile
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/cmd.h
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core.c
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core.h
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/emad.h
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/item.h
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/pci.c
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/pci.h
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/port.h
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/reg.h
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/switchx2.c
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/trap.h
 create mode 100644 drivers/net/ethernet/mellanox/mlxsw/txheader.h
 create mode 100644 drivers/net/ethernet/synopsys/Kconfig
 create mode 100644 drivers/net/ethernet/synopsys/Makefile
 create mode 100644 drivers/net/ethernet/synopsys/dwc_eth_qos.c
 create mode 100644 drivers/net/fjes/Makefile
 create mode 100644 drivers/net/fjes/fjes.h
 create mode 100644 drivers/net/fjes/fjes_ethtool.c
 create mode 100644 drivers/net/fjes/fjes_hw.c
 create mode 100644 drivers/net/fjes/fjes_hw.h
 create mode 100644 drivers/net/fjes/fjes_main.c
 create mode 100644 drivers/net/fjes/fjes_regs.h
 create mode 100644 drivers/net/phy/aquantia.c
 create mode 100644 drivers/net/phy/dp83848.c
 create mode 100644 drivers/net/phy/microchip.c
 create mode 100644 drivers/net/phy/teranetics.c
 create mode 100644 drivers/net/usb/ch9200.c
 create mode 100644 drivers/net/usb/lan78xx.c
 create mode 100644 drivers/net/usb/lan78xx.h
 create mode 100644 drivers/net/vrf.c
 create mode 100644 drivers/net/wireless/ath/ath10k/swap.c
 create mode 100644 drivers/net/wireless/ath/ath10k/swap.h
 create mode 100644 drivers/net/wireless/ath/wil6210/boot_loader.h
 create mode 100644 drivers/net/wireless/ath/wil6210/pm.c
 create mode 100644 drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
 create mode 100644 drivers/net/wireless/iwlwifi/mvm/tof.c
 create mode 100644 drivers/net/wireless/iwlwifi/mvm/tof.h

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c18f9e62a..d18eb607b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -180,8 +180,8 @@ config VXLAN will be called vxlan. config GENEVE - tristate "Generic Network Virtualization Encapsulation netdev" - depends on INET && GENEVE_CORE + tristate "Generic Network Virtualization Encapsulation" + depends on INET && NET_UDP_TUNNEL select NET_IP_TUNNEL ---help--- This allows one to create geneve virtual interfaces that provide @@ -282,7 +282,6 @@ config VETH config VIRTIO_NET tristate "Virtio network driver" depends on VIRTIO - select AVERAGE ---help--- This is the virtual network driver for virtio. It can be used with lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. @@ -297,6 +296,13 @@ config NLMON diagnostics, etc. This is mostly intended for developers or support to debug netlink issues. If unsure, say N. +config NET_VRF + tristate "Virtual Routing and Forwarding (Lite)" + depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES + ---help--- + This option enables support for mapping interfaces into VRFs. The + support enables VRF devices.
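A note on the "- select AVERAGE" removal in the VIRTIO_NET hunk above: 4.3 replaces the out-of-line lib/average.c EWMA code with a header-only macro in include/linux/average.h, so there is no longer a library object for Kconfig to select. A minimal sketch of the macro-generated API follows; the pkt_len name mirrors what virtio_net uses, and the exact factor/weight values here are assumptions, not taken from this patch:

    #include <linux/average.h>

    /* Expands into struct ewma_pkt_len plus static inline
     * ewma_pkt_len_init/_add/_read helpers. */
    DECLARE_EWMA(pkt_len, 1, 64)

    static struct ewma_pkt_len rx_len_avg;

    static void example_rx_account(unsigned int len)    /* hypothetical helper */
    {
            ewma_pkt_len_add(&rx_len_avg, len);
    }

    /* setup code calls ewma_pkt_len_init(&rx_len_avg); readers use
     * ewma_pkt_len_read(&rx_len_avg). */

Each DECLARE_EWMA() user gets its own type-safe helpers, which is why no shared library object is needed any more.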
+ endif # NET_CORE config SUNGEM_PHY @@ -407,6 +413,13 @@ config VMXNET3 To compile this driver as a module, choose M here: the module will be called vmxnet3. +config FUJITSU_ES + tristate "FUJITSU Extended Socket Network Device driver" + depends on ACPI + help + This driver provides support for Extended Socket network device + on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series. + source "drivers/net/hyperv/Kconfig" endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index c12cb2247..900b0c532 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o obj-$(CONFIG_VXLAN) += vxlan.o obj-$(CONFIG_GENEVE) += geneve.o obj-$(CONFIG_NLMON) += nlmon.o +obj-$(CONFIG_NET_VRF) += vrf.o # # Networking Drivers @@ -67,3 +68,5 @@ obj-$(CONFIG_USB_NET_DRIVERS) += usb/ obj-$(CONFIG_HYPERV_NET) += hyperv/ obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o + +obj-$(CONFIG_FUJITSU_ES) += fjes/ diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 10f71c732..816d0e949 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -326,7 +326,7 @@ static void arcdev_setup(struct net_device *dev) dev->type = ARPHRD_ARCNET; dev->netdev_ops = &arcnet_netdev_ops; dev->header_ops = &arcnet_header_ops; - dev->hard_header_len = sizeof(struct archdr); + dev->hard_header_len = sizeof(struct arc_hardware); dev->mtu = choose_mtu(); dev->addr_len = ARCNET_ALEN; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 7fde4d5c2..3c4535884 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1870,8 +1870,6 @@ static void ad_marker_info_received(struct bond_marker *marker_info, static void ad_marker_response_received(struct bond_marker *marker, struct port *port) { - marker = NULL; - port = NULL; /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */ } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a98dd4f1b..bcd7bddbe 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -979,7 +979,6 @@ static void bond_poll_controller(struct net_device *bond_dev) if (bond_3ad_get_active_agg_info(bond, &ad_info)) return; - rcu_read_lock_bh(); bond_for_each_slave_rcu(bond, slave, iter) { ops = slave->dev->netdev_ops; if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller) @@ -1000,7 +999,6 @@ static void bond_poll_controller(struct net_device *bond_dev) ops->ndo_poll_controller(slave->dev); up(&ni->dev_lock); } - rcu_read_unlock_bh(); } static void bond_netpoll_cleanup(struct net_device *bond_dev) @@ -1751,6 +1749,7 @@ err_undo_flags: slave_dev->dev_addr)) eth_hw_addr_random(bond_dev); if (bond_dev->type != ARPHRD_ETHER) { + dev_close(bond_dev); ether_setup(bond_dev); bond_dev->flags |= IFF_MASTER; bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; @@ -3097,7 +3096,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, int noff, proto = -1; if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) - return skb_flow_dissect_flow_keys(skb, fk); + return skb_flow_dissect_flow_keys(skb, fk, 0); fk->ports.ports = 0; noff = skb_network_offset(skb); @@ -3780,7 +3779,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) struct slave *slave; struct list_head *iter; struct bond_up_slave *new_arr, *old_arr; - int slaves_in_agg; int agg_id = 0; int ret = 0; @@ -3811,7 +3809,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave 
*skipslave) } goto out; } - slaves_in_agg = ad_info.ports; agg_id = ad_info.aggregator_id; } bond_for_each_slave(bond, slave, iter) { @@ -4122,9 +4119,8 @@ void bond_setup(struct net_device *bond_dev) SET_NETDEV_DEVTYPE(bond_dev, &bond_type); /* Initialize the device options */ - bond_dev->tx_queue_len = 0; bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; - bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT; + bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE; bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); /* don't acquire bond device's netif_tx_lock when transmitting */ diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 1bda29249..db760e841 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -111,6 +111,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_AD_USER_PORT_KEY] = { .type = NLA_U16 }, [IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY, .len = ETH_ALEN }, + [IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 }, }; static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { @@ -405,7 +406,6 @@ static int bond_changelink(struct net_device *bond_dev, if (err) return err; } - if (data[IFLA_BOND_AD_USER_PORT_KEY]) { int port_key = nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]); @@ -415,7 +415,6 @@ static int bond_changelink(struct net_device *bond_dev, if (err) return err; } - if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) { if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN) return -EINVAL; @@ -426,6 +425,15 @@ static int bond_changelink(struct net_device *bond_dev, if (err) return err; } + if (data[IFLA_BOND_TLB_DYNAMIC_LB]) { + int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]); + + bond_opt_initval(&newval, dynamic_lb); + err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval); + if (err) + return err; + } + return 0; } @@ -476,6 +484,7 @@ static size_t bond_get_size(const struct net_device *bond_dev) nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */ nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */ + nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */ 0; } @@ -598,6 +607,10 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.ad_select)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB, + bond->params.tlb_dynamic_lb)) + goto nla_put_failure; + if (BOND_MODE(bond) == BOND_MODE_8023AD) { struct ad_info info; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index e9c624d54..55e93b6b6 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -420,6 +420,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .flags = BOND_OPTFLAG_IFDOWN, .values = bond_ad_user_port_key_tbl, .set = bond_option_ad_user_port_key_set, + }, + [BOND_OPT_NUM_PEER_NOTIF_ALIAS] = { + .id = BOND_OPT_NUM_PEER_NOTIF_ALIAS, + .name = "num_grat_arp", + .desc = "Number of peer notifications to send on failover event", + .values = bond_num_peer_notif_tbl, + .set = bond_option_num_peer_notif_set } }; @@ -730,19 +737,6 @@ static int bond_option_mode_set(struct bonding *bond, return 0; } -static struct net_device *__bond_option_active_slave_get(struct bonding *bond, - struct slave *slave) -{ - return bond_uses_primary(bond) && slave ? 
slave->dev : NULL; -} - -struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond) -{ - struct slave *slave = rcu_dereference(bond->curr_active_slave); - - return __bond_option_active_slave_get(bond, slave); -} - static int bond_option_active_slave_set(struct bonding *bond, const struct bond_opt_value *newval) { diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 31835a4da..f4ae72086 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -380,7 +380,7 @@ static ssize_t bonding_show_ad_select(struct device *d, static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR, bonding_show_ad_select, bonding_sysfs_store_option); -/* Show and set the number of peer notifications to send after a failover event. */ +/* Show the number of peer notifications to send after a failover event. */ static ssize_t bonding_show_num_peer_notif(struct device *d, struct device_attribute *attr, char *buf) @@ -388,24 +388,10 @@ static ssize_t bonding_show_num_peer_notif(struct device *d, struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.num_peer_notif); } - -static ssize_t bonding_store_num_peer_notif(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct bonding *bond = to_bond(d); - int ret; - - ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf); - if (!ret) - ret = count; - - return ret; -} static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, - bonding_show_num_peer_notif, bonding_store_num_peer_notif); + bonding_show_num_peer_notif, bonding_sysfs_store_option); static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, - bonding_show_num_peer_notif, bonding_store_num_peer_notif); + bonding_show_num_peer_notif, bonding_sysfs_store_option); /* Show the MII monitor interval. 
*/ static ssize_t bonding_show_miimon(struct device *d, diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index b3b922adc..615c65da3 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1120,7 +1120,7 @@ static void cfhsi_setup(struct net_device *dev) dev->type = ARPHRD_CAIF; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->destructor = free_netdev; dev->netdev_ops = &cfhsi_netdevops; for (i = 0; i < CFHSI_PRIO_LAST; ++i) diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 9da065372..c2dea4916 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -427,7 +427,7 @@ static void caifdev_setup(struct net_device *dev) dev->type = ARPHRD_CAIF; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CAIF_MAX_MTU; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->destructor = free_netdev; skb_queue_head_init(&serdev->head); serdev->common.link_select = CAIF_LINK_LOW_LATENCY; diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 72ea9ff9b..de3962014 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -710,7 +710,7 @@ static void cfspi_setup(struct net_device *dev) dev->netdev_ops = &cfspi_ops; dev->type = ARPHRD_CAIF; dev->flags = IFF_NOARP | IFF_POINTOPOINT; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->mtu = SPI_MAX_PAYLOAD_SIZE; dev->destructor = free_netdev; skb_queue_head_init(&cfspi->qhead); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index aede70460..141c2a42d 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -915,7 +915,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put(skb, IFLA_CAN_BITTIMING_CONST, sizeof(*priv->bittiming_const), priv->bittiming_const)) || - nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) || + nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) || nla_put_u32(skb, IFLA_CAN_STATE, state) || nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index b1e8d7298..c83f0f034 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -805,7 +805,7 @@ static void flexcan_set_bittiming(struct net_device *dev) if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) reg |= FLEXCAN_CTRL_SMP; - netdev_info(dev, "writing ctrl=0x%08x\n", reg); + netdev_dbg(dev, "writing ctrl=0x%08x\n", reg); flexcan_write(reg, ®s->ctrl); /* print chip status */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index e5fac3680..131026fbc 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -87,6 +87,7 @@ static const struct pci_device_id peak_pci_tbl[] = { {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, #ifdef CONFIG_CAN_PEAK_PCIEC {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 7b92e911a..f10834be4 100644 --- 
a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev) priv->write_reg(priv, SJA1000_RXERR, 0x0); priv->read_reg(priv, SJA1000_ECC); + /* clear interrupt flags */ + priv->read_reg(priv, SJA1000_IR); + /* leave reset mode */ set_normal_mode(dev); } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 8b4d3e687..5eee62bad 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -162,7 +162,7 @@ struct gs_can { struct can_bittiming_const bt_const; unsigned int channel; /* channel number */ - /* This lock prevents a race condition between xmit and recieve. */ + /* This lock prevents a race condition between xmit and receive. */ spinlock_t tx_ctx_lock; struct gs_tx_context tx_context[GS_MAX_TX_URBS]; @@ -274,7 +274,7 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf) } } -static void gs_usb_recieve_bulk_callback(struct urb *urb) +static void gs_usb_receive_bulk_callback(struct urb *urb) { struct gs_usb *usbcan = urb->context; struct gs_can *dev; @@ -376,7 +376,7 @@ static void gs_usb_recieve_bulk_callback(struct urb *urb) usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN), hf, sizeof(struct gs_host_frame), - gs_usb_recieve_bulk_callback, + gs_usb_receive_bulk_callback, usbcan ); @@ -605,7 +605,7 @@ static int gs_can_open(struct net_device *netdev) GSUSB_ENDPOINT_IN), buf, sizeof(struct gs_host_frame), - gs_usb_recieve_bulk_callback, + gs_usb_receive_bulk_callback, parent); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 7ad0a4d8e..4c483d937 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -46,13 +46,13 @@ config NET_DSA_MV88E6171 ethernet switches chips. config NET_DSA_MV88E6352 - tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support" + tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support" depends on NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- - This enables support for the Marvell 88E6172, 88E6176 and 88E6352 - ethernet switch chips. + This enables support for the Marvell 88E6172, 88E6176, 88E6320, + 88E6321 and 88E6352 ethernet switch chips. 
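Worth pausing on the can/dev.c one-liner a few hunks up: nla_put() takes an explicit length argument, so passing sizeof() of the wrong object compiles cleanly and only corrupts the netlink attribute at runtime. Condensed from that hunk, the shape of the bug and of the fix:

    /* before: length taken from struct can_ctrlmode cm, data from the clock */
    nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock);

    /* after: length and data now describe the same object */
    nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock);

Taking sizeof() of the object actually pointed to, rather than a nearby local, makes this class of mismatch impossible.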
config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c index 71a29a7ce..3de2a6d73 100644 --- a/drivers/net/dsa/mv88e6123_61_65.c +++ b/drivers/net/dsa/mv88e6123_61_65.c @@ -129,6 +129,7 @@ struct dsa_switch_driver mv88e6123_61_65_switch_driver = { .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, + .adjust_link = mv88e6xxx_adjust_link, #ifdef CONFIG_NET_DSA_HWMON .get_temp = mv88e6xxx_get_temp, #endif diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 32f4a08e9..3e8386529 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -182,6 +182,7 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, + .adjust_link = mv88e6xxx_adjust_link, }; MODULE_ALIAS("platform:mv88e6085"); diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 1c7808495..c2daaf087 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -108,6 +108,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, + .adjust_link = mv88e6xxx_adjust_link, #ifdef CONFIG_NET_DSA_HWMON .get_temp = mv88e6xxx_get_temp, #endif @@ -116,9 +117,14 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .port_join_bridge = mv88e6xxx_join_bridge, .port_leave_bridge = mv88e6xxx_leave_bridge, .port_stp_update = mv88e6xxx_port_stp_update, - .fdb_add = mv88e6xxx_port_fdb_add, - .fdb_del = mv88e6xxx_port_fdb_del, - .fdb_getnext = mv88e6xxx_port_fdb_getnext, + .port_pvid_get = mv88e6xxx_port_pvid_get, + .port_pvid_set = mv88e6xxx_port_pvid_set, + .port_vlan_add = mv88e6xxx_port_vlan_add, + .port_vlan_del = mv88e6xxx_port_vlan_del, + .vlan_getnext = mv88e6xxx_vlan_getnext, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_getnext = mv88e6xxx_port_fdb_getnext, }; MODULE_ALIAS("platform:mv88e6171"); diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 632815c10..1f5129c10 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -36,6 +36,18 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr) return "Marvell 88E6172"; if ((ret & 0xfff0) == PORT_SWITCH_ID_6176) return "Marvell 88E6176"; + if (ret == PORT_SWITCH_ID_6320_A1) + return "Marvell 88E6320 (A1)"; + if (ret == PORT_SWITCH_ID_6320_A2) + return "Marvell 88e6320 (A2)"; + if ((ret & 0xfff0) == PORT_SWITCH_ID_6320) + return "Marvell 88E6320"; + if (ret == PORT_SWITCH_ID_6321_A1) + return "Marvell 88E6321 (A1)"; + if (ret == PORT_SWITCH_ID_6321_A2) + return "Marvell 88e6321 (A2)"; + if ((ret & 0xfff0) == PORT_SWITCH_ID_6321) + return "Marvell 88E6321"; if (ret == PORT_SWITCH_ID_6352_A0) return "Marvell 88E6352 (A0)"; if (ret == PORT_SWITCH_ID_6352_A1) @@ -80,66 +92,6 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) return 0; } -#ifdef CONFIG_NET_DSA_HWMON - -static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp) -{ - int ret; - - *temp = 0; - - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27); - if (ret < 0) - return ret; - - *temp = (ret & 0xff) - 25; - - return 0; -} - -static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp) -{ - 
int ret; - - *temp = 0; - - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); - if (ret < 0) - return ret; - - *temp = (((ret >> 8) & 0x1f) * 5) - 25; - - return 0; -} - -static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp) -{ - int ret; - - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); - if (ret < 0) - return ret; - temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - return mv88e6xxx_phy_page_write(ds, 0, 6, 26, - (ret & 0xe0ff) | (temp << 8)); -} - -static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm) -{ - int ret; - - *alarm = false; - - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); - if (ret < 0) - return ret; - - *alarm = !!(ret & 0x40); - - return 0; -} -#endif /* CONFIG_NET_DSA_HWMON */ - static int mv88e6352_setup(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -171,8 +123,9 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) mutex_lock(&ps->eeprom_mutex); - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14, - 0xc000 | (addr & 0xff)); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + GLOBAL2_EEPROM_OP_READ | + (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); if (ret < 0) goto error; @@ -180,7 +133,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) if (ret < 0) goto error; - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); error: mutex_unlock(&ps->eeprom_mutex); return ret; @@ -253,11 +206,11 @@ static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds) { int ret; - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP); if (ret < 0) return ret; - if (!(ret & 0x0400)) + if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN)) return -EROFS; return 0; @@ -271,12 +224,13 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, mutex_lock(&ps->eeprom_mutex); - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); if (ret < 0) goto error; - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14, - 0xb000 | (addr & 0xff)); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + GLOBAL2_EEPROM_OP_WRITE | + (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); if (ret < 0) goto error; @@ -374,13 +328,14 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, + .adjust_link = mv88e6xxx_adjust_link, .set_eee = mv88e6xxx_set_eee, .get_eee = mv88e6xxx_get_eee, #ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6352_get_temp, - .get_temp_limit = mv88e6352_get_temp_limit, - .set_temp_limit = mv88e6352_set_temp_limit, - .get_temp_alarm = mv88e6352_get_temp_alarm, + .get_temp = mv88e6xxx_get_temp, + .get_temp_limit = mv88e6xxx_get_temp_limit, + .set_temp_limit = mv88e6xxx_set_temp_limit, + .get_temp_alarm = mv88e6xxx_get_temp_alarm, #endif .get_eeprom = mv88e6352_get_eeprom, .set_eeprom = mv88e6352_set_eeprom, @@ -389,10 +344,18 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .port_join_bridge = mv88e6xxx_join_bridge, .port_leave_bridge = mv88e6xxx_leave_bridge, .port_stp_update = mv88e6xxx_port_stp_update, - .fdb_add = mv88e6xxx_port_fdb_add, - .fdb_del = mv88e6xxx_port_fdb_del, - .fdb_getnext = mv88e6xxx_port_fdb_getnext, + .port_pvid_get = mv88e6xxx_port_pvid_get, + .port_pvid_set = mv88e6xxx_port_pvid_set, + .port_vlan_add = mv88e6xxx_port_vlan_add, + 
.port_vlan_del = mv88e6xxx_port_vlan_del, + .vlan_getnext = mv88e6xxx_vlan_getnext, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_getnext = mv88e6xxx_port_fdb_getnext, }; -MODULE_ALIAS("platform:mv88e6352"); MODULE_ALIAS("platform:mv88e6172"); +MODULE_ALIAS("platform:mv88e6176"); +MODULE_ALIAS("platform:mv88e6320"); +MODULE_ALIAS("platform:mv88e6321"); +MODULE_ALIAS("platform:mv88e6352"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 26ec2fbfa..1f7dd927c 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2,6 +2,9 @@ * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support * Copyright (c) 2008 Marvell Semiconductor * + * Copyright (c) 2015 CMC Electronics, Inc. + * Added support for VLAN Table Unit operations + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -11,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -391,6 +395,7 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds) for (i = 0; i < DSA_MAX_PORTS; i++) { struct net_device *dev; int uninitialized_var(port_status); + int pcs_ctrl; int link; int speed; int duplex; @@ -400,6 +405,10 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds) if (dev == NULL) continue; + pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL); + if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK) + continue; + link = 0; if (dev->flags & IFF_UP) { port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), @@ -517,6 +526,18 @@ static bool mv88e6xxx_6185_family(struct dsa_switch *ds) return false; } +static bool mv88e6xxx_6320_family(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + switch (ps->id) { + case PORT_SWITCH_ID_6320: + case PORT_SWITCH_ID_6321: + return true; + } + return false; +} + static bool mv88e6xxx_6351_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -545,6 +566,73 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds) return false; } +/* We expect the switch to perform auto negotiation if there is a real + * phy. However, in the case of a fixed link phy, we force the port + * settings from the fixed link settings. 
+ */ +void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + u32 reg; + int ret; + + if (!phy_is_pseudo_fixed_link(phydev)) + return; + + mutex_lock(&ps->smi_mutex); + + ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); + if (ret < 0) + goto out; + + reg = ret & ~(PORT_PCS_CTRL_LINK_UP | + PORT_PCS_CTRL_FORCE_LINK | + PORT_PCS_CTRL_DUPLEX_FULL | + PORT_PCS_CTRL_FORCE_DUPLEX | + PORT_PCS_CTRL_UNFORCED); + + reg |= PORT_PCS_CTRL_FORCE_LINK; + if (phydev->link) + reg |= PORT_PCS_CTRL_LINK_UP; + + if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100) + goto out; + + switch (phydev->speed) { + case SPEED_1000: + reg |= PORT_PCS_CTRL_1000; + break; + case SPEED_100: + reg |= PORT_PCS_CTRL_100; + break; + case SPEED_10: + reg |= PORT_PCS_CTRL_10; + break; + default: + pr_info("Unknown speed\n"); + goto out; + } + + reg |= PORT_PCS_CTRL_FORCE_DUPLEX; + if (phydev->duplex == DUPLEX_FULL) + reg |= PORT_PCS_CTRL_DUPLEX_FULL; + + if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) && + (port >= ps->num_ports - 2)) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK | + PORT_PCS_CTRL_RGMII_DELAY_TXCLK); + } + _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg); + +out: + mutex_unlock(&ps->smi_mutex); +} + /* Must be called with SMI mutex held */ static int _mv88e6xxx_stats_wait(struct dsa_switch *ds) { @@ -565,7 +653,7 @@ static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port) { int ret; - if (mv88e6xxx_6352_family(ds)) + if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds)) port = (port + 1) << 5; /* Snapshot the hardware statistics counters for this port. 
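
For fixed-link ports, mv88e6xxx_adjust_link() above rewrites PORT_PCS_CTRL so that link, speed and duplex are forced rather than auto-negotiated. A minimal sketch of the bit arithmetic, assuming the PORT_PCS_CTRL_* bit values as defined in mv88e6xxx.h, shows what the register ends up holding for a fixed 1000/full link with no RGMII delays:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PORT_PCS_CTRL_LINK_UP		(1u << 5)
#define PORT_PCS_CTRL_FORCE_LINK	(1u << 4)
#define PORT_PCS_CTRL_DUPLEX_FULL	(1u << 3)
#define PORT_PCS_CTRL_FORCE_DUPLEX	(1u << 2)
#define PORT_PCS_CTRL_1000		0x02u
#define PORT_PCS_CTRL_UNFORCED		0x03u

static uint16_t pcs_ctrl_fixed_1000_full(uint16_t old)
{
	/* clear everything adjust_link() manages ... */
	uint16_t reg = old & ~(PORT_PCS_CTRL_LINK_UP |
			       PORT_PCS_CTRL_FORCE_LINK |
			       PORT_PCS_CTRL_DUPLEX_FULL |
			       PORT_PCS_CTRL_FORCE_DUPLEX |
			       PORT_PCS_CTRL_UNFORCED);

	/* ... then force link up, 1000 Mb/s, full duplex */
	reg |= PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP;
	reg |= PORT_PCS_CTRL_1000;
	reg |= PORT_PCS_CTRL_FORCE_DUPLEX | PORT_PCS_CTRL_DUPLEX_FULL;
	return reg;
}

int main(void)
{
	/* starting from the unforced value, the low byte becomes 0x3e */
	assert(pcs_ctrl_fixed_1000_full(PORT_PCS_CTRL_UNFORCED) == 0x003e);
	printf("PCS_CTRL = 0x%04x\n", pcs_ctrl_fixed_1000_full(0x0003));
	return 0;
}
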
*/ @@ -796,54 +884,6 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, } } -#ifdef CONFIG_NET_DSA_HWMON - -int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - int val; - - *temp = 0; - - mutex_lock(&ps->smi_mutex); - - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); - if (ret < 0) - goto error; - - /* Enable temperature sensor */ - ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); - if (ret < 0) - goto error; - - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); - if (ret < 0) - goto error; - - /* Wait for temperature to stabilize */ - usleep_range(10000, 12000); - - val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); - if (val < 0) { - ret = val; - goto error; - } - - /* Disable temperature sensor */ - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); - if (ret < 0) - goto error; - - *temp = ((val & 0x1f) - 5) * 5; - -error: - _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); - mutex_unlock(&ps->smi_mutex); - return ret; -} -#endif /* CONFIG_NET_DSA_HWMON */ - /* Must be called with SMI lock held */ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) @@ -1000,7 +1040,7 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd) { int ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid); + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); if (ret < 0) return ret; @@ -1127,7 +1167,7 @@ int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) ps->bridge_mask[fid] = br_port_mask; if (fid != ps->fid[port]) { - ps->fid_mask |= 1 << ps->fid[port]; + clear_bit(ps->fid[port], ps->fid_bitmap); ps->fid[port] = fid; ret = _mv88e6xxx_update_bridge_config(ds, fid); } @@ -1161,9 +1201,16 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) mutex_lock(&ps->smi_mutex); - newfid = __ffs(ps->fid_mask); + newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1); + if (unlikely(newfid > ps->num_ports)) { + netdev_err(ds->ports[port], "all first %d FIDs are used\n", + ps->num_ports); + ret = -ENOSPC; + goto unlock; + } + ps->fid[port] = newfid; - ps->fid_mask &= ~(1 << newfid); + set_bit(newfid, ps->fid_bitmap); ps->bridge_mask[fid] &= ~(1 << port); ps->bridge_mask[newfid] = 1 << port; @@ -1171,6 +1218,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) if (!ret) ret = _mv88e6xxx_update_bridge_config(ds, newfid); +unlock: mutex_unlock(&ps->smi_mutex); return ret; @@ -1210,183 +1258,748 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state) return 0; } -static int __mv88e6xxx_write_addr(struct dsa_switch *ds, - const unsigned char *addr) +int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid) { - int i, ret; + int ret; - for (i = 0; i < 3; i++) { - ret = _mv88e6xxx_reg_write( - ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, - (addr[i * 2] << 8) | addr[i * 2 + 1]); + ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN); + if (ret < 0) + return ret; + + *pvid = ret & PORT_DEFAULT_VLAN_MASK; + + return 0; +} + +int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid) +{ + return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN, + pvid & PORT_DEFAULT_VLAN_MASK); +} + +static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds) +{ + return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP, + GLOBAL_VTU_OP_BUSY); +} + +static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op) +{ + int ret; + + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 
GLOBAL_VTU_OP, op); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_wait(ds); +} + +static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds) +{ + int ret; + + ret = _mv88e6xxx_vtu_wait(ds); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL); +} + +static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds, + struct mv88e6xxx_vtu_stu_entry *entry, + unsigned int nibble_offset) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + u16 regs[3]; + int i; + int ret; + + for (i = 0; i < 3; ++i) { + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_VTU_DATA_0_3 + i); if (ret < 0) return ret; + + regs[i] = ret; + } + + for (i = 0; i < ps->num_ports; ++i) { + unsigned int shift = (i % 4) * 4 + nibble_offset; + u16 reg = regs[i / 4]; + + entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK; } return 0; } -static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr) +static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds, + struct mv88e6xxx_vtu_stu_entry *entry, + unsigned int nibble_offset) { - int i, ret; + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + u16 regs[3] = { 0 }; + int i; + int ret; - for (i = 0; i < 3; i++) { - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, - GLOBAL_ATU_MAC_01 + i); + for (i = 0; i < ps->num_ports; ++i) { + unsigned int shift = (i % 4) * 4 + nibble_offset; + u8 data = entry->data[i]; + + regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift; + } + + for (i = 0; i < 3; ++i) { + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, + GLOBAL_VTU_DATA_0_3 + i, regs[i]); if (ret < 0) return ret; - addr[i * 2] = ret >> 8; - addr[i * 2 + 1] = ret & 0xff; } return 0; } -static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port, - const unsigned char *addr, int state) +static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u8 fid = ps->fid[port]; + struct mv88e6xxx_vtu_stu_entry next = { 0 }; int ret; - ret = _mv88e6xxx_atu_wait(ds); + ret = _mv88e6xxx_vtu_wait(ds); if (ret < 0) return ret; - ret = __mv88e6xxx_write_addr(ds, addr); + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, + vid & GLOBAL_VTU_VID_MASK); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, - (0x10 << port) | state); - if (ret) + ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT); + if (ret < 0) return ret; - ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB); + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID); + if (ret < 0) + return ret; - return ret; -} + next.vid = ret & GLOBAL_VTU_VID_MASK; + next.valid = !!(ret & GLOBAL_VTU_VID_VALID); -int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) -{ - int state = is_multicast_ether_addr(addr) ? 
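
Every VTU operation above follows the same handshake: write an op word with GLOBAL_VTU_OP_BUSY set, then poll until the hardware clears the busy bit. A self-contained model of that handshake, with a fake register standing in for the switch (fake_reg_read/fake_reg_write are illustrative names, not driver functions; the op encodings mirror the mv88e6xxx.h hunk below):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GLOBAL_VTU_OP_BUSY	(1u << 15)
#define GLOBAL_VTU_OP_FLUSH_ALL	((0x01u << 12) | GLOBAL_VTU_OP_BUSY)

static uint16_t vtu_op_reg;
static int polls_left;

static void fake_reg_write(uint16_t val)
{
	vtu_op_reg = val;
	polls_left = 3;	/* "hardware" clears BUSY on the third read */
}

static uint16_t fake_reg_read(void)
{
	if (polls_left && --polls_left == 0)
		vtu_op_reg &= ~GLOBAL_VTU_OP_BUSY;
	return vtu_op_reg;
}

static int vtu_wait(void)
{
	int i;

	for (i = 0; i < 10; i++)	/* the kernel loop is bounded, too */
		if (!(fake_reg_read() & GLOBAL_VTU_OP_BUSY))
			return 0;
	return -110;			/* -ETIMEDOUT */
}

int main(void)
{
	fake_reg_write(GLOBAL_VTU_OP_FLUSH_ALL);
	assert(vtu_wait() == 0);
	printf("flush completed, op reg = 0x%04x\n", fake_reg_read());
	return 0;
}
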
- GLOBAL_ATU_DATA_STATE_MC_STATIC : - GLOBAL_ATU_DATA_STATE_UC_STATIC; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; + if (next.valid) { + ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0); + if (ret < 0) + return ret; - mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state); - mutex_unlock(&ps->smi_mutex); + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_VTU_FID); + if (ret < 0) + return ret; - return ret; -} + next.fid = ret & GLOBAL_VTU_FID_MASK; -int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_VTU_SID); + if (ret < 0) + return ret; - mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, - GLOBAL_ATU_DATA_STATE_UNUSED); - mutex_unlock(&ps->smi_mutex); + next.sid = ret & GLOBAL_VTU_SID_MASK; + } + } - return ret; + *entry = next; + return 0; } -static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, bool *is_static) +static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, + struct mv88e6xxx_vtu_stu_entry *entry) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u8 fid = ps->fid[port]; - int ret, state; + u16 reg = 0; + int ret; - ret = _mv88e6xxx_atu_wait(ds); + ret = _mv88e6xxx_vtu_wait(ds); if (ret < 0) return ret; - ret = __mv88e6xxx_write_addr(ds, addr); + if (!entry->valid) + goto loadpurge; + + /* Write port member tags */ + ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0); if (ret < 0) return ret; - do { - ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB); + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + reg = entry->sid & GLOBAL_VTU_SID_MASK; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA); + reg = entry->fid & GLOBAL_VTU_FID_MASK; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg); if (ret < 0) return ret; - state = ret & GLOBAL_ATU_DATA_STATE_MASK; - if (state == GLOBAL_ATU_DATA_STATE_UNUSED) - return -ENOENT; - } while (!(((ret >> 4) & 0xff) & (1 << port))); + } - ret = __mv88e6xxx_read_addr(ds, addr); + reg = GLOBAL_VTU_VID_VALID; +loadpurge: + reg |= entry->vid & GLOBAL_VTU_VID_MASK; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg); if (ret < 0) return ret; - *is_static = state == (is_multicast_ether_addr(addr) ? 
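
_mv88e6xxx_vtu_stu_data_read() and _write() above pack one 4-bit nibble per port into the three GLOBAL_VTU_DATA registers; the VTU member tag lives in the low two bits of each nibble (nibble_offset 0) and the STU port state in the high two (nibble_offset 2). A round-trip sketch of that layout, using the member-tag encodings from the mv88e6xxx.h hunk below:

#include <assert.h>
#include <stdint.h>

#define GLOBAL_VTU_STU_DATA_MASK 0x03u

static void pack(const uint8_t *data, int num_ports, int nibble_offset,
		 uint16_t regs[3])
{
	int i;

	regs[0] = regs[1] = regs[2] = 0;
	for (i = 0; i < num_ports; i++) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;

		regs[i / 4] |= (data[i] & GLOBAL_VTU_STU_DATA_MASK) << shift;
	}
}

static void unpack(const uint16_t regs[3], int num_ports, int nibble_offset,
		   uint8_t *data)
{
	int i;

	for (i = 0; i < num_ports; i++) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;

		data[i] = (regs[i / 4] >> shift) & GLOBAL_VTU_STU_DATA_MASK;
	}
}

int main(void)
{
	/* member tags for a 7-port switch: port 0 untagged (1),
	 * port 5 tagged (2), everything else non-member (3) */
	uint8_t tags[7] = { 1, 3, 3, 3, 3, 2, 3 }, out[7];
	uint16_t regs[3];
	int i;

	pack(tags, 7, 0, regs);
	unpack(regs, 7, 0, out);
	for (i = 0; i < 7; i++)
		assert(out[i] == tags[i]);
	return 0;
}

Sharing one helper pair for both tables is what the nibble_offset parameter buys: the VTU and the STU differ only in which half of each nibble they use.
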
- GLOBAL_ATU_DATA_STATE_MC_STATIC : - GLOBAL_ATU_DATA_STATE_UC_STATIC); - - return 0; + return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE); } -/* get next entry for port */ -int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, bool *is_static) +static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid, + struct mv88e6xxx_vtu_stu_entry *entry) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry next = { 0 }; int ret; - mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static); - mutex_unlock(&ps->smi_mutex); + ret = _mv88e6xxx_vtu_wait(ds); + if (ret < 0) + return ret; - return ret; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, + sid & GLOBAL_VTU_SID_MASK); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID); + if (ret < 0) + return ret; + + next.sid = ret & GLOBAL_VTU_SID_MASK; + + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID); + if (ret < 0) + return ret; + + next.valid = !!(ret & GLOBAL_VTU_VID_VALID); + + if (next.valid) { + ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2); + if (ret < 0) + return ret; + } + + *entry = next; + return 0; } -static void mv88e6xxx_bridge_work(struct work_struct *work) +static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds, + struct mv88e6xxx_vtu_stu_entry *entry) { - struct mv88e6xxx_priv_state *ps; - struct dsa_switch *ds; - int port; + u16 reg = 0; + int ret; - ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work); - ds = ((struct dsa_switch *)ps) - 1; + ret = _mv88e6xxx_vtu_wait(ds); + if (ret < 0) + return ret; - while (ps->port_state_update_mask) { - port = __ffs(ps->port_state_update_mask); - clear_bit(port, &ps->port_state_update_mask); - mv88e6xxx_set_port_state(ds, port, ps->port_state[port]); - } + if (!entry->valid) + goto loadpurge; + + /* Write port states */ + ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2); + if (ret < 0) + return ret; + + reg = GLOBAL_VTU_VID_VALID; +loadpurge: + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg); + if (ret < 0) + return ret; + + reg = entry->sid & GLOBAL_VTU_SID_MASK; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE); } -static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) +static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret, fid; - u16 reg; + struct mv88e6xxx_vtu_stu_entry vlan = { + .valid = true, + .vid = vid, + }; + int i; - mutex_lock(&ps->smi_mutex); + /* exclude all ports except the CPU */ + for (i = 0; i < ps->num_ports; ++i) + vlan.data[i] = dsa_is_cpu_port(ds, i) ? 
+ GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED : + GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || - mv88e6xxx_6065_family(ds)) { - /* MAC Forcing register: don't force link, speed, - * duplex or flow control state to any particular - * values on physical ports, but force the CPU port + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + struct mv88e6xxx_vtu_stu_entry vstp; + int err; + + /* Adding a VTU entry requires a valid STU entry. As VSTP is not + * implemented, only one STU entry is needed to cover all VTU + * entries. Thus, validate the SID 0. + */ + vlan.sid = 0; + err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp); + if (err) + return err; + + if (vstp.sid != vlan.sid || !vstp.valid) { + memset(&vstp, 0, sizeof(vstp)); + vstp.valid = true; + vstp.sid = vlan.sid; + + err = _mv88e6xxx_stu_loadpurge(ds, &vstp); + if (err) + return err; + } + + /* Non-bridged ports and bridge groups use FIDs from 1 to + * num_ports; VLANs use FIDs from num_ports+1 to 4095. + */ + vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, + ps->num_ports + 1); + if (unlikely(vlan.fid == VLAN_N_VID)) { + pr_err("no more FID available for VLAN %d\n", vid); + return -ENOSPC; + } + + err = _mv88e6xxx_flush_fid(ds, vlan.fid); + if (err) + return err; + + set_bit(vlan.fid, ps->fid_bitmap); + } + + *entry = vlan; + return 0; +} + +int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, + bool untagged) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + mutex_lock(&ps->smi_mutex); + err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan); + if (err) + goto unlock; + + if (vlan.vid != vid || !vlan.valid) { + err = _mv88e6xxx_vlan_init(ds, vid, &vlan); + if (err) + goto unlock; + } + + vlan.data[port] = untagged ? 
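
_mv88e6xxx_vlan_init() above carves the 4096-entry FID space in two: FIDs 1..num_ports stay reserved for non-bridged ports and bridge groups, while VLANs allocate upwards from num_ports + 1 with find_next_zero_bit(). A user-space model of that allocation policy (set_bit_/test_bit_/find_next_zero_bit_ are simplified stand-ins for the kernel bitmap helpers):

#include <assert.h>
#include <stdio.h>

#define VLAN_N_VID	4096
#define BITS		(8 * sizeof(unsigned long))

static unsigned long fid_bitmap[VLAN_N_VID / BITS];

static void set_bit_(int n)
{
	fid_bitmap[n / BITS] |= 1UL << (n % BITS);
}

static int test_bit_(int n)
{
	return fid_bitmap[n / BITS] >> (n % BITS) & 1;
}

static int find_next_zero_bit_(int size, int offset)
{
	int n;

	for (n = offset; n < size; n++)
		if (!test_bit_(n))
			return n;
	return size;	/* like the kernel helper: "not found" == size */
}

int main(void)
{
	int num_ports = 7, port, fid;

	/* mv88e6xxx_setup_port(): each port starts with fid = port + 1 */
	for (port = 0; port < num_ports; port++)
		set_bit_(port + 1);

	/* _mv88e6xxx_vlan_init(): VLAN FIDs start above the port range */
	fid = find_next_zero_bit_(VLAN_N_VID, num_ports + 1);
	assert(fid == num_ports + 1);
	set_bit_(fid);
	printf("first VLAN gets FID %d\n", fid);
	return 0;
}
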
+ GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : + GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; + + err = _mv88e6xxx_vtu_loadpurge(ds, &vlan); +unlock: + mutex_unlock(&ps->smi_mutex); + + return err; +} + +int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + bool keep = false; + int i, err; + + mutex_lock(&ps->smi_mutex); + + err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan); + if (err) + goto unlock; + + if (vlan.vid != vid || !vlan.valid || + vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { + err = -ENOENT; + goto unlock; + } + + vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; + + /* keep the VLAN unless all ports are excluded */ + for (i = 0; i < ps->num_ports; ++i) { + if (dsa_is_cpu_port(ds, i)) + continue; + + if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { + keep = true; + break; + } + } + + vlan.valid = keep; + err = _mv88e6xxx_vtu_loadpurge(ds, &vlan); + if (err) + goto unlock; + + if (!keep) + clear_bit(vlan.fid, ps->fid_bitmap); + +unlock: + mutex_unlock(&ps->smi_mutex); + + return err; +} + +static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + int err; + + do { + if (vid == 4095) + return -ENOENT; + + err = _mv88e6xxx_vtu_getnext(ds, vid, entry); + if (err) + return err; + + if (!entry->valid) + return -ENOENT; + + vid = entry->vid; + } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED && + entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED); + + return 0; +} + +int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, + unsigned long *ports, unsigned long *untagged) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry next; + int port; + int err; + + if (*vid == 4095) + return -ENOENT; + + mutex_lock(&ps->smi_mutex); + err = _mv88e6xxx_vtu_getnext(ds, *vid, &next); + mutex_unlock(&ps->smi_mutex); + + if (err) + return err; + + if (!next.valid) + return -ENOENT; + + *vid = next.vid; + + for (port = 0; port < ps->num_ports; ++port) { + clear_bit(port, ports); + clear_bit(port, untagged); + + if (dsa_is_cpu_port(ds, port)) + continue; + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED || + next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) + set_bit(port, ports); + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) + set_bit(port, untagged); + } + + return 0; +} + +static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds, + const unsigned char *addr) +{ + int i, ret; + + for (i = 0; i < 3; i++) { + ret = _mv88e6xxx_reg_write( + ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, + (addr[i * 2] << 8) | addr[i * 2 + 1]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr) +{ + int i, ret; + + for (i = 0; i < 3; i++) { + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_ATU_MAC_01 + i); + if (ret < 0) + return ret; + addr[i * 2] = ret >> 8; + addr[i * 2 + 1] = ret & 0xff; + } + + return 0; +} + +static int _mv88e6xxx_atu_load(struct dsa_switch *ds, + struct mv88e6xxx_atu_entry *entry) +{ + u16 reg = 0; + int ret; + + ret = _mv88e6xxx_atu_wait(ds); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_mac_write(ds, entry->mac); + if (ret < 0) + return ret; + + if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) { + unsigned int mask, shift; + + if (entry->trunk) { + reg |= GLOBAL_ATU_DATA_TRUNK; + mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; + shift 
= GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; + } else { + mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK; + shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; + } + + reg |= (entry->portv_trunkid << shift) & mask; + } + + reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK; + + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg); + if (ret < 0) + return ret; + + return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB); +} + +static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + if (vid == 0) + return ps->fid[port]; + + err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan); + if (err) + return err; + + if (vlan.vid == vid) + return vlan.fid; + + return -ENOENT; +} + +static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + u8 state) +{ + struct mv88e6xxx_atu_entry entry = { 0 }; + int ret; + + ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid); + if (ret < 0) + return ret; + + entry.fid = ret; + entry.state = state; + ether_addr_copy(entry.mac, addr); + if (state != GLOBAL_ATU_DATA_STATE_UNUSED) { + entry.trunk = false; + entry.portv_trunkid = BIT(port); + } + + return _mv88e6xxx_atu_load(ds, &entry); +} + +int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + int state = is_multicast_ether_addr(addr) ? + GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC; + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->smi_mutex); + ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state); + mutex_unlock(&ps->smi_mutex); + + return ret; +} + +int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + mutex_lock(&ps->smi_mutex); + ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, + GLOBAL_ATU_DATA_STATE_UNUSED); + mutex_unlock(&ps->smi_mutex); + + return ret; +} + +static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, + const unsigned char *addr, + struct mv88e6xxx_atu_entry *entry) +{ + struct mv88e6xxx_atu_entry next = { 0 }; + int ret; + + next.fid = fid; + + ret = _mv88e6xxx_atu_wait(ds); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_mac_write(ds, addr); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_mac_read(ds, next.mac); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA); + if (ret < 0) + return ret; + + next.state = ret & GLOBAL_ATU_DATA_STATE_MASK; + if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) { + unsigned int mask, shift; + + if (ret & GLOBAL_ATU_DATA_TRUNK) { + next.trunk = true; + mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; + shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; + } else { + next.trunk = false; + mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK; + shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; + } + + next.portv_trunkid = (ret & mask) >> shift; + } + + *entry = next; + return 0; +} + +/* get next entry for port */ +int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, + unsigned char *addr, u16 *vid, bool *is_static) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_atu_entry next; + u16 fid; + int ret; + + mutex_lock(&ps->smi_mutex); + + ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid); + if (ret < 0) + goto unlock; + fid = 
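
_mv88e6xxx_atu_load() above serializes an address-table entry into the GLOBAL_ATU_DATA word: the entry state in the low nibble, and either a port vector or (with bit 15 set) a trunk ID in the bits above it. A sketch of that encoding; the masks come from the mv88e6xxx.h hunk below, while the 0x0e static-unicast state value is the usual 88E6xxx encoding and is not shown in this patch:

#include <assert.h>
#include <stdint.h>

#define GLOBAL_ATU_DATA_TRUNK			(1u << 15)
#define GLOBAL_ATU_DATA_TRUNK_ID_MASK		0x00f0u
#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT		4
#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK	0x3ff0u
#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT	4
#define GLOBAL_ATU_DATA_STATE_MASK		0x0fu
#define GLOBAL_ATU_DATA_STATE_UC_STATIC		0x0eu	/* assumed, see above */

static uint16_t atu_data(int trunk, uint16_t portv_trunkid, uint8_t state)
{
	uint16_t reg = 0;
	unsigned int mask, shift;

	if (trunk) {
		reg |= GLOBAL_ATU_DATA_TRUNK;
		mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
		shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
	} else {
		mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
		shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
	}
	reg |= (portv_trunkid << shift) & mask;
	reg |= state & GLOBAL_ATU_DATA_STATE_MASK;
	return reg;
}

int main(void)
{
	/* static unicast entry reachable through port 2 only:
	 * port vector = BIT(2) = 0x4, shifted into bits 13:4 */
	assert(atu_data(0, 1u << 2, GLOBAL_ATU_DATA_STATE_UC_STATIC) == 0x004e);
	return 0;
}
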
ret; + + do { + if (is_broadcast_ether_addr(addr)) { + struct mv88e6xxx_vtu_stu_entry vtu; + + ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu); + if (ret < 0) + goto unlock; + + *vid = vtu.vid; + fid = vtu.fid; + } + + ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next); + if (ret < 0) + goto unlock; + + ether_addr_copy(addr, next.mac); + + if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED) + continue; + } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0); + + *is_static = next.state == (is_multicast_ether_addr(addr) ? + GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC); +unlock: + mutex_unlock(&ps->smi_mutex); + + return ret; +} + +static void mv88e6xxx_bridge_work(struct work_struct *work) +{ + struct mv88e6xxx_priv_state *ps; + struct dsa_switch *ds; + int port; + + ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work); + ds = ((struct dsa_switch *)ps) - 1; + + while (ps->port_state_update_mask) { + port = __ffs(ps->port_state_update_mask); + clear_bit(port, &ps->port_state_update_mask); + mv88e6xxx_set_port_state(ds, port, ps->port_state[port]); + } +} + +static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret, fid; + u16 reg; + + mutex_lock(&ps->smi_mutex); + + if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || + mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) { + /* MAC Forcing register: don't force link, speed, + * duplex or flow control state to any particular + * values on physical ports, but force the CPU port * and all DSA ports to their maximum bandwidth and * full duplex. */ reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); - if (dsa_is_cpu_port(ds, port) || - ds->dsa_port_mask & (1 << port)) { + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { reg &= ~PORT_PCS_CTRL_UNFORCED; reg |= PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP | @@ -1424,7 +2037,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6185_family(ds)) + mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) reg = PORT_CONTROL_IGMP_MLD_SNOOP | PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | PORT_CONTROL_STATE_FORWARDING; @@ -1432,26 +2045,33 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) reg |= PORT_CONTROL_DSA_TAG; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; else reg |= PORT_CONTROL_FRAME_MODE_DSA; + reg |= PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6185_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) { if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) reg |= PORT_CONTROL_EGRESS_ADD_TAG; } } - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - 
mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) { - if (ds->dsa_port_mask & (1 << port)) + if (dsa_is_dsa_port(ds, port)) { + if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) + reg |= PORT_CONTROL_DSA_TAG; + if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { reg |= PORT_CONTROL_FRAME_MODE_DSA; + } + if (port == dsa_upstream_port(ds)) reg |= PORT_CONTROL_FORWARD_UNKNOWN | PORT_CONTROL_FORWARD_UNKNOWN_MC; @@ -1463,22 +2083,20 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) goto abort; } - /* Port Control 2: don't force a good FCS, set the maximum - * frame size to 10240 bytes, don't let the switch add or - * strip 802.1q tags, don't discard tagged or untagged frames - * on this port, do a destination address lookup on all - * received packets as usual, disable ARP mirroring and don't - * send a copy of all transmitted/received frames on this port - * to the CPU. + /* Port Control 2: don't force a good FCS, set the maximum frame size to + * 10240 bytes, enable secure 802.1q tags, don't discard tagged or + * untagged frames on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't send a + * copy of all transmitted/received frames on this port to the CPU. */ reg = 0; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds)) + mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds)) reg = PORT_CONTROL_2_MAP_DA; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds)) + mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds)) reg |= PORT_CONTROL_2_JUMBO_10240; if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) { @@ -1491,6 +2109,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; } + reg |= PORT_CONTROL_2_8021Q_FALLBACK; + if (reg) { ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2, reg); @@ -1515,7 +2135,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) goto abort; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Do not limit the period of time that this port can * be paused for by the remote end or the period of * time that this port can pause the remote end. @@ -1565,7 +2186,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Rate Control: disable ingress rate limiting. */ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL, 0x0001); @@ -1585,9 +2207,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * ports, and allow each of the 'real' ports to only talk to * the upstream port. 
*/ - fid = __ffs(ps->fid_mask); + fid = port + 1; ps->fid[port] = fid; - ps->fid_mask &= ~(1 << fid); + set_bit(fid, ps->fid_bitmap); if (!dsa_is_cpu_port(ds, port)) ps->bridge_mask[fid] = 1 << port; @@ -1684,7 +2306,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds, unsigned char addr[6]; int ret, data, state; - ret = __mv88e6xxx_write_addr(ds, bcast); + ret = _mv88e6xxx_atu_mac_write(ds, bcast); if (ret < 0) return ret; @@ -1699,7 +2321,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds, state = data & GLOBAL_ATU_DATA_STATE_MASK; if (state == GLOBAL_ATU_DATA_STATE_UNUSED) break; - ret = __mv88e6xxx_read_addr(ds, addr); + ret = _mv88e6xxx_atu_mac_read(ds, addr); if (ret < 0) return ret; mv88e6xxx_atu_show_entry(s, dbnum, addr, data); @@ -1886,8 +2508,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0; - ps->fid_mask = (1 << DSA_MAX_PORTS) - 1; - INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); name = kasprintf(GFP_KERNEL, "dsa%d", ds->index); @@ -1914,6 +2534,7 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) int mv88e6xxx_setup_global(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; int i; /* Set the default address aging time to 5 minutes, and @@ -1977,7 +2598,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Send all frames with destination addresses matching * 01:80:c2:00:00:2x to the CPU port. */ @@ -1996,7 +2618,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Disable ingress rate limiting by resetting all * ingress rate limit registers to their initial * state. @@ -2010,9 +2633,17 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL); /* Wait for the flush to complete. 
*/ - _mv88e6xxx_stats_wait(ds); + mutex_lock(&ps->smi_mutex); + ret = _mv88e6xxx_stats_wait(ds); + if (ret < 0) + goto unlock; - return 0; + /* Clear all the VTU and STU entries */ + ret = _mv88e6xxx_vtu_stu_flush(ds); +unlock: + mutex_unlock(&ps->smi_mutex); + + return ret; } int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active) @@ -2163,6 +2794,132 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum, return ret; } +#ifdef CONFIG_NET_DSA_HWMON + +static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + int val; + + *temp = 0; + + mutex_lock(&ps->smi_mutex); + + ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); + if (ret < 0) + goto error; + + ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); + if (val < 0) { + ret = val; + goto error; + } + + /* Disable temperature sensor */ + ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); + if (ret < 0) + goto error; + + *temp = ((val & 0x1f) - 5) * 5; + +error: + _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); + mutex_unlock(&ps->smi_mutex); + return ret; +} + +static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) +{ + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + int ret; + + *temp = 0; + + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27); + if (ret < 0) + return ret; + + *temp = (ret & 0xff) - 25; + + return 0; +} + +int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) +{ + if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds)) + return mv88e63xx_get_temp(ds, temp); + + return mv88e61xx_get_temp(ds, temp); +} + +int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) +{ + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + int ret; + + if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + return -EOPNOTSUPP; + + *temp = 0; + + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + + *temp = (((ret >> 8) & 0x1f) * 5) - 25; + + return 0; +} + +int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) +{ + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + int ret; + + if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + return -EOPNOTSUPP; + + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + return mv88e6xxx_phy_page_write(ds, phy, 6, 26, + (ret & 0xe0ff) | (temp << 8)); +} + +int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) +{ + int phy = mv88e6xxx_6320_family(ds) ? 
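
The hwmon helpers above encode temperatures with a fixed offset: the raw sensor reading is degrees C plus 25, and the limit field holds (limit + 25) / 5 in a 5-bit field at bits 12:8. A round-trip sketch, with div_round_closest() and clamp_val() modeled after the kernel macros of the same names:

#include <assert.h>
#include <stdio.h>

static int div_round_closest(int x, int d)
{
	return (x + d / 2) / d;
}

static int clamp_val(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static int decode_temp(unsigned int reg)	/* phy page 6, reg 27 */
{
	return (reg & 0xff) - 25;
}

static int decode_limit(unsigned int reg)	/* phy page 6, reg 26 */
{
	return (((reg >> 8) & 0x1f) * 5) - 25;
}

static unsigned int encode_limit(int temp)	/* 5-bit field, bits 12:8 */
{
	return clamp_val(div_round_closest(temp, 5) + 5, 0, 0x1f);
}

int main(void)
{
	/* a 75C limit encodes to field value 20 and decodes back to 75C */
	assert(encode_limit(75) == 20);
	assert(decode_limit(encode_limit(75) << 8) == 75);
	printf("raw 0x55 reads as %dC\n", decode_temp(0x55));
	return 0;
}
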
3 : 0; + int ret; + + if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + return -EOPNOTSUPP; + + *alarm = false; + + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + + *alarm = !!(ret & 0x40); + + return 0; +} +#endif /* CONFIG_NET_DSA_HWMON */ + static int __init mv88e6xxx_init(void) { #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index a650b2656..9b6f3d9d5 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -11,6 +11,8 @@ #ifndef __MV88E6XXX_H #define __MV88E6XXX_H +#include + #ifndef UINT64_MAX #define UINT64_MAX (u64)(~((u64)0)) #endif @@ -44,6 +46,8 @@ #define PORT_STATUS_TX_PAUSED BIT(5) #define PORT_STATUS_FLOW_CTRL BIT(4) #define PORT_PCS_CTRL 0x01 +#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15) +#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14) #define PORT_PCS_CTRL_FC BIT(7) #define PORT_PCS_CTRL_FORCE_FC BIT(6) #define PORT_PCS_CTRL_LINK_UP BIT(5) @@ -89,7 +93,12 @@ #define PORT_SWITCH_ID_6182 0x1a60 #define PORT_SWITCH_ID_6185 0x1a70 #define PORT_SWITCH_ID_6240 0x2400 -#define PORT_SWITCH_ID_6320 0x1250 +#define PORT_SWITCH_ID_6320 0x1150 +#define PORT_SWITCH_ID_6320_A1 0x1151 +#define PORT_SWITCH_ID_6320_A2 0x1152 +#define PORT_SWITCH_ID_6321 0x3100 +#define PORT_SWITCH_ID_6321_A1 0x3101 +#define PORT_SWITCH_ID_6321_A2 0x3102 #define PORT_SWITCH_ID_6350 0x3710 #define PORT_SWITCH_ID_6351 0x3750 #define PORT_SWITCH_ID_6352 0x3520 @@ -124,6 +133,7 @@ #define PORT_CONTROL_1 0x05 #define PORT_BASE_VLAN 0x06 #define PORT_DEFAULT_VLAN 0x07 +#define PORT_DEFAULT_VLAN_MASK 0xfff #define PORT_CONTROL_2 0x08 #define PORT_CONTROL_2_IGNORE_FCS BIT(15) #define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14) @@ -132,6 +142,11 @@ #define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12) #define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12) #define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12) +#define PORT_CONTROL_2_8021Q_MASK (0x03 << 10) +#define PORT_CONTROL_2_8021Q_DISABLED (0x00 << 10) +#define PORT_CONTROL_2_8021Q_FALLBACK (0x01 << 10) +#define PORT_CONTROL_2_8021Q_CHECK (0x02 << 10) +#define PORT_CONTROL_2_8021Q_SECURE (0x03 << 10) #define PORT_CONTROL_2_DISCARD_TAGGED BIT(9) #define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8) #define PORT_CONTROL_2_MAP_DA BIT(7) @@ -164,6 +179,11 @@ #define GLOBAL_MAC_01 0x01 #define GLOBAL_MAC_23 0x02 #define GLOBAL_MAC_45 0x03 +#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_FID_MASK 0xfff +#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_SID_MASK 0x3f #define GLOBAL_CONTROL 0x04 #define GLOBAL_CONTROL_SW_RESET BIT(15) #define GLOBAL_CONTROL_PPU_ENABLE BIT(14) @@ -180,10 +200,27 @@ #define GLOBAL_CONTROL_TCAM_EN BIT(1) #define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0) #define GLOBAL_VTU_OP 0x05 +#define GLOBAL_VTU_OP_BUSY BIT(15) +#define GLOBAL_VTU_OP_FLUSH_ALL ((0x01 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_VTU_LOAD_PURGE ((0x03 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_VTU_GET_NEXT ((0x04 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_STU_LOAD_PURGE ((0x05 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY) #define GLOBAL_VTU_VID 0x06 +#define GLOBAL_VTU_VID_MASK 0xfff +#define GLOBAL_VTU_VID_VALID BIT(12) #define GLOBAL_VTU_DATA_0_3 0x07 #define GLOBAL_VTU_DATA_4_7 0x08 #define GLOBAL_VTU_DATA_8_11 0x09 +#define GLOBAL_VTU_STU_DATA_MASK 0x03 +#define 
GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x00 +#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED 0x01 +#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED 0x02 +#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x03 +#define GLOBAL_STU_DATA_PORT_STATE_DISABLED 0x00 +#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING 0x01 +#define GLOBAL_STU_DATA_PORT_STATE_LEARNING 0x02 +#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING 0x03 #define GLOBAL_ATU_CONTROL 0x0a #define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3) #define GLOBAL_ATU_OP 0x0b @@ -198,6 +235,8 @@ #define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY) #define GLOBAL_ATU_DATA 0x0c #define GLOBAL_ATU_DATA_TRUNK BIT(15) +#define GLOBAL_ATU_DATA_TRUNK_ID_MASK 0x00f0 +#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT 4 #define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3ff0 #define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4 #define GLOBAL_ATU_DATA_STATE_MASK 0x0f @@ -280,8 +319,12 @@ #define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3) #define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0 #define GLOBAL2_EEPROM_OP 0x14 -#define GLOBAL2_EEPROM_OP_BUSY BIT(15) -#define GLOBAL2_EEPROM_OP_LOAD BIT(11) +#define GLOBAL2_EEPROM_OP_BUSY BIT(15) +#define GLOBAL2_EEPROM_OP_WRITE ((3 << 12) | GLOBAL2_EEPROM_OP_BUSY) +#define GLOBAL2_EEPROM_OP_READ ((4 << 12) | GLOBAL2_EEPROM_OP_BUSY) +#define GLOBAL2_EEPROM_OP_LOAD BIT(11) +#define GLOBAL2_EEPROM_OP_WRITE_EN BIT(10) +#define GLOBAL2_EEPROM_OP_ADDR_MASK 0xff #define GLOBAL2_EEPROM_DATA 0x15 #define GLOBAL2_PTP_AVB_OP 0x16 #define GLOBAL2_PTP_AVB_DATA 0x17 @@ -304,6 +347,25 @@ #define GLOBAL2_QOS_WEIGHT 0x1c #define GLOBAL2_MISC 0x1d +struct mv88e6xxx_atu_entry { + u16 fid; + u8 state; + bool trunk; + u16 portv_trunkid; + u8 mac[ETH_ALEN]; +}; + +struct mv88e6xxx_vtu_stu_entry { + /* VTU only */ + u16 vid; + u16 fid; + + /* VTU and STU */ + u8 sid; + bool valid; + u8 data[DSA_MAX_PORTS]; +}; + struct mv88e6xxx_priv_state { /* When using multi-chip addressing, this mutex protects * access to the indirect access registers. 
(In single-chip @@ -342,9 +404,9 @@ struct mv88e6xxx_priv_state { /* hw bridging */ - u32 fid_mask; - u8 fid[DSA_MAX_PORTS]; - u16 bridge_mask[DSA_MAX_PORTS]; + DECLARE_BITMAP(fid_bitmap, VLAN_N_VID); /* FIDs 1 to 4095 available */ + u16 fid[DSA_MAX_PORTS]; /* per (non-bridged) port FID */ + u16 bridge_mask[DSA_MAX_PORTS]; /* br groups (indexed by FID) */ unsigned long port_state_update_mask; u8 port_state[DSA_MAX_PORTS]; @@ -386,10 +448,15 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); int mv88e6xxx_get_sset_count(struct dsa_switch *ds); int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds); +void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev); int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *_p); -int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); +int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); +int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp); +int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp); +int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm); int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds); int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds); int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum); @@ -401,15 +468,23 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask); int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask); int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state); +int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid); +int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid); +int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, + bool untagged); +int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid); +int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, + unsigned long *ports, unsigned long *untagged); int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, bool *is_static); + unsigned char *addr, u16 *vid, bool *is_static); int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg); int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, int reg, int val); + extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; extern struct dsa_switch_driver mv88e6352_switch_driver; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 49adbf1b7..815eb9499 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -144,10 +144,9 @@ static void dummy_setup(struct net_device *dev) dev->destructor = free_netdev; /* Fill in device structure with ethernet-generic values. 
*/ - dev->tx_queue_len = 0; dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO; dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 753887d02..2839af00f 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1726,6 +1726,7 @@ vortex_up(struct net_device *dev) if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ iowrite32(0x8000, vp->cb_fn_base + 4); netif_start_queue (dev); + netdev_reset_queue(dev); err_out: return err; } @@ -1935,16 +1936,18 @@ static void vortex_tx_timeout(struct net_device *dev) if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc), ioaddr + DownListPtr); - if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) + if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) { netif_wake_queue (dev); + netdev_reset_queue (dev); + } if (vp->drv_flags & IS_BOOMERANG) iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); iowrite16(DownUnstall, ioaddr + EL3_CMD); } else { dev->stats.tx_dropped++; netif_wake_queue(dev); + netdev_reset_queue(dev); } - /* Issue Tx Enable */ iowrite16(TxEnable, ioaddr + EL3_CMD); dev->trans_start = jiffies; /* prevent tx timeout */ @@ -2063,6 +2066,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; + int skblen = skb->len; /* Put out the doubleword header... */ iowrite32(skb->len, ioaddr + TX_FIFO); @@ -2094,6 +2098,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) } } + netdev_sent_queue(dev, skblen); /* Clear the Tx status stack. */ { @@ -2125,6 +2130,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) void __iomem *ioaddr = vp->ioaddr; /* Calculate the next Tx descriptor entry. */ int entry = vp->cur_tx % TX_RING_SIZE; + int skblen = skb->len; struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; unsigned long flags; dma_addr_t dma_addr; @@ -2230,6 +2236,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) } vp->cur_tx++; + netdev_sent_queue(dev, skblen); + if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { netif_stop_queue (dev); } else { /* Clear previous interrupt enable. */ @@ -2267,6 +2275,7 @@ vortex_interrupt(int irq, void *dev_id) int status; int work_done = max_interrupt_work; int handled = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; ioaddr = vp->ioaddr; spin_lock(&vp->lock); @@ -2314,6 +2323,8 @@ vortex_interrupt(int irq, void *dev_id) if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. 
*/ pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); + pkts_compl++; + bytes_compl += vp->tx_skb->len; dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ if (ioread16(ioaddr + TxFree) > 1536) { /* @@ -2358,6 +2369,7 @@ vortex_interrupt(int irq, void *dev_id) iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); + netdev_completed_queue(dev, pkts_compl, bytes_compl); spin_unlock(&vp->window_lock); if (vortex_debug > 4) @@ -2382,6 +2394,7 @@ boomerang_interrupt(int irq, void *dev_id) int status; int work_done = max_interrupt_work; int handled = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; ioaddr = vp->ioaddr; @@ -2455,6 +2468,8 @@ boomerang_interrupt(int irq, void *dev_id) pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); #endif + pkts_compl++; + bytes_compl += skb->len; dev_kfree_skb_irq(skb); vp->tx_skbuff[entry] = NULL; } else { @@ -2495,6 +2510,7 @@ boomerang_interrupt(int irq, void *dev_id) iowrite32(0x8000, vp->cb_fn_base + 4); } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch); + netdev_completed_queue(dev, pkts_compl, bytes_compl); if (vortex_debug > 4) pr_debug("%s: exiting interrupt, status %4.4x.\n", @@ -2696,7 +2712,8 @@ vortex_down(struct net_device *dev, int final_down) struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; - netif_stop_queue (dev); + netdev_reset_queue(dev); + netif_stop_queue(dev); del_timer_sync(&vp->rx_oom_timer); del_timer_sync(&vp->timer); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index f3bb17840..05aa7597d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -167,6 +167,7 @@ source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" +source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/tile/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index b52e0f63f..ddfc80811 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -77,6 +77,7 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ +obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_TILE_NET) += tile/ diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index bab01c849..8d50314ac 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "sun4i-emac.h" @@ -846,22 +847,32 @@ static int emac_probe(struct platform_device *pdev) if (ndev->irq == -ENXIO) { netdev_err(ndev, "No irq resource\n"); ret = ndev->irq; - goto out; + goto out_iounmap; } db->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(db->clk)) { ret = PTR_ERR(db->clk); - goto out; + goto out_iounmap; + } + + ret = clk_prepare_enable(db->clk); + if (ret) { + dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret); + goto out_iounmap; } - clk_prepare_enable(db->clk); + ret = sunxi_sram_claim(&pdev->dev); + if (ret) { + 
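
The 3c59x hunks above add byte queue limits accounting: netdev_sent_queue() on every transmit, netdev_completed_queue() from the interrupt handlers, and netdev_reset_queue() whenever the ring is restarted or torn down so the counters rebaseline. The sketch below models only that bookkeeping invariant, not the real kernel API (which also counts packets and feeds a dynamic queue-limit algorithm):

#include <assert.h>

static unsigned long sent_bytes, completed_bytes;

static void sent_queue(unsigned int bytes)
{
	sent_bytes += bytes;
}

static void completed_queue(unsigned int bytes)
{
	completed_bytes += bytes;
	/* completions may never run ahead of submissions */
	assert(completed_bytes <= sent_bytes);
}

static void reset_queue(void)
{
	sent_bytes = completed_bytes = 0;
}

static unsigned long in_flight(void)
{
	return sent_bytes - completed_bytes;
}

int main(void)
{
	sent_queue(1500);		/* vortex_start_xmit() */
	sent_queue(60);
	completed_queue(1500);		/* vortex_interrupt() */
	assert(in_flight() == 60);
	reset_queue();			/* vortex_down(), timeout recovery */
	assert(in_flight() == 0);
	return 0;
}
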
dev_err(&pdev->dev, "Error couldn't map SRAM to device\n"); + goto out_clk_disable_unprepare; + } db->phy_node = of_parse_phandle(np, "phy", 0); if (!db->phy_node) { dev_err(&pdev->dev, "no associated PHY\n"); ret = -ENODEV; - goto out; + goto out_release_sram; } /* Read MAC-address from DT */ @@ -893,7 +904,7 @@ static int emac_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Registering netdev failed!\n"); ret = -ENODEV; - goto out; + goto out_release_sram; } dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n", @@ -901,6 +912,12 @@ static int emac_probe(struct platform_device *pdev) return 0; +out_release_sram: + sunxi_sram_release(&pdev->dev); +out_clk_disable_unprepare: + clk_disable_unprepare(db->clk); +out_iounmap: + iounmap(db->membase); out: dev_err(db->dev, "not found (%d).\n", ret); @@ -912,8 +929,12 @@ out: static int emac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct emac_board_info *db = netdev_priv(ndev); unregister_netdev(ndev); + sunxi_sram_release(&pdev->dev); + clk_disable_unprepare(db->clk); + iounmap(db->membase); free_netdev(ndev); dev_dbg(&pdev->dev, "released and freed device\n"); diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c index 580553d42..88ef67a99 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -71,8 +71,6 @@ int sgdma_initialize(struct altera_tse_private *priv) SGDMA_CTRLREG_INTEN | SGDMA_CTRLREG_ILASTD; - priv->sgdmadesclen = sizeof(struct sgdma_descrip); - INIT_LIST_HEAD(&priv->txlisthd); INIT_LIST_HEAD(&priv->rxlisthd); @@ -254,7 +252,7 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) unsigned int pktstatus = 0; dma_sync_single_for_cpu(priv->device, priv->rxdescphys, - priv->sgdmadesclen, + SGDMA_DESC_LEN, DMA_FROM_DEVICE); pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred)); @@ -374,7 +372,7 @@ static int sgdma_async_read(struct altera_tse_private *priv) dma_sync_single_for_device(priv->device, priv->rxdescphys, - priv->sgdmadesclen, + SGDMA_DESC_LEN, DMA_TO_DEVICE); csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), @@ -402,7 +400,7 @@ static int sgdma_async_write(struct altera_tse_private *priv, csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status)); dma_sync_single_for_device(priv->device, priv->txdescphys, - priv->sgdmadesclen, DMA_TO_DEVICE); + SGDMA_DESC_LEN, DMA_TO_DEVICE); csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), priv->tx_dma_csr, diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h index 85bc33b21..bbd52f023 100644 --- a/drivers/net/ethernet/altera/altera_sgdmahw.h +++ b/drivers/net/ethernet/altera/altera_sgdmahw.h @@ -50,6 +50,7 @@ struct sgdma_descrip { u8 control; } __packed; +#define SGDMA_DESC_LEN sizeof(struct sgdma_descrip) #define SGDMA_STATUS_ERR BIT(0) #define SGDMA_STATUS_LENGTH_ERR BIT(1) diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 2adb24d45..103c30ddd 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -458,7 +458,6 @@ struct altera_tse_private { u32 rxctrlreg; dma_addr_t rxdescphys; dma_addr_t txdescphys; - size_t sgdmadesclen; struct list_head txlisthd; struct list_head rxlisthd; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 8207877d6..fe644823c 100644 --- 
a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1517,6 +1517,7 @@ static int altera_tse_probe(struct platform_device *pdev) spin_lock_init(&priv->tx_lock); spin_lock_init(&priv->rxdma_irq_lock); + netif_carrier_off(ndev); ret = register_netdev(ndev); if (ret) { dev_err(&pdev->dev, "failed to register TSE net device\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 2c063b60d..96f485ab6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) pdata->debugfs_xpcs_reg = 0; buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); + if (!buf) + return; + pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL); if (!pdata->xgbe_debugfs) { netdev_err(pdata->netdev, "debugfs_create_dir failed\n"); + kfree(buf); return; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index a4473d8ff..f672dba34 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1595,7 +1595,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) packet->rdesc_count, 1); /* Make sure ownership is written to the descriptor */ - dma_wmb(); + smp_wmb(); ring->cur = cur_index + 1; if (!packet->skb->xmit_more || diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index aae9d5ecd..dde048666 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1807,6 +1807,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) struct netdev_queue *txq; int processed = 0; unsigned int tx_packets = 0, tx_bytes = 0; + unsigned int cur; DBGPR("-->xgbe_tx_poll\n"); @@ -1814,10 +1815,15 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) if (!ring) return 0; + cur = ring->cur; + + /* Be sure we get ring->cur before accessing descriptor data */ + smp_rmb(); + txq = netdev_get_tx_queue(netdev, channel->queue_index); while ((processed < XGBE_TX_DESC_MAX_PROC) && - (ring->dirty != ring->cur)) { + (ring->dirty != cur)) { rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); rdesc = rdata->rdesc; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 717ce21b6..8c9d01ef7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -140,7 +140,7 @@ #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) -/* Descriptors required for maximum contigous TSO/GSO packet */ +/* Descriptors required for maximum contiguous TSO/GSO packet */ #define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1) /* Maximum possible descriptors needed for an SKB: diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index cfa37041a..c4bb8027b 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -689,16 +689,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev) netdev_dbg(ndev, "No phy-handle found in DT\n"); return -ENODEV; } - pdata->phy_dev = of_phy_find_device(phy_np); - } - phy_dev = pdata->phy_dev; + phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, + 0, pdata->phy_mode); + if (!phy_dev) { + netdev_err(ndev, "Could not connect to PHY\n"); + return -ENODEV; + } + + pdata->phy_dev = phy_dev; + } else { + phy_dev = 
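
The xgbe hunks above pair an smp_wmb() in the xmit path (descriptors written, then ring->cur advanced) with an smp_rmb() in the poll path (ring->cur snapshotted, then descriptor data read). The closest portable analogue of that pairing is a release/acquire pair, sketched here with C11 atomics and a pthread producer (an illustration of the ordering, not the driver code; build with cc -pthread):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

#define RING_SIZE 64

static int desc[RING_SIZE];	/* stands in for descriptor memory */
static atomic_uint cur;		/* producer's publish index */

static void *xmit_side(void *arg)
{
	unsigned int i;

	for (i = 0; i < RING_SIZE; i++) {
		desc[i] = 1000 + i;	/* write the descriptor ... */
		/* ... then publish: the store-release orders the data
		 * first, like the smp_wmb() before ring->cur advances */
		atomic_store_explicit(&cur, i + 1, memory_order_release);
	}
	return arg;
}

int main(void)
{
	pthread_t tx;
	unsigned int snap, dirty = 0;

	pthread_create(&tx, NULL, xmit_side, NULL);
	while (dirty < RING_SIZE) {
		/* the load-acquire pairs with the release above, like the
		 * smp_rmb() after xgbe_tx_poll() snapshots ring->cur */
		snap = atomic_load_explicit(&cur, memory_order_acquire);
		for (; dirty < snap; dirty++)
			assert(desc[dirty] == 1000 + (int)dirty);
	}
	pthread_join(tx, NULL);
	return 0;
}
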
pdata->phy_dev; - if (!phy_dev || - phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, - pdata->phy_mode)) { - netdev_err(ndev, "Could not connect to PHY\n"); - return -ENODEV; + if (!phy_dev || + phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, + pdata->phy_mode)) { + netdev_err(ndev, "Could not connect to PHY\n"); + return -ENODEV; + } } pdata->phy_speed = SPEED_UNKNOWN; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 541bed056..ff05bbcff 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -193,12 +193,16 @@ enum xgene_enet_rm { #define USERINFO_LEN 32 #define FPQNUM_POS 32 #define FPQNUM_LEN 12 +#define NV_POS 50 +#define NV_LEN 1 +#define LL_POS 51 +#define LL_LEN 1 #define LERR_POS 60 #define LERR_LEN 3 #define STASH_POS 52 #define STASH_LEN 2 #define BUFDATALEN_POS 48 -#define BUFDATALEN_LEN 12 +#define BUFDATALEN_LEN 15 #define DATAADDR_POS 0 #define DATAADDR_LEN 42 #define COHERENT_POS 63 @@ -215,9 +219,19 @@ enum xgene_enet_rm { #define IPHDR_LEN 6 #define EC_POS 22 /* Enable checksum */ #define EC_LEN 1 +#define ET_POS 23 /* Enable TSO */ #define IS_POS 24 /* IP protocol select */ #define IS_LEN 1 #define TYPE_ETH_WORK_MESSAGE_POS 44 +#define LL_BYTES_MSB_POS 56 +#define LL_BYTES_MSB_LEN 8 +#define LL_BYTES_LSB_POS 48 +#define LL_BYTES_LSB_LEN 12 +#define LL_LEN_POS 48 +#define LL_LEN_LEN 8 +#define DATALEN_MASK GENMASK(11, 0) + +#define LAST_BUFFER (0x7800ULL << BUFDATALEN_POS) struct xgene_enet_raw_desc { __le64 m0; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index a02ea7f8f..e47298faf 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -147,18 +147,27 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, { struct sk_buff *skb; struct device *dev; + skb_frag_t *frag; + dma_addr_t *frag_dma_addr; u16 skb_index; u8 status; - int ret = 0; + int i, ret = 0; skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); skb = cp_ring->cp_skb[skb_index]; + frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS]; dev = ndev_to_dev(cp_ring->ndev); dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), - GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)), + skb_headlen(skb), DMA_TO_DEVICE); + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag), + DMA_TO_DEVICE); + } + /* Checking for error */ status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); if (unlikely(status > 2)) { @@ -179,12 +188,16 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, static u64 xgene_enet_work_msg(struct sk_buff *skb) { + struct net_device *ndev = skb->dev; + struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct iphdr *iph; - u8 l3hlen, l4hlen = 0; - u8 csum_enable = 0; - u8 proto = 0; - u8 ethhdr; - u64 hopinfo; + u8 l3hlen = 0, l4hlen = 0; + u8 ethhdr, proto = 0, csum_enable = 0; + u64 hopinfo = 0; + u32 hdr_len, mss = 0; + u32 i, len, nr_frags; + + ethhdr = xgene_enet_hdr_len(skb->data); if (unlikely(skb->protocol != htons(ETH_P_IP)) && unlikely(skb->protocol != htons(ETH_P_8021Q))) @@ -201,14 +214,40 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb) l4hlen = tcp_hdrlen(skb) >> 2; csum_enable = 1; proto = TSO_IPPROTO_TCP; + if (ndev->features & NETIF_F_TSO) { + 
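The new NV_POS/LL_POS/BUFDATALEN definitions above follow the driver's POS/LEN convention: every descriptor field is a (bit position, bit width) pair in a 64-bit word, consumed by the header's SET_VAL()/GET_VAL() helpers. A standalone restatement of the scheme (the demo_* helpers are illustrative, not driver code):

    #include <linux/bitops.h>
    #include <linux/types.h>

    static inline u64 demo_set_field(int pos, int len, u64 val)
    {
        return (val & GENMASK_ULL(len - 1, 0)) << pos;
    }

    static inline u64 demo_get_field(int pos, int len, u64 src)
    {
        return (src >> pos) & GENMASK_ULL(len - 1, 0);
    }

    /* m1 packs a 42-bit DMA address at bit 0 and, after this patch, a
     * 15-bit buffer length at bit 48 (BUFDATALEN grows from 12 bits). */
    static u64 demo_encode_m1(u64 dma_addr, u16 hw_len)
    {
        return demo_set_field(0, 42, dma_addr) |
               demo_set_field(48, 15, hw_len);
    }

Widening BUFDATALEN is what lets a single descriptor cover the larger per-buffer transfers the new scatter-gather path generates.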
hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + + if (skb_is_nonlinear(skb)) { + len = skb_headlen(skb); + nr_frags = skb_shinfo(skb)->nr_frags; + + for (i = 0; i < 2 && i < nr_frags; i++) + len += skb_shinfo(skb)->frags[i].size; + + /* HW requires header must reside in 3 buffer */ + if (unlikely(hdr_len > len)) { + if (skb_linearize(skb)) + return 0; + } + } + + if (!mss || ((skb->len - hdr_len) <= mss)) + goto out; + + if (mss != pdata->mss) { + pdata->mss = mss; + pdata->mac_ops->set_mss(pdata); + } + hopinfo |= SET_BIT(ET); + } } else if (iph->protocol == IPPROTO_UDP) { l4hlen = UDP_HDR_SIZE; csum_enable = 1; } out: l3hlen = ip_hdrlen(skb) >> 2; - ethhdr = xgene_enet_hdr_len(skb->data); - hopinfo = SET_VAL(TCPHDR, l4hlen) | + hopinfo |= SET_VAL(TCPHDR, l4hlen) | SET_VAL(IPHDR, l3hlen) | SET_VAL(ETHHDR, ethhdr) | SET_VAL(EC, csum_enable) | @@ -219,35 +258,170 @@ out: return hopinfo; } +static u16 xgene_enet_encode_len(u16 len) +{ + return (len == BUFLEN_16K) ? 0 : len; +} + +static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len) +{ + desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) | + SET_VAL(BUFDATALEN, len)); +} + +static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring) +{ + __le64 *exp_bufs; + + exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS]; + memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS); + ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1); + + return exp_bufs; +} + +static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring) +{ + return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS]; +} + static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, struct sk_buff *skb) { struct device *dev = ndev_to_dev(tx_ring->ndev); struct xgene_enet_raw_desc *raw_desc; - dma_addr_t dma_addr; + __le64 *exp_desc = NULL, *exp_bufs = NULL; + dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; + skb_frag_t *frag; u16 tail = tx_ring->tail; u64 hopinfo; + u32 len, hw_len; + u8 ll = 0, nv = 0, idx = 0; + bool split = false; + u32 size, offset, ell_bytes = 0; + u32 i, fidx, nr_frags, count = 1; raw_desc = &tx_ring->raw_desc[tail]; + tail = (tail + 1) & (tx_ring->slots - 1); memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc)); - dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); + hopinfo = xgene_enet_work_msg(skb); + if (!hopinfo) + return -EINVAL; + raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) | + hopinfo); + + len = skb_headlen(skb); + hw_len = xgene_enet_encode_len(len); + + dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { netdev_err(tx_ring->ndev, "DMA mapping error\n"); return -EINVAL; } /* Hardware expects descriptor in little endian format */ - raw_desc->m0 = cpu_to_le64(tail); raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | - SET_VAL(BUFDATALEN, skb->len) | + SET_VAL(BUFDATALEN, hw_len) | SET_BIT(COHERENT)); - hopinfo = xgene_enet_work_msg(skb); - raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) | - hopinfo); - tx_ring->cp_ring->cp_skb[tail] = skb; - return 0; + if (!skb_is_nonlinear(skb)) + goto out; + + /* scatter gather */ + nv = 1; + exp_desc = (void *)&tx_ring->raw_desc[tail]; + tail = (tail + 1) & (tx_ring->slots - 1); + memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc)); + + nr_frags = skb_shinfo(skb)->nr_frags; + for (i = nr_frags; i < 4 ; i++) + exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER); + 
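The TSO block above touches the MAC only when the MSS actually changes, caching the last programmed value in pdata->mss; packets that fit in one segment fall back to the plain checksum path. That gating, isolated into a sketch (demo_prepare_tso() is hypothetical; xgene_enet_pdata and the set_mss op are the driver's):

    #include <linux/skbuff.h>
    #include "xgene_enet_main.h"    /* driver-local: struct xgene_enet_pdata */

    /* Returns true when the skb should go out as TSO. */
    static bool demo_prepare_tso(struct xgene_enet_pdata *pdata,
                                 struct sk_buff *skb, u32 hdr_len)
    {
        u32 mss = skb_shinfo(skb)->gso_size;

        if (!mss || (skb->len - hdr_len) <= mss)
            return false;       /* fits in one segment */

        if (mss != pdata->mss) {
            pdata->mss = mss;
            pdata->mac_ops->set_mss(pdata);     /* writes XG_TSIF_MSS_REG0 */
        }
        return true;
    }

Since the MSS register is per port rather than per packet, the cache avoids an MMIO write on every transmit in the common case where back-to-back flows share one MSS.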
+ frag_dma_addr = xgene_get_frag_dma_array(tx_ring); + + for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) { + if (!split) { + frag = &skb_shinfo(skb)->frags[fidx]; + size = skb_frag_size(frag); + offset = 0; + + pbuf_addr = skb_frag_dma_map(dev, frag, 0, size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, pbuf_addr)) + return -EINVAL; + + frag_dma_addr[fidx] = pbuf_addr; + fidx++; + + if (size > BUFLEN_16K) + split = true; + } + + if (size > BUFLEN_16K) { + len = BUFLEN_16K; + size -= BUFLEN_16K; + } else { + len = size; + split = false; + } + + dma_addr = pbuf_addr + offset; + hw_len = xgene_enet_encode_len(len); + + switch (i) { + case 0: + case 1: + case 2: + xgene_set_addr_len(exp_desc, i, dma_addr, hw_len); + break; + case 3: + if (split || (fidx != nr_frags)) { + exp_bufs = xgene_enet_get_exp_bufs(tx_ring); + xgene_set_addr_len(exp_bufs, idx, dma_addr, + hw_len); + idx++; + ell_bytes += len; + } else { + xgene_set_addr_len(exp_desc, i, dma_addr, + hw_len); + } + break; + default: + xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len); + idx++; + ell_bytes += len; + break; + } + + if (split) + offset += BUFLEN_16K; + } + count++; + + if (idx) { + ll = 1; + dma_addr = dma_map_single(dev, exp_bufs, + sizeof(u64) * MAX_EXP_BUFFS, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + dev_kfree_skb_any(skb); + return -EINVAL; + } + i = ell_bytes >> LL_BYTES_LSB_LEN; + exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | + SET_VAL(LL_BYTES_MSB, i) | + SET_VAL(LL_LEN, idx)); + raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes)); + } + +out: + raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | + SET_VAL(USERINFO, tx_ring->tail)); + tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; + tx_ring->tail = tail; + + return count; } static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, @@ -257,6 +431,7 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; u32 tx_level, cq_level; + int count; tx_level = pdata->ring_ops->len(tx_ring); cq_level = pdata->ring_ops->len(cp_ring); @@ -266,14 +441,17 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (xgene_enet_setup_tx_desc(tx_ring, skb)) { + if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE)) + return NETDEV_TX_OK; + + count = xgene_enet_setup_tx_desc(tx_ring, skb); + if (count <= 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - pdata->ring_ops->wr_cmd(tx_ring, 1); + pdata->ring_ops->wr_cmd(tx_ring, count); skb_tx_timestamp(skb); - tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1); pdata->stats.tx_packets++; pdata->stats.tx_bytes += skb->len; @@ -326,7 +504,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, /* strip off CRC as HW isn't doing this */ datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)); - datalen -= 4; + datalen = (datalen & DATALEN_MASK) - 4; prefetch(skb->data - NET_IP_ALIGN); skb_put(skb, datalen); @@ -358,26 +536,41 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, int budget) { struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); - struct xgene_enet_raw_desc *raw_desc; + struct xgene_enet_raw_desc *raw_desc, *exp_desc; u16 head = ring->head; u16 slots = ring->slots - 1; - int ret, count = 0; + int ret, count = 0, processed = 0; do { raw_desc = &ring->raw_desc[head]; + exp_desc = NULL; if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) break; /* read fpqnum field 
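The loop above maps each fragment with skb_frag_dma_map() and records the address in the completion ring's per-slot frag_dma_addr array, because the hardware's completion message does not echo the fragment addresses back. A self-contained sketch of the two halves of that bookkeeping (demo_* names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static int demo_map_frags(struct device *dev, struct sk_buff *skb,
                              dma_addr_t *slot)
    {
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

            slot[i] = skb_frag_dma_map(dev, frag, 0,
                                       skb_frag_size(frag),
                                       DMA_TO_DEVICE);
            if (dma_mapping_error(dev, slot[i]))
                return -EINVAL;
        }
        return 0;
    }

    static void demo_unmap_frags(struct device *dev, struct sk_buff *skb,
                                 const dma_addr_t *slot)
    {
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
            dma_unmap_page(dev, slot[i],
                           skb_frag_size(&skb_shinfo(skb)->frags[i]),
                           DMA_TO_DEVICE);
    }

The unmap half corresponds to the dma_unmap_page() loop added to xgene_enet_tx_completion() earlier in this patch; sizing the array as slots * MAX_SKB_FRAGS gives every in-flight skb its own stretch of saved addresses.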
after dataaddr field */ dma_rmb(); + if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) { + head = (head + 1) & slots; + exp_desc = &ring->raw_desc[head]; + + if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) { + head = (head - 1) & slots; + break; + } + dma_rmb(); + count++; + } if (is_rx_desc(raw_desc)) ret = xgene_enet_rx_frame(ring, raw_desc); else ret = xgene_enet_tx_completion(ring, raw_desc); xgene_enet_mark_desc_slot_empty(raw_desc); + if (exp_desc) + xgene_enet_mark_desc_slot_empty(exp_desc); head = (head + 1) & slots; count++; + processed++; if (ret) break; @@ -393,7 +586,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, } } - return count; + return processed; } static int xgene_enet_napi(struct napi_struct *napi, const int budget) @@ -738,12 +931,13 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; struct xgene_enet_desc_ring *buf_pool = NULL; enum xgene_ring_owner owner; + dma_addr_t dma_exp_bufs; u8 cpu_bufnum = pdata->cpu_bufnum; u8 eth_bufnum = pdata->eth_bufnum; u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; u16 ring_id; - int ret; + int ret, size; /* allocate rx descriptor ring */ owner = xgene_derive_ring_owner(pdata); @@ -794,6 +988,15 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) ret = -ENOMEM; goto err; } + + size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; + tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs, + GFP_KERNEL); + if (!tx_ring->exp_bufs) { + ret = -ENOMEM; + goto err; + } + pdata->tx_ring = tx_ring; if (!pdata->cq_cnt) { @@ -818,6 +1021,16 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) ret = -ENOMEM; goto err; } + + size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; + cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, + size, GFP_KERNEL); + if (!cp_ring->frag_dma_addr) { + devm_kfree(dev, cp_ring->cp_skb); + ret = -ENOMEM; + goto err; + } + pdata->tx_ring->cp_ring = cp_ring; pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); @@ -905,40 +1118,6 @@ static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pda return ret; } -static int xgene_get_mac_address(struct device *dev, - unsigned char *addr) -{ - int ret; - - ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6); - if (ret) - ret = device_property_read_u8_array(dev, "mac-address", - addr, 6); - if (ret) - return -ENODEV; - - return ETH_ALEN; -} - -static int xgene_get_phy_mode(struct device *dev) -{ - int i, ret; - char *modestr; - - ret = device_property_read_string(dev, "phy-connection-type", - (const char **)&modestr); - if (ret) - ret = device_property_read_string(dev, "phy-mode", - (const char **)&modestr); - if (ret) - return -ENODEV; - - for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { - if (!strcasecmp(modestr, phy_modes(i))) - return i; - } - return -ENODEV; -} static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { @@ -998,12 +1177,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; - if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN) + if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) eth_hw_addr_random(ndev); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); - pdata->phy_mode = xgene_get_phy_mode(dev); + pdata->phy_mode = device_get_phy_mode(dev); if (pdata->phy_mode < 0) { dev_err(dev, "Unable to get phy-connection-type\n"); return pdata->phy_mode; @@ -1207,7 +1386,8 @@ static 
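xgene_get_mac_address() and xgene_get_phy_mode() can be deleted above because the generic firmware-node helpers perform the same lookups for both DT and ACPI: device_get_mac_address() tries the standard "mac-address"/"local-mac-address" properties, and device_get_phy_mode() tries "phy-mode"/"phy-connection-type". A usage sketch (demo_read_fw_config() is illustrative):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/property.h>

    static int demo_read_fw_config(struct device *dev,
                                   struct net_device *ndev, int *phy_mode)
    {
        /* NULL return: no usable address in firmware, so randomize */
        if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
            eth_hw_addr_random(ndev);

        *phy_mode = device_get_phy_mode(dev);   /* PHY_INTERFACE_MODE_* */
        return *phy_mode < 0 ? *phy_mode : 0;
    }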
int xgene_enet_probe(struct platform_device *pdev) xgene_enet_set_ethtool_ops(ndev); ndev->features |= NETIF_F_IP_CSUM | NETIF_F_GSO | - NETIF_F_GRO; + NETIF_F_GRO | + NETIF_F_SG; of_id = of_match_device(xgene_enet_of_match, &pdev->dev); if (of_id) { @@ -1233,6 +1413,12 @@ static int xgene_enet_probe(struct platform_device *pdev) xgene_enet_setup_ops(pdata); + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + ndev->features |= NETIF_F_TSO; + pdata->mss = XGENE_ENET_MSS; + } + ndev->hw_features = ndev->features; + ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 1c85fc877..50f92c39e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -40,8 +40,12 @@ #define XGENE_DRV_VERSION "v1.0" #define XGENE_ENET_MAX_MTU 1536 #define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) +#define BUFLEN_16K (16 * 1024) #define NUM_PKT_BUF 64 #define NUM_BUFPOOL 32 +#define MAX_EXP_BUFFS 256 +#define XGENE_ENET_MSS 1448 +#define XGENE_MIN_ENET_FRAME_SIZE 60 #define START_CPU_BUFNUM_0 0 #define START_ETH_BUFNUM_0 2 @@ -79,6 +83,7 @@ struct xgene_enet_desc_ring { u16 num; u16 head; u16 tail; + u16 exp_buf_tail; u16 slots; u16 irq; char irq_name[IRQ_ID_SIZE]; @@ -93,6 +98,7 @@ struct xgene_enet_desc_ring { u8 nbufpool; struct sk_buff *(*rx_skb); struct sk_buff *(*cp_skb); + dma_addr_t *frag_dma_addr; enum xgene_enet_ring_cfgsize cfgsize; struct xgene_enet_desc_ring *cp_ring; struct xgene_enet_desc_ring *buf_pool; @@ -102,6 +108,7 @@ struct xgene_enet_desc_ring { struct xgene_enet_raw_desc *raw_desc; struct xgene_enet_raw_desc16 *raw_desc16; }; + __le64 *exp_bufs; }; struct xgene_mac_ops { @@ -112,6 +119,7 @@ struct xgene_mac_ops { void (*tx_disable)(struct xgene_enet_pdata *pdata); void (*rx_disable)(struct xgene_enet_pdata *pdata); void (*set_mac_addr)(struct xgene_enet_pdata *pdata); + void (*set_mss)(struct xgene_enet_pdata *pdata); void (*link_state)(struct work_struct *work); }; @@ -170,6 +178,7 @@ struct xgene_enet_pdata { u8 eth_bufnum; u8 bp_bufnum; u16 ring_num; + u32 mss; }; struct xgene_indirect_ctl { @@ -204,6 +213,9 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) #define GET_VAL(field, src) \ xgene_enet_get_field_value(field ## _POS, field ## _LEN, src) +#define GET_BIT(field, src) \ + xgene_enet_get_field_value(field ## _POS, 1, src) + static inline struct device *ndev_to_dev(struct net_device *ndev) { return ndev->dev.parent; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 05edb847c..7a28a48cb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -184,6 +184,11 @@ static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata) xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1); } +static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata) +{ + xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss); +} + static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata) { u32 data; @@ -204,8 +209,8 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata) data &= ~HSTLENCHK; xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data); - xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600); xgene_xgmac_set_mac_addr(pdata); + xgene_xgmac_set_mss(pdata); xgene_enet_rd_csr(pdata, 
XG_RSIF_CONFIG_REG_ADDR, &data); data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; @@ -329,6 +334,7 @@ struct xgene_mac_ops xgene_xgmac_ops = { .rx_disable = xgene_xgmac_rx_disable, .tx_disable = xgene_xgmac_tx_disable, .set_mac_addr = xgene_xgmac_set_mac_addr, + .set_mss = xgene_xgmac_set_mss, .link_state = xgene_enet_link_state }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index bf0a99435..f8f908dbf 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -62,7 +62,9 @@ #define XCLE_BYPASS_REG0_ADDR 0x0160 #define XCLE_BYPASS_REG1_ADDR 0x0164 #define XG_CFG_BYPASS_ADDR 0x0204 +#define XG_CFG_LINK_AGGR_RESUME_0_ADDR 0x0214 #define XG_LINK_STATUS_ADDR 0x0228 +#define XG_TSIF_MSS_REG0_ADDR 0x02a4 #define XG_ENET_SPARE_CFG_REG_ADDR 0x040c #define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 #define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c index f9cb99bfb..ffd180570 100644 --- a/drivers/net/ethernet/arc/emac_arc.c +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -78,6 +78,7 @@ static const struct of_device_id emac_arc_dt_ids[] = { { .compatible = "snps,arc-emac" }, { /* Sentinel */ } }; +MODULE_DEVICE_TABLE(of, emac_arc_dt_ids); static struct platform_driver emac_arc_driver = { .probe = emac_arc_probe, diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 932bd1862..2795d6db1 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -874,6 +874,8 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, atl1c_clean_buffer(pdev, buffer_info); } + netdev_reset_queue(adapter->netdev); + /* Zero out Tx-buffers */ memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * ring_count); @@ -1551,6 +1553,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); u16 hw_next_to_clean; u16 reg; + unsigned int total_bytes = 0, total_packets = 0; reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX; @@ -1558,12 +1561,18 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, while (next_to_clean != hw_next_to_clean) { buffer_info = &tpd_ring->buffer_info[next_to_clean]; + if (buffer_info->skb) { + total_bytes += buffer_info->skb->len; + total_packets++; + } atl1c_clean_buffer(pdev, buffer_info); if (++next_to_clean == tpd_ring->count) next_to_clean = 0; atomic_set(&tpd_ring->next_to_clean, next_to_clean); } + netdev_completed_queue(adapter->netdev, total_packets, total_bytes); + if (netif_queue_stopped(adapter->netdev) && netif_carrier_ok(adapter->netdev)) { netif_wake_queue(adapter->netdev); @@ -2256,6 +2265,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); } else { + netdev_sent_queue(adapter->netdev, skb->len); atl1c_tx_queue(adapter, skb, tpd, type); spin_unlock_irqrestore(&adapter->tx_lock, flags); } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 8be9eab73..e930aa9a3 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -139,6 +139,16 @@ config BNX2X_SRIOV Virtualization support in the 578xx and 57712 products. This allows for virtual function acceleration in virtual environments. 
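The atl1c changes above wire up Byte Queue Limits (BQL): bytes are reported when queued to hardware, credited back as completions are harvested, and the accounting is reset whenever the Tx ring is flushed. The three touch points in sketch form (the demo_* wrappers are illustrative; the netdev_* calls are the real API):

    #include <linux/netdevice.h>

    /* xmit path, once the skb is owned by hardware */
    static void demo_on_xmit(struct net_device *dev, struct sk_buff *skb)
    {
        netdev_sent_queue(dev, skb->len);
    }

    /* Tx-clean path, after walking completed descriptors */
    static void demo_on_clean(struct net_device *dev,
                              unsigned int pkts, unsigned int bytes)
    {
        netdev_completed_queue(dev, pkts, bytes);
    }

    /* ring flush (ifdown, reset): in-flight accounting restarts at 0 */
    static void demo_on_ring_reset(struct net_device *dev)
    {
        netdev_reset_queue(dev);
    }

The netdev_reset_queue() call in atl1c_clean_tx_ring() matters as much as the other two: without it, bytes counted as sent but never completed would be carried across a reset and could eventually wedge the queue.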
+config BNX2X_VXLAN + bool "Virtual eXtensible Local Area Network support" + default n + depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m) + ---help--- + This enables hardware offload support for the VXLAN protocol over the + NetXtremeII series adapters. + Say Y here if you want to enable hardware offload support for + Virtual eXtensible Local Area Network (VXLAN) in the driver. + config BGMAC tristate "BCMA bus GBit core support" depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index a7f2cc3e4..4183c2abe 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -2049,7 +2049,7 @@ static void swphy_poll_timer(unsigned long data) for (i = 0; i < priv->num_ports; i++) { struct bcm63xx_enetsw_port *port; - int val, j, up, advertise, lpa, lpa2, speed, duplex, media; + int val, j, up, advertise, lpa, speed, duplex, media; int external_phy = bcm_enet_port_is_rgmii(i); u8 override; @@ -2092,22 +2092,27 @@ static void swphy_poll_timer(unsigned long data) lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, MII_LPA); - lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, - MII_STAT1000); - /* figure out media and duplex from advertise and LPA values */ media = mii_nway_result(lpa & advertise); duplex = (media & ADVERTISE_FULL) ? 1 : 0; - if (lpa2 & LPA_1000FULL) - duplex = 1; - - if (lpa2 & (LPA_1000FULL | LPA_1000HALF)) - speed = 1000; - else { - if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) - speed = 100; - else - speed = 10; + + if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) + speed = 100; + else + speed = 10; + + if (val & BMSR_ESTATEN) { + advertise = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_CTRL1000); + + lpa = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_STAT1000); + + if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF) + && lpa & (LPA_1000FULL | LPA_1000HALF)) { + speed = 1000; + duplex = (lpa & LPA_1000FULL); + } } dev_info(&priv->pdev->dev, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 4566cdf0b..f1b5364f3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -933,6 +933,21 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) return IRQ_HANDLED; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcm_sysport_poll_controller(struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + + disable_irq(priv->irq0); + bcm_sysport_rx_isr(priv->irq0, priv); + enable_irq(priv->irq0); + + disable_irq(priv->irq1); + bcm_sysport_tx_isr(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) { @@ -1723,6 +1738,9 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { .ndo_set_features = bcm_sysport_set_features, .ndo_set_rx_mode = bcm_sysport_set_rx_mode, .ndo_set_mac_address = bcm_sysport_change_mac, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcm_sysport_poll_controller, +#endif }; #define REV_FMT "v%2x.%02x" @@ -2061,6 +2079,7 @@ static const struct of_device_id bcm_sysport_of_match[] = { { .compatible = "brcm,systemport" }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, diff --git
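The bcm63xx_enet rework above consults MII_CTRL1000/MII_STAT1000 only when the PHY advertises extended status (BMSR_ESTATEN), and requires a 1000BASE-T mode on both sides before reporting gigabit. The same resolution as a standalone helper (a sketch: register values are passed in instead of being read over MDIO):

    #include <linux/mii.h>

    static void demo_resolve_link(int bmsr, int advertise, int lpa,
                                  int ctrl1000, int stat1000,
                                  int *speed, int *duplex)
    {
        int media = mii_nway_result(lpa & advertise);

        *duplex = (media & ADVERTISE_FULL) ? 1 : 0;
        *speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) ?
                 100 : 10;

        /* Gigabit needs extended status plus agreement in the
         * 1000BASE-T register pair. */
        if ((bmsr & BMSR_ESTATEN) &&
            (ctrl1000 & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
            (stat1000 & (LPA_1000FULL | LPA_1000HALF))) {
            *speed = 1000;
            *duplex = !!(stat1000 & LPA_1000FULL);
        }
    }

Reading MII_STAT1000 unconditionally, as the old code did, can return unreliable values on PHYs without 1000BASE-T support; that is what the BMSR_ESTATEN test guards against.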
a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 21e3c38c7..28f7610b0 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1447,7 +1447,7 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac) struct phy_device *phy_dev; int err; - phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL); if (!phy_dev || IS_ERR(phy_dev)) { bgmac_err(bgmac, "Failed to register fixed PHY device\n"); return -ENODEV; @@ -1549,11 +1549,20 @@ static int bgmac_probe(struct bcma_device *core) struct net_device *net_dev; struct bgmac *bgmac; struct ssb_sprom *sprom = &core->bus->sprom; - u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac; + u8 *mac; int err; - /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */ - if (core->core_unit > 1) { + switch (core->core_unit) { + case 0: + mac = sprom->et0mac; + break; + case 1: + mac = sprom->et1mac; + break; + case 2: + mac = sprom->et2mac; + break; + default: pr_err("Unsupported core_unit %d\n", core->core_unit); return -ENOTSUPP; } @@ -1588,8 +1597,17 @@ static int bgmac_probe(struct bcma_device *core) } bgmac->cmn = core->bus->drv_gmac_cmn.core; - bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr : - sprom->et0phyaddr; + switch (core->core_unit) { + case 0: + bgmac->phyaddr = sprom->et0phyaddr; + break; + case 1: + bgmac->phyaddr = sprom->et1phyaddr; + break; + case 2: + bgmac->phyaddr = sprom->et2phyaddr; + break; + } bgmac->phyaddr &= BGMAC_PHY_MASK; if (bgmac->phyaddr == BGMAC_PHY_MASK) { bgmac_err(bgmac, "No PHY found\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index cd4ae76bb..b5e64b022 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1,6 +1,8 @@ -/* bnx2x.h: Broadcom Everest network driver. +/* bnx2x.h: QLogic Everest network driver. 
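The bgmac hunk above passes a new -1 argument because fixed_phy_register() grew a link-GPIO parameter in this cycle; -1 means no GPIO is watched for link state. Usage sketch with a status block similar to bgmac's:

    #include <linux/phy.h>
    #include <linux/phy_fixed.h>

    static struct phy_device *demo_register_fixed_phy(void)
    {
        struct fixed_phy_status status = {
            .link   = 1,
            .speed  = SPEED_1000,
            .duplex = DUPLEX_FULL,
        };

        /* PHY_POLL irq, no link GPIO (-1), no device_node */
        return fixed_phy_register(PHY_POLL, &status, -1, NULL);
    }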
* * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,7 +32,7 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.710.51-0" +#define DRV_MODULE_VERSION "1.712.30-0" #define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 @@ -1226,6 +1228,10 @@ struct bnx2x_slowpath { struct eth_classify_rules_ramrod_data e2; } mac_rdata; + union { + struct eth_classify_rules_ramrod_data e2; + } vlan_rdata; + union { struct tstorm_eth_mac_filter_config e1x; struct eth_filter_rules_ramrod_data e2; @@ -1386,6 +1392,8 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_GET_DRV_VERSION, + BNX2X_SP_RTNL_ADD_VXLAN_PORT, + BNX2X_SP_RTNL_DEL_VXLAN_PORT, }; enum bnx2x_iov_flag { @@ -1408,6 +1416,9 @@ struct bnx2x_sp_objs { /* Queue State object */ struct bnx2x_queue_sp_obj q_obj; + + /* VLANs object */ + struct bnx2x_vlan_mac_obj vlan_obj; }; struct bnx2x_fp_stats { @@ -1422,6 +1433,13 @@ enum { SUB_MF_MODE_UNKNOWN = 0, SUB_MF_MODE_UFP, SUB_MF_MODE_NPAR1_DOT_5, + SUB_MF_MODE_BD, +}; + +struct bnx2x_vlan_entry { + struct list_head link; + u16 vid; + bool hw; }; struct bnx2x { @@ -1636,6 +1654,8 @@ struct bnx2x { u8 mf_sub_mode; #define IS_MF_UFP(bp) (IS_MF_SD(bp) && \ bp->mf_sub_mode == SUB_MF_MODE_UFP) +#define IS_MF_BD(bp) (IS_MF_SD(bp) && \ + bp->mf_sub_mode == SUB_MF_MODE_BD) u8 wol; @@ -1860,8 +1880,6 @@ struct bnx2x { int dcb_version; /* CAM credit pools */ - - /* used only in sriov */ struct bnx2x_credit_pool_obj vlans_pool; struct bnx2x_credit_pool_obj macs_pool; @@ -1924,6 +1942,12 @@ struct bnx2x { u16 rx_filter; struct bnx2x_link_report_data vf_link_vars; + struct list_head vlan_reg; + u16 vlan_cnt; + u16 vlan_credit; + u16 vxlan_dst_port; + u8 vxlan_dst_port_count; + bool accept_any_vlan; }; /* Tx queues may be less or equal to Rx queues */ @@ -1951,23 +1975,14 @@ extern int num_queues; #define RSS_IPV6_TCP_CAP_MASK \ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY -/* func init flags */ -#define FUNC_FLG_RSS 0x0001 -#define FUNC_FLG_STATS 0x0002 -/* removed FUNC_FLG_UNMATCHED 0x0004 */ -#define FUNC_FLG_TPA 0x0008 -#define FUNC_FLG_SPQ 0x0010 -#define FUNC_FLG_LEADING 0x0020 /* PF only */ -#define FUNC_FLG_LEADING_STATS 0x0040 struct bnx2x_func_init_params { /* dma */ - dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ - dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ + bool spq_active; + dma_addr_t spq_map; + u16 spq_prod; - u16 func_flgs; u16 func_id; /* abs fid */ u16 pf_id; - u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ }; #define for_each_cnic_queue(bp, var) \ @@ -2077,6 +2092,11 @@ struct bnx2x_func_init_params { int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, struct bnx2x_vlan_mac_obj *obj, bool set, int mac_type, unsigned long *ramrod_flags); + +int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, + struct bnx2x_vlan_mac_obj *obj, bool set, + unsigned long *ramrod_flags); + /** * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object * @@ -2481,6 +2501,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define VF_ACQUIRE_THRESH 3 #define VF_ACQUIRE_MAC_FILTERS 1 #define VF_ACQUIRE_MC_FILTERS 10 +#define VF_ACQUIRE_VLAN_FILTERS 2 /* VLAN0 + 'real' VLAN */ #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \ 
(!((me_reg) & ME_REG_VF_ERR))) @@ -2553,6 +2574,10 @@ void bnx2x_notify_link_changed(struct bnx2x *bp); (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \ IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp)) +/* Determines whether BW configuration arrives in 100Mb units or in + * percentages from actual physical link speed. + */ +#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp)) #define SET_FLAG(value, mask, flag) \ do {\ @@ -2577,6 +2602,8 @@ void bnx2x_set_local_cmng(struct bnx2x *bp); void bnx2x_update_mng_version(struct bnx2x *bp); +void bnx2x_update_mfw_dump(struct bnx2x *bp); + #define MCPR_SCRATCH_BASE(bp) \ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) @@ -2589,4 +2616,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb); #define BNX2X_MAX_PHC_DRIFT 31000000 #define BNX2X_PTP_TX_TIMEOUT +/* Re-configure all previously configured vlan filters. + * Meant for implicit re-load flows. + */ +int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp); + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 5ba60a7cf..bd688aec2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1,6 +1,8 @@ -/* bnx2x_cmn.c: Broadcom Everest network driver. +/* bnx2x_cmn.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1188,7 +1190,7 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) /* Calculate the current MAX line speed limit for the MF * devices */ - if (IS_MF_SI(bp)) + if (IS_MF_PERCENT_BW(bp)) line_speed = (line_speed * maxCfg) / 100; else { /* SD mode */ u16 vn_max_rate = maxCfg * 100; @@ -2103,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, if (rss_obj->udp_rss_v6) __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); - if (!CHIP_IS_E1x(bp)) + if (!CHIP_IS_E1x(bp)) { + /* valid only for TUNN_MODE_VXLAN tunnel mode */ + __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags); + /* valid only for TUNN_MODE_GRE tunnel mode */ - __set_bit(BNX2X_RSS_GRE_INNER_HDRS, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags); + } } else { __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); } @@ -2509,6 +2516,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) fp->mode = TPA_MODE_DISABLED; } +void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) +{ + u32 cur; + + if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) + return; + + cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); + DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n", + cur, state); + + SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); +} + int bnx2x_load_cnic(struct bnx2x *bp) { int i, rc, port = BP_PORT(bp); @@ -2826,6 +2847,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Start fast path */ + /* Re-configure vlan filters */ + rc = bnx2x_vlan_reconfigure_vid(bp); + if (rc) + LOAD_ERROR_EXIT(bp, load_error3); + /* Initialize Rx filter. 
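IS_MF_PERCENT_BW() above names the split between the two conventions the maxCfg field can carry: a percentage of the live physical link speed (SI/UFP/BD modes) or an absolute count of 100 Mb/s units (classic SD mode). A worked sketch of bnx2x_get_mf_speed()'s arithmetic (demo function, illustrative values): with maxCfg = 40, both paths give 4000 Mb/s on a 10G link, but only the percentage path scales if the link renegotiates to 1G.

    #include <linux/types.h>

    static u16 demo_mf_line_speed(u16 link_speed, u16 max_cfg,
                                  bool percent_bw)
    {
        if (percent_bw)
            return (link_speed * max_cfg) / 100;    /* 10000*40/100 = 4000 */
        return max_cfg * 100;                       /* 40*100, link-independent */
    }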
*/ bnx2x_set_rx_mode_inner(bp); @@ -2872,6 +2898,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* mark driver is loaded in shmem2 */ u32 val; val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + val &= ~DRV_FLAGS_MTU_MASK; + val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | DRV_FLAGS_CAPABILITIES_LOADED_L2); @@ -2884,10 +2912,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) return -EBUSY; } + /* Update driver data for On-Chip MFW dump. */ + if (IS_PF(bp)) + bnx2x_update_mfw_dump(bp); + /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) bnx2x_dcbx_init(bp, false); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); + DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n"); return 0; @@ -2955,6 +2990,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) DP(NETIF_MSG_IFUP, "Starting NIC unload\n"); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); + /* mark driver is unloaded in shmem2 */ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 val; @@ -3676,7 +3714,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); pbd_e2->data.tunnel_data.flags |= - ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER; + ETH_TUNNEL_DATA_IPV6_OUTER; } pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); @@ -4183,6 +4221,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) +{ + int mfw_vn = BP_FW_MB_IDX(bp); + u32 tmp; + + /* If the shmem shouldn't affect configuration, reflect */ + if (!IS_MF_BD(bp)) { + int i; + + for (i = 0; i < BNX2X_MAX_PRIORITY; i++) + c2s_map[i] = i; + *c2s_default = 0; + + return; + } + + tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + c2s_map[0] = tmp & 0xff; + c2s_map[1] = (tmp >> 8) & 0xff; + c2s_map[2] = (tmp >> 16) & 0xff; + c2s_map[3] = (tmp >> 24) & 0xff; + + tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + c2s_map[4] = tmp & 0xff; + c2s_map[5] = (tmp >> 8) & 0xff; + c2s_map[6] = (tmp >> 16) & 0xff; + c2s_map[7] = (tmp >> 24) & 0xff; + + tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff; +} + /** * bnx2x_setup_tc - routine to configure net_device for multi tc * @@ -4193,8 +4266,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) { - int cos, prio, count, offset; struct bnx2x *bp = netdev_priv(dev); + u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def; + int cos, prio, count, offset; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); @@ -4218,12 +4292,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return -EINVAL; } + bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def); + /* configure priority to traffic class mapping */ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { - netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); + int outer_prio = c2s_map[prio]; + + netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 
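bnx2x_get_c2s_mapping() above unpacks eight 8-bit inner-to-outer priority entries from two byte-swapped shmem words, low byte first. The unpacking step factored into a loop, as a sketch (demo_unpack_c2s() is not driver code):

    #include <linux/types.h>

    static void demo_unpack_c2s(u32 lower, u32 upper, u8 map[8])
    {
        int i;

        for (i = 0; i < 4; i++) {
            map[i]     = (lower >> (8 * i)) & 0xff; /* entries 0-3 */
            map[i + 4] = (upper >> (8 * i)) & 0xff; /* entries 4-7 */
        }
    }

The open-coded version in the patch is equivalent; the be32_to_cpu() step beforehand normalizes the words so the same byte order works on either endianness.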
"mapping priority %d to tc %d\n", - prio, bp->prio_to_cos[prio]); + outer_prio, bp->prio_to_cos[outer_prio]); } /* Use this configuration to differentiate tc0 from other COSes @@ -4277,6 +4355,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) if (netif_running(dev)) rc = bnx2x_set_eth_mac(bp, true); + if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) + SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return rc; } @@ -4830,6 +4911,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) */ dev->mtu = new_mtu; + if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) + SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return bnx2x_reload_if_running(dev); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 03b7404d5..b7d32e841 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1,6 +1,8 @@ -/* bnx2x_cmn.h: Broadcom Everest network driver. +/* bnx2x_cmn.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -620,6 +622,14 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features); */ void bnx2x_tx_timeout(struct net_device *dev); +/** bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration + * c2s_map should have BNX2X_MAX_PRIORITY entries. + * @bp: driver handle + * @c2s_map: should have BNX2X_MAX_PRIORITY entries for mapping + * @c2s_default: entry for non-tagged configuration + */ +void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default); + /*********************** Inlines **********************************/ /*********************** Fast path ********************************/ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) @@ -931,14 +941,35 @@ static inline int bnx2x_func_start(struct bnx2x *bp) start_params->mf_mode = bp->mf_mode; start_params->sd_vlan_tag = bp->mf_ov; + /* Configure Ethertype for BD mode */ + if (IS_MF_BD(bp)) { + DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n"); + start_params->sd_vlan_eth_type = ETH_P_8021AD; + REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD); + REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD); + REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD); + + bnx2x_get_c2s_mapping(bp, start_params->c2s_pri, + &start_params->c2s_pri_default); + start_params->c2s_pri_valid = 1; + + DP(NETIF_MSG_IFUP, + "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n", + start_params->c2s_pri[0], start_params->c2s_pri[1], + start_params->c2s_pri[2], start_params->c2s_pri[3], + start_params->c2s_pri[4], start_params->c2s_pri[5], + start_params->c2s_pri[6], start_params->c2s_pri[7], + start_params->c2s_pri_default); + } + if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) start_params->network_cos_mode = STATIC_COS; else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - start_params->tunnel_mode = TUNN_MODE_GRE; - start_params->gre_tunnel_type = IPGRE_TUNNEL; - start_params->inner_gre_rss_en = 1; + start_params->vxlan_dst_port = bp->vxlan_dst_port; + + start_params->inner_rss = 1; if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { start_params->class_fail_ethtype = ETH_P_FIP; @@ -1037,6 +1068,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, BNX2X_FILTER_MAC_PENDING, &bp->sp_state, 
obj_type, &bp->macs_pool); + + if (!CHIP_IS_E1x(bp)) + bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj, + fp->cl_id, fp->cid, BP_FUNC(bp), + bnx2x_sp(bp, vlan_rdata), + bnx2x_sp_mapping(bp, vlan_rdata), + BNX2X_FILTER_VLAN_PENDING, + &bp->sp_state, obj_type, + &bp->vlans_pool); } /** @@ -1096,7 +1136,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp) bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), bnx2x_get_path_func_num(bp)); - bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1, + bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp), bnx2x_get_path_func_num(bp)); /* RSS configuration object */ @@ -1106,6 +1146,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp) bnx2x_sp_mapping(bp, rss_rdata), BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, BNX2X_OBJ_TYPE_RX); + + bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp)); } static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) @@ -1339,4 +1381,23 @@ void bnx2x_squeeze_objects(struct bnx2x *bp); void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag, u32 verbose); +/** + * bnx2x_set_os_driver_state - write driver state for management FW usage + * + * @bp: driver handle + * @state: OS_DRIVER_STATE_* value reflecting current driver state + */ +void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state); + +/** + * bnx2x_nvram_read - reads data from nvram [might sleep] + * + * @bp: driver handle + * @offset: byte offset in nvram + * @ret_buf: pointer to buffer where data is to be stored + * @buf_size: Length of 'ret_buf' in bytes + */ +int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, + int buf_size); + #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 6e4294ed1..7ccf6684e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -1,15 +1,17 @@ -/* bnx2x_dcb.c: Broadcom Everest network driver. +/* bnx2x_dcb.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior @@ -1850,6 +1852,8 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, if (bp->dcbx_port_params.ets.cos_params[cos]. pri_bitmask & pri_bit) tt2cos[pri].cos = cos; + + pfc_fw_cfg->dcb_outer_pri[pri] = ttp[pri]; } /* we never want the FW to add a 0 vlan tag */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index c6939ecb0..9a9517c0f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -1,15 +1,17 @@ -/* bnx2x_dcb.h: Broadcom Everest network driver. 
+/* bnx2x_dcb.h: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 741aa130c..eccfa13b0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -1,15 +1,17 @@ -/* bnx2x_dump.h: Broadcom Everest network driver. +/* bnx2x_dump.h: QLogic Everest network driver. * * Copyright (c) 2012-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 5907c821d..be628bd9f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1,6 +1,8 @@ -/* bnx2x_ethtool.c: Broadcom Everest network driver. +/* bnx2x_ethtool.c: QLogic Everest network driver. 
* * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1129,6 +1131,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) } else bp->wol = 0; + if (SHMEM2_HAS(bp, curr_cfg)) + SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return 0; } @@ -1343,8 +1348,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, return rc; } -static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, - int buf_size) +int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, + int buf_size) { int rc; u32 cmd_flags; @@ -3346,6 +3351,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) udp_rss_requested = 0; else return -EINVAL; + + if (CHIP_IS_E1x(bp) && udp_rss_requested) { + DP(BNX2X_MSG_ETHTOOL, + "57710, 57711 boards don't support RSS according to UDP 4-tuple\n"); + return -EINVAL; + } + if ((info->flow_type == UDP_V4_FLOW) && (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; @@ -3578,17 +3590,8 @@ static int bnx2x_get_ts_info(struct net_device *dev, info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 7636e3c18..226ab29f4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -1,6 +1,8 @@ -/* bnx2x_fw_defs.h: Broadcom Everest network driver. +/* bnx2x_fw_defs.h: Qlogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -372,7 +374,7 @@ #define MAX_COS_NUMBER 4 #define MAX_TRAFFIC_TYPES 8 #define MAX_PFC_PRIORITIES 8 - +#define MAX_VLAN_PRIORITIES 8 /* used by array traffic_type_to_priority[] to mark traffic type \ that is not mapped to priority*/ #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h index 8aafd9b5d..9e3b5a1e9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h @@ -1,6 +1,8 @@ /* bnx2x_fw_file_hdr.h: FW binary file header structure. 
* * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 42b4cb34a..c6158d662 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1,6 +1,8 @@ -/* bnx2x_hsi.h: Broadcom Everest network driver. +/* bnx2x_hsi.h: Qlogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -729,6 +731,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858 0x00001200 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 @@ -786,6 +789,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858 0x00001200 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 @@ -864,6 +868,7 @@ struct shared_feat_cfg { /* NVRAM Offset */ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE 0x00000500 #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600 #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700 @@ -2064,6 +2069,45 @@ struct ncsi_oem_fcoe_features { #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0 }; +enum curr_cfg_method_e { + CURR_CFG_MET_NONE = 0, /* default config */ + CURR_CFG_MET_OS = 1, + CURR_CFG_MET_VENDOR_SPEC = 2,/* e.g. 
Option ROM, NPAR, O/S Cfg Utils */ +}; + +#define FC_NPIV_WWPN_SIZE 8 +#define FC_NPIV_WWNN_SIZE 8 +struct bdn_npiv_settings { + u8 npiv_wwpn[FC_NPIV_WWPN_SIZE]; + u8 npiv_wwnn[FC_NPIV_WWNN_SIZE]; +}; + +struct bdn_fc_npiv_cfg { + /* hdr used internally by the MFW */ + u32 hdr; + u32 num_of_npiv; +}; + +#define MAX_NUMBER_NPIV 64 +struct bdn_fc_npiv_tbl { + struct bdn_fc_npiv_cfg fc_npiv_cfg; + struct bdn_npiv_settings settings[MAX_NUMBER_NPIV]; +}; + +struct mdump_driver_info { + u32 epoc; + u32 drv_ver; + u32 fw_ver; + + u32 valid_dump; + #define FIRST_DUMP_VALID (1 << 0) + #define SECOND_DUMP_VALID (1 << 1) + + u32 flags; + #define ENABLE_ALL_TRIGGERS (0x7fffffff) + #define TRIGGER_MDUMP_ONCE (1 << 31) +}; + struct ncsi_oem_data { u32 driver_version[4]; struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features; @@ -2187,6 +2231,8 @@ struct shmem2_region { #define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002 #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004 #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008 +#define DRV_FLAGS_MTU_MASK 0xffff0000 +#define DRV_FLAGS_MTU_SHIFT 16 u32 extended_dev_info_shared_cfg_size; @@ -2251,6 +2297,7 @@ struct shmem2_region { u32 reserved4; /* Offset 0x150 */ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 + #define LINK_ATTR_84858 0x00000002 #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 @@ -2268,6 +2315,74 @@ struct shmem2_region { /* We use indication for each PF (0..3) */ #define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) + union { /* For various OEMs */ /* Offset 0x1a0 */ + u8 storage_boot_prog[E2_FUNC_MAX]; + #define STORAGE_BOOT_PROG_MASK 0x000000FF + #define STORAGE_BOOT_PROG_NONE 0x00000000 + #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED 0x00000002 + #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS 0x00000002 + #define STORAGE_BOOT_PROG_TARGET_FOUND 0x00000004 + #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS 0x00000008 + #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND 0x00000008 + #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT 0x00000010 + #define STORAGE_BOOT_PROG_IMG_DOWNLOADED 0x00000020 + #define STORAGE_BOOT_PROG_OS_HANDOFF 0x00000040 + #define STORAGE_BOOT_PROG_COMPLETED 0x00000080 + + u32 oem_i2c_data_addr; + }; + + /* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */ + /* For PCP values 0-3 use the map lower */ + /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1, + * 0x0000FF00 - PCP 2, 0x000000FF PCP 3 + */ + u32 c2s_pcp_map_lower[E2_FUNC_MAX]; /* 0x1a4 */ + + /* For PCP values 4-7 use the map upper */ + /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5, + * 0x0000FF00 - PCP 6, 0x000000FF PCP 7 + */ + u32 c2s_pcp_map_upper[E2_FUNC_MAX]; /* 0x1b4 */ + + /* For PCP default value get the MSB byte of the map default */ + u32 c2s_pcp_map_default[E2_FUNC_MAX]; /* 0x1c4 */ + + /* FC_NPIV table offset in NVRAM */ + u32 fc_npiv_nvram_tbl_addr[PORT_MAX]; /* 0x1d4 */ + + /* Shows last method that changed configuration of this device */ + enum curr_cfg_method_e curr_cfg; /* 0x1dc */ + + /* Storm FW version, should be kept in the format 0xMMmmbbdd: + * MM - Major, mm - Minor, bb - Build, dd - Drop + */ + u32 netproc_fw_ver; /* 0x1e0 */ + + /* Option ROM SMASH CLP version */ + u32 clp_ver; /* 0x1e4 */ + + u32 pcie_bus_num; /* 0x1e8 */ + + u32 sriov_switch_mode; /* 0x1ec */ + #define SRIOV_SWITCH_MODE_NONE 0x0 + #define SRIOV_SWITCH_MODE_VEB 0x1 + #define SRIOV_SWITCH_MODE_VEPA 0x2 + + u8 rsrv2[E2_FUNC_MAX]; /* 0x1f0 
*/ + + u32 img_inv_table_addr; /* Address to INV_TABLE_P */ /* 0x1f4 */ + + u32 mtu_size[E2_FUNC_MAX]; /* 0x1f8 */ + + u32 os_driver_state[E2_FUNC_MAX]; /* 0x208 */ + #define OS_DRIVER_STATE_NOT_LOADED 0 /* not installed */ + #define OS_DRIVER_STATE_LOADING 1 /* transition state */ + #define OS_DRIVER_STATE_DISABLED 2 /* installed but disabled */ + #define OS_DRIVER_STATE_ACTIVE 3 /* installed and active */ + + /* mini dump driver info */ + struct mdump_driver_info drv_info; /* 0x218 */ }; @@ -3897,7 +4012,11 @@ struct eth_fast_path_rx_cqe { __le16 len_on_bd; struct parsing_flags pars_flags; union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved1[7]; + u8 tunn_type; + u8 tunn_inner_hdrs_offset; + __le16 reserved1; + __le32 tunn_tenant_id; + __le32 padding[5]; u32 marker; }; @@ -4008,8 +4127,8 @@ struct eth_tunnel_data { __le16 pseudo_csum; u8 ip_hdr_start_inner_w; u8 flags; -#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) -#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0) +#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0 #define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) #define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 }; @@ -4116,16 +4235,12 @@ struct eth_rss_update_ramrod_data { #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7 -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8) -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8 -#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9) -#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9 -#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10) -#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11) -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11 -#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12) -#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12 +#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8) +#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10) +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10 u8 rss_result_mask; u8 reserved3; __le16 reserved4; @@ -4310,6 +4425,18 @@ enum eth_tunnel_non_lso_csum_location { MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION }; +enum eth_tunn_type { + TUNN_TYPE_NONE, + TUNN_TYPE_VXLAN, + TUNN_TYPE_L2_GRE, + TUNN_TYPE_IPV4_GRE, + TUNN_TYPE_IPV6_GRE, + TUNN_TYPE_L2_GENEVE, + TUNN_TYPE_IPV4_GENEVE, + TUNN_TYPE_IPV6_GENEVE, + MAX_ETH_TUNN_TYPE +}; + /* * Tx regular BD structure */ @@ -4754,6 +4881,9 @@ struct afex_vif_list_ramrod_data { __le16 reserved1; }; +struct c2s_pri_trans_table_entry { + u8 val[MAX_VLAN_PRIORITIES]; +}; /* * cfc delete event data @@ -5242,6 +5372,7 @@ struct flow_control_configuration { u8 dont_add_pri_0_en; u8 reserved1; __le32 reserved2; + u8 dcb_outer_pri[MAX_TRAFFIC_TYPES]; }; @@ -5256,18 +5387,25 @@ struct function_start_data { u8 path_id; u8 network_cos_mode; u8 dmae_cmd_id; - u8 tunnel_mode; - u8 gre_tunnel_type; - u8 tunn_clss_en; - u8 inner_gre_rss_en; - u8 sd_accept_mf_clss_fail; + u8 no_added_tags; + __le16 reserved0; + __le32 reserved1; + u8 inner_clss_vxlan; + u8 inner_clss_l2gre; + u8 inner_clss_l2geneve; + u8 inner_rss; 
__le16 vxlan_dst_port; + __le16 geneve_dst_port; + u8 sd_accept_mf_clss_fail; + u8 sd_accept_mf_clss_fail_match_ethtype; __le16 sd_accept_mf_clss_fail_ethtype; __le16 sd_vlan_eth_type; u8 sd_vlan_force_pri_flg; u8 sd_vlan_force_pri_val; - u8 sd_accept_mf_clss_fail_match_ethtype; - u8 no_added_tags; + u8 c2s_pri_tt_valid; + u8 c2s_pri_default; + u8 reserved2[6]; + struct c2s_pri_trans_table_entry c2s_pri_trans_table; }; struct function_update_data { @@ -5285,11 +5423,12 @@ struct function_update_data { u8 tx_switch_suspend; u8 echo; u8 update_tunn_cfg_flg; - u8 tunnel_mode; - u8 gre_tunnel_type; - u8 tunn_clss_en; - u8 inner_gre_rss_en; + u8 inner_clss_vxlan; + u8 inner_clss_l2gre; + u8 inner_clss_l2geneve; + u8 inner_rss; __le16 vxlan_dst_port; + __le16 geneve_dst_port; u8 sd_vlan_force_pri_change_flg; u8 sd_vlan_force_pri_flg; u8 sd_vlan_force_pri_val; @@ -5298,6 +5437,8 @@ struct function_update_data { u8 reserved1; __le16 sd_vlan_tag; __le16 sd_vlan_eth_type; + __le16 reserved0; + __le32 reserved2; }; /* @@ -5326,15 +5467,6 @@ struct fw_version { #define __FW_VERSION_RESERVED_SHIFT 4 }; - -/* GRE Tunnel Mode */ -enum gre_tunnel_type { - NVGRE_TUNNEL, - L2GRE_TUNNEL, - IPGRE_TUNNEL, - MAX_GRE_TUNNEL_TYPE -}; - /* * Dynamic Host-Coalescing - Driver(host) counters */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index d6e1975b7..46ee2c01f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -1,7 +1,9 @@ -/* bnx2x_init.h: Broadcom Everest network driver. +/* bnx2x_init.h: Qlogic Everest network driver. * Structures and macros needed during the initialization. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index 7919cc644..6811bf03c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -1,8 +1,10 @@ -/* bnx2x_init_ops.h: Broadcom Everest network driver. +/* bnx2x_init_ops.h: Qlogic Everest network driver. * Static functions needed during the initialization. * This file is "included" in bnx2x_main.c. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index a0b03c27e..d946bba43 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -1,13 +1,15 @@ /* Copyright 2008-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
* * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. * * Written by Yaniv Rosner @@ -9652,6 +9654,13 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, /******************************************************************/ /* BCM8481/BCM84823/BCM84833 PHY SECTION */ /******************************************************************/ +static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy) +{ + return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)); +} + static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct bnx2x *bp, u8 port) @@ -9666,8 +9675,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, }; u16 fw_ver1; - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (bnx2x_is_8483x_8485x(phy)) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, phy->ver_addr); @@ -9749,8 +9757,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + if (bnx2x_is_8483x_8485x(phy)) offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; else offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; @@ -9768,8 +9775,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; switch (action) { case PHY_INIT: - if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (!bnx2x_is_8483x_8485x(phy)) { /* Save spirom version */ bnx2x_save_848xx_spirom_version(phy, bp, params->port); } @@ -9901,8 +9907,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, /* Always write this if this is not 84833/4. * For 84833/4, write it only when it's a forced speed. */ - if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) || + if (!bnx2x_is_8483x_8485x(phy) || ((autoneg_val & (1<<12)) == 0)) bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, @@ -9949,8 +9954,86 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy, return bnx2x_848xx_cmn_config_init(phy, params, vars); } -#define PHY84833_CMDHDLR_WAIT 300 -#define PHY84833_CMDHDLR_MAX_ARGS 5 +#define PHY848xx_CMDHDLR_WAIT 300 +#define PHY848xx_CMDHDLR_MAX_ARGS 5 + +static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy, + struct link_params *params, + u16 fw_cmd, + u16 cmd_args[], int argc) +{ + int idx; + u16 val; + struct bnx2x *bp = params->bp; + + /* Step 1: Poll the STATUS register to see whether the previous command + * is in progress or the system is busy (CMD_IN_PROGRESS or + * SYSTEM_BUSY). 
If previous command is in progress or system is busy, + check again until the previous command finishes execution and the + system is available for taking commands + */ + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) && + (val != PHY84858_STATUS_CMD_SYSTEM_BUSY)) + break; + usleep_range(1000, 2000); + } + if (idx >= PHY848xx_CMDHDLR_WAIT) { + DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); + return -EINVAL; + } + + /* Step 2: If any parameters are required for the function, write them + * to the required DATA registers + */ + + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } + + /* Step 3: When the firmware is ready for commands, write the 'Command + * code' to the CMD register + */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); + + /* Step 4: Once the command has been written, poll the STATUS register + * to check whether the command has completed (CMD_COMPLETE_PASS or + * CMD_COMPLETE_ERROR). + */ + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) || + (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) + break; + usleep_range(1000, 2000); + } + if ((idx >= PHY848xx_CMDHDLR_WAIT) || + (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "FW cmd failed.\n"); + return -EINVAL; + } + /* Step 5: Once the command has completed, read the specified DATA + * registers for any saved results for the command, if applicable + */ + + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } + + return 0; +} +
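/* Illustration (sketch only, not part of this patch): with the handler
 * above, a firmware mailbox exchange reduces to filling cmd_args[] and
 * reading back the DATA registers on success. For example, querying the
 * EEE mode (an argument count of 1 is an assumption here) might look like:
 *
 *	u16 args[PHY848xx_CMDHDLR_MAX_ARGS] = {0};
 *	int rc = bnx2x_84858_cmd_hdlr(phy, params,
 *				      PHY848xx_CMD_GET_EEE_MODE, args, 1);
 *	if (!rc)
 *		DP(NETIF_MSG_LINK, "EEE mode 0x%x\n", args[0]);
 *
 * since step 5 copies the DATA registers back into args[].
 */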
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, u16 fw_cmd, u16 cmd_args[], int argc) @@ -9960,16 +10043,16 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; /* Write CMD_OPEN_OVERRIDE to STATUS reg */ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, + MDIO_848xx_CMD_HDLR_STATUS, PHY84833_STATUS_CMD_OPEN_OVERRIDE); - for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, &val); + MDIO_848xx_CMD_HDLR_STATUS, &val); if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) break; usleep_range(1000, 2000); } - if (idx >= PHY84833_CMDHDLR_WAIT) { + if (idx >= PHY848xx_CMDHDLR_WAIT) { DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); return -EINVAL; } @@ -9977,42 +10060,62 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, /* Prepare argument(s) and issue command */ for (idx = 0; idx < argc; idx++) { bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_DATA1 + idx, + MDIO_848xx_CMD_HDLR_DATA1 + idx, cmd_args[idx]); } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_COMMAND, fw_cmd); - for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, &val); + MDIO_848xx_CMD_HDLR_STATUS, &val); if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || - (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) break; usleep_range(1000, 2000); } - if ((idx >= PHY84833_CMDHDLR_WAIT) || - (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { + if ((idx >= PHY848xx_CMDHDLR_WAIT) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { DP(NETIF_MSG_LINK, "FW cmd failed.\n"); return -EINVAL; } /* Gather returning data */ for (idx = 0; idx < argc; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_DATA1 + idx, + MDIO_848xx_CMD_HDLR_DATA1 + idx, &cmd_args[idx]); } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, + MDIO_848xx_CMD_HDLR_STATUS, PHY84833_STATUS_CMD_CLEAR_COMPLETE); return 0; } -static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, + struct link_params *params, + u16 fw_cmd, + u16 cmd_args[], int argc) +{ + struct bnx2x *bp = params->bp; + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) || + (REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + link_attr_sync[params->port])) & + LINK_ATTR_84858)) { + return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args, + argc); + } else { + return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, + argc); + } +} + +static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { u32 pair_swap; - u16 data[PHY84833_CMDHDLR_MAX_ARGS]; + u16 data[PHY848xx_CMDHDLR_MAX_ARGS]; int status; struct bnx2x *bp = params->bp; @@ -10028,8 +10131,9 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, /* Only the second argument is used for this command */ data[1] = (u16)pair_swap; - status = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS); + status = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_PAIR_SWAP, data, + PHY848xx_CMDHDLR_MAX_ARGS); if (status == 0) DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); @@ -10118,8 +10222,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); /* Prevent Phy from working in EEE and advertising it */ - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); if (rc) { DP(NETIF_MSG_LINK, "EEE disable failed.\n"); return rc; @@ -10136,8 +10240,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 cmd_args = 1; - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); if (rc) { DP(NETIF_MSG_LINK, "EEE enable failed.\n"); return rc; @@ -10155,7 +10259,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, u8 port, initialize = 1; u16 val; u32 actual_phy_selection; - u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; + u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS]; int rc = 0; usleep_range(1000, 2000); @@ -10180,8 +10284,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Wait for GPHY to come out of reset */ msleep(50); - if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (!bnx2x_is_8483x_8485x(phy)) { /* BCM84823 requires that XGXS links up first @ 10G for normal * behavior.
*/ @@ -10192,7 +10295,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); vars->line_speed = temp; } + /* Check if this is actually BCM84858 */ + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + u16 hw_rev; + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_848xx_ID_MSB, &hw_rev); + if (hw_rev == BCM84858_PHY_ID) { + params->link_attr_sync |= LINK_ATTR_84858; + bnx2x_update_link_attr(params, params->link_attr_sync); + } + } + + /* Set dual-media configuration according to configuration */ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_CTL_REG_84823_MEDIA, &val); val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | @@ -10237,18 +10352,17 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", params->multi_phy_config, val); - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { - bnx2x_84833_pair_swap_cfg(phy, params, vars); + if (bnx2x_is_8483x_8485x(phy)) { + bnx2x_848xx_pair_swap_cfg(phy, params, vars); /* Keep AutogrEEEn disabled. */ cmd_args[0] = 0x0; cmd_args[1] = 0x0; cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; cmd_args[3] = PHY84833_CONSTANT_LATENCY; - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, cmd_args, - PHY84833_CMDHDLR_MAX_ARGS); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, cmd_args, + PHY848xx_CMDHDLR_MAX_ARGS); if (rc) DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); } @@ -10302,8 +10416,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; } - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (bnx2x_is_8483x_8485x(phy)) { /* Bring PHY out of super isolate mode as the final step. 
*/ bnx2x_cl45_read_and_write(bp, phy, MDIO_CTL_DEVAD, @@ -10435,8 +10548,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; /* Determine if EEE was negotiated */ - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + if (bnx2x_is_8483x_8485x(phy)) bnx2x_eee_an_resolve(phy, params, vars); } @@ -11842,6 +11954,40 @@ static const struct bnx2x_phy phy_84834 = { .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; +static const struct bnx2x_phy phy_84858 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func +}; + static const struct bnx2x_phy phy_54618se = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, .addr = 0xff, @@ -12128,6 +12274,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: *phy = phy_84834; break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858: + *phy = phy_84858; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: *phy = phy_54618se; @@ -12184,9 +12333,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, } phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); - if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) && - (phy->ver_addr)) { + if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) { /* Remove 100Mb link supported for BCM84833/4 when phy fw * version lower than or equal to 1.39 */ @@ -13281,6 +13428,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858: /* GPIO3's are linked, and so both need to be toggled * to obtain required 2us pulse. 
*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index d9cce4c38..b7d251108 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -1,13 +1,15 @@ /* Copyright 2008-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. * * Written by Yaniv Rosner diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index bbb733556..e436c7b0b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1,6 +1,8 @@ -/* bnx2x_main.c: Broadcom Everest network driver. +/* bnx2x_main.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -80,11 +82,11 @@ #define TX_TIMEOUT (5*HZ) static char version[] = - "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " + "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver " DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Eliezer Tamir"); -MODULE_DESCRIPTION("Broadcom NetXtreme II " +MODULE_DESCRIPTION("QLogic " "BCM57710/57711/57711E/" "57712/57712_MF/57800/57800_MF/57810/57810_MF/" "57840/57840_MF Driver"); @@ -160,27 +162,27 @@ enum bnx2x_board_type { static struct { char *name; } board_info[] = { - [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, - [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, - [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, - [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, - [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, - [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" }, - [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, - [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, - [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" }, - [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, - [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, - [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" }, - [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" }, - [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit 
Ethernet" }, - [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, - [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }, - [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" }, - [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" }, - [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, - [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, - [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" } + [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" }, + [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" }, + [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" }, + [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" }, + [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" }, + [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" }, + [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" }, + [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" }, + [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" }, + [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" }, + [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" }, + [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" }, + [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" }, + [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" }, + [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }, + [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" }, + [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" }, + [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" }, + [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57811_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" } }; #ifndef PCI_DEVICE_ID_NX2_57710 @@ -261,11 +263,14 @@ static const struct pci_device_id bnx2x_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 }, + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF }, + { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF }, @@ -2486,7 +2491,7 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, else { u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); - if (IS_MF_SI(bp)) { + if (IS_MF_PERCENT_BW(bp)) { /* maxCfg in percents of linkspeed */ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; } else /* SD modes */ @@ -2910,7 +2915,7 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) 
func_params.f_obj = &bp->func_obj; func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; - if (IS_MF_UFP(bp)) { + if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { int func = BP_ABS_FUNC(bp); u32 val; @@ -2937,16 +2942,16 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", bp->mf_ov); goto fail; + } else { + DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", + bp->mf_ov); } - - DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov); - - bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); - - return; + } else { + goto fail; } - /* not supported by SW yet */ + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); + return; fail: bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); } @@ -3059,7 +3064,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) storm_memset_func_en(bp, p->func_id, 1); /* spq */ - if (p->func_flgs & FUNC_FLG_SPQ) { + if (p->spq_active) { storm_memset_spq_addr(bp, p->spq_map, p->func_id); REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); @@ -3275,7 +3280,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) { struct bnx2x_func_init_params func_init = {0}; struct event_ring_data eq_data = { {0} }; - u16 flags; if (!CHIP_IS_E1x(bp)) { /* reset IGU PF statistics: MSIX + ATTN */ @@ -3292,15 +3296,7 @@ static void bnx2x_pf_init(struct bnx2x *bp) BP_FUNC(bp) : BP_VN(bp))*4, 0); } - /* function setup flags */ - flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); - - /* This flag is relevant for E1x only. - * E2 doesn't have a TPA configuration in a function level. - */ - flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0; - - func_init.func_flgs = flags; + func_init.spq_active = true; func_init.pf_id = BP_FUNC(bp); func_init.func_id = BP_FUNC(bp); func_init.spq_map = bp->spq_mapping; @@ -3701,6 +3697,32 @@ out: ethver, iscsiver, fcoever); } +void bnx2x_update_mfw_dump(struct bnx2x *bp) +{ + u32 drv_ver; + u32 valid_dump; + + if (!SHMEM2_HAS(bp, drv_info)) + return; + + /* Update Driver load time, possibly broken in y2038 */ + SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds()); + + drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); + SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); + + SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM)); + + /* Check & notify On-Chip dump. */ + valid_dump = SHMEM2_RD(bp, drv_info.valid_dump); + + if (valid_dump & FIRST_DUMP_VALID) + DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n"); + + if (valid_dump & SECOND_DUMP_VALID) + DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n"); +} + static void bnx2x_oem_event(struct bnx2x *bp, u32 event) { u32 cmd_ok, cmd_fail; @@ -5267,6 +5289,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, else vlan_mac_obj = &bp->sp_objs[cid].mac_obj; + break; + case BNX2X_FILTER_VLAN_PENDING: + DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n"); + vlan_mac_obj = &bp->sp_objs[cid].vlan_obj; break; case BNX2X_FILTER_MCAST_PENDING: DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); @@ -5562,6 +5588,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) BNX2X_STATE_OPEN): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAIT4_PORT): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", @@ -5579,7 +5607,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) BNX2X_STATE_DIAG): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAIT4_HALT): - DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); + DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n"); bnx2x_handle_classification_eqe(bp, elem); break; @@ -6167,6 +6195,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); + if (bp->accept_any_vlan) { + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); + } + break; case BNX2X_RX_MODE_ALLMULTI: __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); @@ -6178,6 +6211,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); + if (bp->accept_any_vlan) { + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); + } + break; case BNX2X_RX_MODE_PROMISC: /* According to definition of SI mode, iface in promisc mode @@ -6198,18 +6236,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, else __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); + break; default: BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); return -EINVAL; } - /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ - if (rx_mode != BNX2X_RX_MODE_NONE) { - __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); - __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); - } - return 0; } @@ -7423,6 +7458,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) } else BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + if (SHMEM2_HAS(bp, netproc_fw_ver)) + SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM)); + return 0; } @@ -8400,6 +8438,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, return rc; } +int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, + struct bnx2x_vlan_mac_obj *obj, bool set, + unsigned long *ramrod_flags) +{ + int rc; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Fill general parameters */ + ramrod_param.vlan_mac_obj = obj; + ramrod_param.ramrod_flags = *ramrod_flags; + + /* Fill a user request section if needed */ + if (!test_bit(RAMROD_CONT, ramrod_flags)) { + ramrod_param.user_req.u.vlan.vlan = vlan; + /* Set the command: ADD or DEL */ + if (set) + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + else + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; + } + + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + + if (rc == -EEXIST) { + /* Do not treat adding same vlan as error. */ + DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); + rc = 0; + } else if (rc < 0) { + BNX2X_ERR("%s VLAN failed\n", (set ? 
"Set" : "Del")); + } + + return rc; +} + int bnx2x_del_all_macs(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, int mac_type, bool wait_for_comp) @@ -9996,6 +10070,91 @@ static void bnx2x_parity_recover(struct bnx2x *bp) } } +#ifdef CONFIG_BNX2X_VXLAN +static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) +{ + struct bnx2x_func_switch_update_params *switch_update_params; + struct bnx2x_func_state_params func_params = {NULL}; + int rc; + + switch_update_params = &func_params.params.switch_update; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + + /* Function parameters */ + __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + &switch_update_params->changes); + switch_update_params->vxlan_dst_port = port; + rc = bnx2x_func_state_change(bp, &func_params); + if (rc) + BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n", + port, rc); + return rc; +} + +static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port) +{ + if (!netif_running(bp->dev)) + return; + + if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) { + bp->vxlan_dst_port_count++; + return; + } + + if (bp->vxlan_dst_port_count || !IS_PF(bp)) { + DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); + return; + } + + bp->vxlan_dst_port = port; + bp->vxlan_dst_port_count = 1; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); +} + +static void bnx2x_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); + + __bnx2x_add_vxlan_port(bp, t_port); +} + +static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) +{ + if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port || + !IS_PF(bp)) { + DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); + return; + } + bp->vxlan_dst_port--; + if (bp->vxlan_dst_port) + return; + + if (netif_running(bp->dev)) { + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); + } else { + bp->vxlan_dst_port = 0; + netdev_info(bp->dev, "Deleted vxlan dest port %d", port); + } +} + +static void bnx2x_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); + + __bnx2x_del_vxlan_port(bp, t_port); +} +#endif + static int bnx2x_close(struct net_device *dev); /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is @@ -10004,6 +10163,9 @@ static int bnx2x_close(struct net_device *dev); static void bnx2x_sp_rtnl_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); +#ifdef CONFIG_BNX2X_VXLAN + u16 port; +#endif rtnl_lock(); @@ -10102,6 +10264,27 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); +#ifdef CONFIG_BNX2X_VXLAN + port = bp->vxlan_dst_port; + if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT, + &bp->sp_rtnl_state)) { + if (!bnx2x_vxlan_port_update(bp, port)) + netdev_info(bp->dev, "Added vxlan dest port %d", port); + else + bp->vxlan_dst_port = 0; + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT, + &bp->sp_rtnl_state)) { + if (!bnx2x_vxlan_port_update(bp, 0)) { + netdev_info(bp->dev, + "Deleted vxlan dest port %d", port); + bp->vxlan_dst_port = 0; + vxlan_get_rx_port(bp->dev); + } + } +#endif + /* work which needs rtnl lock not-taken (as it takes the lock itself and * can 
@@ -11672,7 +11855,7 @@ static void validate_set_si_mode(struct bnx2x *bp) static int bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); - int vn; + int vn, mfw_vn; u32 val = 0, val2 = 0; int rc = 0; @@ -11762,6 +11945,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_mode = 0; bp->mf_sub_mode = 0; vn = BP_VN(bp); + mfw_vn = BP_FW_MB_IDX(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", @@ -11818,6 +12002,31 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE: + bp->mf_mode = MULTI_FUNCTION_SD; + bp->mf_sub_mode = SUB_MF_MODE_BD; + bp->mf_config[vn] = + MF_CFG_RD(bp, + func_mf_config[func].config); + + if (SHMEM2_HAS(bp, mtu_size)) { + int mtu_idx = BP_FW_MB_IDX(bp); + u16 mtu_size; + u32 mtu; + + mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]); + mtu_size = (u16)mtu; + DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n", + mtu_size, mtu); + + /* if valid: update device mtu */ + if (((mtu_size + ETH_HLEN) >= + ETH_MIN_PACKET_SIZE) && + (mtu_size <= + ETH_MAX_JUMBO_PACKET_SIZE)) + bp->dev->mtu = mtu_size; + } + break; case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: bp->mf_mode = MULTI_FUNCTION_SD; bp->mf_sub_mode = SUB_MF_MODE_UFP; @@ -11865,9 +12074,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", func, bp->mf_ov, bp->mf_ov); - } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) { + } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || + (bp->mf_sub_mode == SUB_MF_MODE_BD)) { dev_err(&bp->pdev->dev, - "Unexpected - no valid MF OV for func %d in UFP mode\n", + "Unexpected - no valid MF OV for func %d in UFP/BD mode\n", func); bp->path_has_ovlan = true; } else { @@ -12072,6 +12282,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->drv_info_mutex); sema_init(&bp->stats_lock, 1); bp->drv_info_mng_owner = false; + INIT_LIST_HEAD(&bp->vlan_reg); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); @@ -12272,6 +12483,12 @@ static int bnx2x_open(struct net_device *dev) rc = bnx2x_nic_load(bp, LOAD_OPEN); if (rc) return rc; + +#ifdef CONFIG_BNX2X_VXLAN + if (IS_PF(bp)) + vxlan_get_rx_port(dev); +#endif + return 0; } @@ -12590,6 +12807,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb, return vxlan_features_check(skb, features); } +static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) +{ + int rc; + + if (IS_PF(bp)) { + unsigned long ramrod_flags = 0; + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj, + add, &ramrod_flags); + } else { + rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add); + } + + return rc; +} + +int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) +{ + struct bnx2x_vlan_entry *vlan; + int rc = 0; + + if (!bp->vlan_cnt) { + DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n"); + return 0; + } + + list_for_each_entry(vlan, &bp->vlan_reg, link) { + /* Prepare for cleanup in case of errors */ + if (rc) { + vlan->hw = false; + continue; + } + + if (!vlan->hw) + continue; + + DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid); + + rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); + if (rc) { + BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid); + vlan->hw = false; + rc = -EINVAL; + continue; + } + } + + return rc; +} + +
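/* Illustration (not part of this patch): bnx2x_vlan_rx_add_vid() below
 * uses a credit scheme. The first bp->vlan_credit VLANs are programmed
 * into the hardware filter; any excess flips the device into
 * accept_any_vlan mode instead. For example, with a credit of 2:
 *
 *	add vid 10  -> programmed in hw       (vlan_cnt = 1)
 *	add vid 20  -> programmed in hw       (vlan_cnt = 2)
 *	add vid 30  -> accept_any_vlan raised (vlan_cnt = 3)
 *	kill vid 30 -> back under credit: remaining unprogrammed entries
 *	               are written to hw and accept_any_vlan is cleared,
 *	               as bnx2x_vlan_rx_kill_vid() shows further down.
 */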
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_vlan_entry *vlan; + bool hw = false; + int rc = 0; + + if (!netif_running(bp->dev)) { + DP(NETIF_MSG_IFUP, + "Ignoring VLAN configuration; the interface is down\n"); + return -EFAULT; + } + + DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); + + vlan = kmalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return -ENOMEM; + + bp->vlan_cnt++; + if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) { + DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n"); + bp->accept_any_vlan = true; + if (IS_PF(bp)) + bnx2x_set_rx_mode_inner(bp); + else + bnx2x_vfpf_storm_rx_mode(bp); + } else if (bp->vlan_cnt <= bp->vlan_credit) { + rc = __bnx2x_vlan_configure_vid(bp, vid, true); + hw = true; + } + + vlan->vid = vid; + vlan->hw = hw; + + if (!rc) { + list_add(&vlan->link, &bp->vlan_reg); + } else { + bp->vlan_cnt--; + kfree(vlan); + } + + DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc); + + return rc; +} + +static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_vlan_entry *vlan; + int rc = 0; + + if (!netif_running(bp->dev)) { + DP(NETIF_MSG_IFUP, + "Ignoring VLAN configuration; the interface is down\n"); + return -EFAULT; + } + + DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); + + if (!bp->vlan_cnt) { + BNX2X_ERR("Unable to kill VLAN %d\n", vid); + return -EINVAL; + } + + list_for_each_entry(vlan, &bp->vlan_reg, link) + if (vlan->vid == vid) + break; + + if (vlan->vid != vid) { + BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); + return -EINVAL; + } + + if (vlan->hw) + rc = __bnx2x_vlan_configure_vid(bp, vid, false); + + list_del(&vlan->link); + kfree(vlan); + + bp->vlan_cnt--; + + if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) { + /* Configure all non-configured entries */ + list_for_each_entry(vlan, &bp->vlan_reg, link) { + if (vlan->hw) + continue; + + rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); + if (rc) { + BNX2X_ERR("Unable to config VLAN %d\n", + vlan->vid); + continue; + } + DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", + vlan->vid); + vlan->hw = true; + } + DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n"); + bp->accept_any_vlan = false; + if (IS_PF(bp)) + bnx2x_set_rx_mode_inner(bp); + else + bnx2x_vfpf_storm_rx_mode(bp); + } + + DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); + + return rc; +} + static const struct net_device_ops bnx2x_netdev_ops = { .ndo_open = bnx2x_open, .ndo_stop = bnx2x_close, @@ -12603,6 +12983,8 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_fix_features = bnx2x_fix_features, .ndo_set_features = bnx2x_set_features, .ndo_tx_timeout = bnx2x_tx_timeout, + .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_bnx2x, #endif @@ -12622,6 +13004,10 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, .ndo_features_check = bnx2x_features_check, +#ifdef CONFIG_BNX2X_VXLAN + .ndo_add_vxlan_port = bnx2x_add_vxlan_port, + .ndo_del_vxlan_port = bnx2x_del_vxlan_port, +#endif }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) @@ -12813,6 +13199,18 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_TSO6 | NETIF_F_HIGHDMA; + /* VFs with an old hypervisor or an old PF do not support filtering */ + if (IS_PF(bp)) { + if (CHIP_IS_E1x(bp)) + bp->accept_any_vlan = true; + else + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#ifdef CONFIG_BNX2X_SRIOV + } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#endif + } + dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; dev->features |= NETIF_F_HIGHDMA; @@ -13497,6 +13895,9 @@ static int bnx2x_init_one(struct pci_dev *pdev, bnx2x_register_phc(bp); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); + return 0; init_one_exit: @@ -13559,6 +13960,7 @@ static void __bnx2x_remove(struct pci_dev *pdev, /* Power on: we can't let PCI layer write to us while we are in D3 */ if (IS_PF(bp)) { bnx2x_set_power_state(bp, PCI_D0); + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED); /* Set endianness registers to reset values in case next driver * boots in a different endianness environment. @@ -14307,6 +14709,90 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) rc = -EINVAL; } + /* For storage-only interfaces, change driver state */ + if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) { + switch (ctl->drv_state) { + case DRV_NOP: + break; + case DRV_ACTIVE: + bnx2x_set_os_driver_state(bp, + OS_DRIVER_STATE_ACTIVE); + break; + case DRV_INACTIVE: + bnx2x_set_os_driver_state(bp, + OS_DRIVER_STATE_DISABLED); + break; + case DRV_UNLOADED: + bnx2x_set_os_driver_state(bp, + OS_DRIVER_STATE_NOT_LOADED); + break; + default: + BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state); + } + } + + return rc; +} + +static int bnx2x_get_fc_npiv(struct net_device *dev, + struct cnic_fc_npiv_tbl *cnic_tbl) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bdn_fc_npiv_tbl *tbl = NULL; + u32 offset, entries; + int rc = -EINVAL; + int i; + + if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0])) + goto out; + + DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n"); + + tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); + if (!tbl) { + BNX2X_ERR("Failed to allocate fc_npiv table\n"); + goto out; + } + + offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]); + DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); + + /* Read the table contents from nvram */ + if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) { + BNX2X_ERR("Failed to read FC-NPIV table\n"); + goto out; + } + + /* Since bnx2x_nvram_read() returns data in be32, we need to convert + * the number of entries back to cpu endianness. + */
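/* Illustration (not part of this patch): bnx2x_nvram_read() fills the
 * buffer with big-endian 32-bit words, so a count stored as 0x00000002
 * in NVRAM is read on a little-endian host as 0x02000000. The swab on
 * the next line,
 *
 *	entries = (__force u32)be32_to_cpu((__force __be32)entries);
 *
 * recovers 2 on any host (on big-endian it is a no-op).
 */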
+ entries = tbl->fc_npiv_cfg.num_of_npiv; + entries = (__force u32)be32_to_cpu((__force __be32)entries); + tbl->fc_npiv_cfg.num_of_npiv = entries; + + if (!tbl->fc_npiv_cfg.num_of_npiv) { + DP(BNX2X_MSG_MCP, + "No FC-NPIV table [valid, simply not present]\n"); + goto out; + } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) { + BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n", + tbl->fc_npiv_cfg.num_of_npiv); + goto out; + } else { + DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n", + tbl->fc_npiv_cfg.num_of_npiv); + } + + /* Copy the data into cnic-provided struct */ + cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv; + for (i = 0; i < cnic_tbl->count; i++) { + memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8); + memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8); + } + + rc = 0; +out: + kfree(tbl); return rc; } @@ -14452,6 +14938,7 @@ static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; cp->drv_ctl = bnx2x_drv_ctl; + cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv; cp->drv_register_cnic = bnx2x_register_cnic; cp->drv_unregister_cnic = bnx2x_unregister_cnic; cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h index caf1aef65..a91ccbf36 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h @@ -1,6 +1,8 @@ -/* bnx2x_mfw_req.h: Broadcom Everest network driver. +/* bnx2x_mfw_req.h: Qlogic Everest network driver. * * Copyright (c) 2012-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 49d511092..4dead49bd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -1,6 +1,8 @@ -/* bnx2x_reg.h: Broadcom Everest network driver. +/* bnx2x_reg.h: Qlogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -2137,6 +2139,10 @@ /* [RW 1] When this bit is set; the LLH will expect all packets to be with e1hov */ #define NIG_REG_LLH_E1HOV_MODE 0x160d8 +/* [RW 16] Outer VLAN type identifier for multi-function mode. In non + * multi-function mode; it will hold the inner VLAN type. Typically 0x8100. + */ +#define NIG_REG_LLH_E1HOV_TYPE_1 0x16028 /* [RW 1] When this bit is set; the LLH will classify the packet before sending it to the BRB or calculating WoL on it. */ #define NIG_REG_LLH_MF_MODE 0x16024 @@ -2953,7 +2959,12 @@ #define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */ #define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 -#define PB_REG_CONTROL 0 +/* [RW 16] One of 8 values that should be compared to type in Ethernet + * parsing. If there is a match; the field after Ethernet is the first VLAN. + * Reset value is 0x8100 which is the standard VLAN type. Note that when + * checking second VLAN; type is compared only to 0x8100.
+ */ +#define PBF_REG_VLAN_TYPE_0 0x15c06c /* [RW 2] Interrupt mask register #0 read/write */ #define PB_REG_PB_INT_MASK 0x28 /* [R 2] Interrupt register #0 read */ @@ -3372,6 +3383,12 @@ #define PRS_REG_TCM_CURRENT_CREDIT 0x40160 /* [R 8] debug only: TSDM current credit. Transaction based. */ #define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c +/* [RW 16] One of 8 values that should be compared to type in Ethernet + * parsing. If there is a match; the field after Ethernet is the first VLAN. + * Reset value is 0x8100 which is the standard VLAN type. Note that when + * checking second VLAN; type is compared only to 0x8100. + */ +#define PRS_REG_VLAN_TYPE_0 0x401a8 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19) #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20) #define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22) @@ -7240,6 +7257,9 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 +#define MDIO_AN_REG_848xx_ID_MSB 0xffe2 +#define BCM84858_PHY_ID 0x600d +#define MDIO_AN_REG_848xx_ID_LSB 0xffe3 #define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 #define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 @@ -7283,31 +7303,31 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a #define MDIO_84833_SUPER_ISOLATE 0x8000 -/* These are mailbox register set used by 84833. */ -#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005 -#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006 -#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 -#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 -#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 -#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037 -#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038 -#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039 -#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a -#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b -#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c -#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0 -#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26 -#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27 -#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28 -#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29 -#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30 -#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31 +/* These are mailbox register set used by 84833/84858. 
*/ +#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a +#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b +#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c +#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0) +#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26) +#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27) +#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28) +#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29) +#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30) +#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31) -/* Mailbox command set used by 84833. */ -#define PHY84833_CMD_SET_PAIR_SWAP 0x8001 -#define PHY84833_CMD_GET_EEE_MODE 0x8008 -#define PHY84833_CMD_SET_EEE_MODE 0x8009 -/* Mailbox status set used by 84833. */ +/* Mailbox command set used by 84833/84858 */ +#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001 +#define PHY848xx_CMD_GET_EEE_MODE 0x8008 +#define PHY848xx_CMD_SET_EEE_MODE 0x8009 +/* Mailbox status set used by 84833 only */ #define PHY84833_STATUS_CMD_RECEIVED 0x0001 #define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002 #define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004 @@ -7318,6 +7338,13 @@ Theotherbitsarereservedandshouldbezero*/ #define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 +/* Mailbox status set used by 84858 only */ +#define PHY84858_STATUS_CMD_RECEIVED 0x0001 +#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002 +#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004 +#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008 +#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb + /* Warpcore clause 45 addressing */ #define MDIO_WC_DEVAD 0x3 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 4ad415ac8..ff702a707 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -1,15 +1,17 @@ -/* bnx2x_sp.c: Broadcom Everest network driver. +/* bnx2x_sp.c: Qlogic Everest network driver. * - * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. 
* * Maintained by: Ariel Elior @@ -355,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) return vp->get(vp, 1); } + +static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + if (!mp->get(mp, 1)) + return false; + + if (!vp->get(vp, 1)) { + mp->put(mp, 1); + return false; + } + + return true; +} + static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) { struct bnx2x_credit_pool_obj *mp = o->macs_pool; @@ -383,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) return vp->put(vp, 1); } +static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + if (!mp->put(mp, 1)) + return false; + + if (!vp->put(vp, 1)) { + mp->get(mp, 1); + return false; + } + + return true; +} + /** * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock * @@ -636,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp, return 0; } +static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n", + data->vlan_mac.mac, data->vlan_mac.vlan); + + list_for_each_entry(pos, &o->head, link) + if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && + (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, + ETH_ALEN)) && + (data->vlan_mac.is_inner_mac == + pos->u.vlan_mac.is_inner_mac)) + return -EEXIST; + + return 0; +} + /* check_del() callbacks */ static struct bnx2x_vlan_mac_registry_elem * bnx2x_check_mac_del(struct bnx2x *bp, @@ -670,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem * return NULL; } +static struct bnx2x_vlan_mac_registry_elem * + bnx2x_check_vlan_mac_del(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n", + data->vlan_mac.mac, data->vlan_mac.vlan); + + list_for_each_entry(pos, &o->head, link) + if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && + (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, + ETH_ALEN)) && + (data->vlan_mac.is_inner_mac == + pos->u.vlan_mac.is_inner_mac)) + return pos; + + return NULL; +} + /* check_move() callback */ static bool bnx2x_check_move(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *src_o, @@ -1036,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, rule_cnt); } +static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + int rule_idx, int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
true : false; + u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; + u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; + u16 inner_mac; + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Set a rule header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, + &rule_entry->pair.header); + + /* Set VLAN and MAC themselves */ + rule_entry->pair.vlan = cpu_to_le16(vlan); + bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, + &rule_entry->pair.mac_mid, + &rule_entry->pair.mac_lsb, mac); + inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac; + rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); + /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + struct bnx2x_vlan_mac_obj *target_obj; + + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + target_obj = elem->cmd_data.vlan_mac.target_obj; + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj, + true, CLASSIFY_RULE_OPCODE_PAIR, + &rule_entry->pair.header); + + /* Set a VLAN itself */ + rule_entry->pair.vlan = cpu_to_le16(vlan); + bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, + &rule_entry->pair.mac_mid, + &rule_entry->pair.mac_lsb, mac); + rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); + } + + /* Set the ramrod data header */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * bnx2x_set_one_vlan_mac_e1h - + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @elem: bnx2x_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + int rule_idx, int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* 57710 and 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
+ true : false; + + /* Reset the ramrod data buffer */ + memset(config, 0, sizeof(*config)); + + bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, + cam_offset, add, + elem->cmd_data.vlan_mac.u.vlan_mac.mac, + elem->cmd_data.vlan_mac.u.vlan_mac.vlan, + ETH_VLAN_FILTER_CLASSIFY, config); +} + /** * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element * @@ -1135,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( return NULL; } +static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_vlan_mac_ramrod_data *data = + &elem->cmd_data.vlan_mac.u.vlan_mac; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == + elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + /** * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed * @@ -2042,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, } } +void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + union bnx2x_qable_obj *qable_obj = + (union bnx2x_qable_obj *)vlan_mac_obj; + + bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, vlans_pool); + + /* CAM pool handling */ + vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; + vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; + /* CAM offset is relevant for 57710 and 57711 chips only which have a + * single CAM for both MACs and VLAN-MAC pairs. So the offset + * will be taken from MACs' pool object only. 
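+ * Credit handling for the pair object is symmetric: the get_credit
+ * callback assigned above debits both pools and rolls the MAC credit
+ * back if the VLAN pool is exhausted, and put_credit mirrors that on
+ * release, so a VLAN-MAC entry never leaks a credit in either pool.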
+ */ + vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; + vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; + + if (CHIP_IS_E1(bp)) { + BNX2X_ERR("Do not support chips others than E2\n"); + BUG(); + } else if (CHIP_IS_E1H(bp)) { + vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; + vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; + vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; + vlan_mac_obj->check_move = bnx2x_check_move_always_err; + vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_mac_obj->exe_queue, 1, qable_obj, + bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan_mac); + } else { + vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; + vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; + vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; + vlan_mac_obj->check_move = bnx2x_check_move; + vlan_mac_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_mac_obj->exe_queue, + CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan_mac); + } +} /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ static inline void __storm_memset_mac_filters(struct bnx2x *bp, struct tstorm_eth_mac_filter_config *mac_filters, @@ -3854,8 +4101,8 @@ static bool bnx2x_credit_pool_get_entry_always_true( * If credit is negative pool operations will always succeed (unlimited pool). * */ -static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, - int base, int credit) +void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, + int base, int credit) { /* Zero the object first */ memset(p, 0, sizeof(*p)); @@ -3934,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, /* CAM credit is equaly divided between all active functions * on the PATH. */ - if ((func_num > 0)) { + if (func_num > 0) { if (!CHIP_REV_IS_SLOW(bp)) - cam_sz = (MAX_MAC_CREDIT_E2 / func_num); + cam_sz = PF_MAC_CREDIT_E2(bp, func_num); else cam_sz = BNX2X_CAM_SIZE_EMUL; @@ -3966,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, * on the PATH. */ if (func_num > 0) { - int credit = MAX_VLAN_CREDIT_E2 / func_num; - bnx2x_init_credit_pool(p, func_id * credit, credit); + int credit = PF_VLAN_CREDIT_E2(bp, func_num); + + bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit); } else /* this should never happen! Block VLAN operations. 
*/ bnx2x_init_credit_pool(p, 0, 0); @@ -4060,13 +4308,27 @@ static int bnx2x_setup_rss(struct bnx2x *bp, if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; - if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags)) - caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY; + if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY; + + if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY; /* RSS keys */ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { - memcpy(&data->rss_key[0], &p->rss_key[0], - sizeof(data->rss_key)); + u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key); + const u8 *src = (const u8 *)p->rss_key; + int i; + + /* Apparently, bnx2x reads this array in reverse order + * We need to byte swap rss_key to comply with Toeplitz specs. + */ + for (i = 0; i < sizeof(data->rss_key); i++) + *--dst = *src++; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; } @@ -5669,10 +5931,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; - rdata->tunnel_mode = start_params->tunnel_mode; - rdata->gre_tunnel_type = start_params->gre_tunnel_type; - rdata->inner_gre_rss_en = start_params->inner_gre_rss_en; - rdata->vxlan_dst_port = cpu_to_le16(4789); + + rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); + rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); + rdata->inner_clss_l2gre = start_params->inner_clss_l2gre; + rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve; + rdata->inner_clss_vxlan = start_params->inner_clss_vxlan; + rdata->inner_rss = start_params->inner_rss; + rdata->sd_accept_mf_clss_fail = start_params->class_fail; if (start_params->class_fail_ethtype) { rdata->sd_accept_mf_clss_fail_match_ethtype = 1; @@ -5690,6 +5956,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, cpu_to_le16(0x8100); rdata->no_added_tags = start_params->no_added_tags; + + rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid; + if (rdata->c2s_pri_tt_valid) { + memcpy(rdata->c2s_pri_trans_table.val, + start_params->c2s_pri, + MAX_VLAN_PRIORITIES); + rdata->c2s_pri_default = start_params->c2s_pri_default; + } /* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory @@ -5750,15 +6024,22 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &switch_update_params->changes)) { rdata->update_tunn_cfg_flg = 1; - if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE, + &switch_update_params->changes)) + rdata->inner_clss_l2gre = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN, + &switch_update_params->changes)) + rdata->inner_clss_vxlan = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE, &switch_update_params->changes)) - rdata->tunn_clss_en = 1; - if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + rdata->inner_clss_l2geneve = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, &switch_update_params->changes)) - rdata->inner_gre_rss_en = 1; - 
rdata->tunnel_mode = switch_update_params->tunnel_mode; - rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type; - rdata->vxlan_dst_port = cpu_to_le16(4789); + rdata->inner_rss = 1; + rdata->vxlan_dst_port = + cpu_to_le16(switch_update_params->vxlan_dst_port); + rdata->geneve_dst_port = + cpu_to_le16(switch_update_params->geneve_dst_port); } rdata->echo = SWITCH_UPDATE; @@ -5885,6 +6166,8 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, rdata->traffic_type_to_priority_cos[i] = tx_start_params->traffic_type_to_priority_cos[i]; + for (i = 0; i < MAX_TRAFFIC_TYPES; i++) + rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i]; /* No need for an explicit memory barrier here as long as we * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 86baecb7c..4048fc594 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1,15 +1,17 @@ -/* bnx2x_sp.h: Broadcom Everest network driver. +/* bnx2x_sp.h: Qlogic Everest network driver. * - * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. 
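The rss_key rework in bnx2x_setup_rss() above replaces a plain memcpy with a byte-reversed copy: per the comment, the chip consumes the array in reverse order, so the Toeplitz key must be mirrored into the ramrod buffer. An equivalent, more explicit form of that loop as a standalone sketch; the 40-byte length is an assumption based on the driver storing the key as u32 rss_key[10]:

#include <stddef.h>

/* dst[len - 1 - i] = src[i] is exactly the "*--dst = *src++" walk in
 * the hunk above, where dst initially points one past the buffer end.
 */
static void rss_key_reverse(unsigned char *dst, const unsigned char *src,
			    size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[len - 1 - i] = src[i];
}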
* * Maintained by: Ariel Elior @@ -711,7 +713,10 @@ enum { BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, BNX2X_RSS_IPV6_UDP, - BNX2X_RSS_GRE_INNER_HDRS, + + BNX2X_RSS_IPV4_VXLAN, + BNX2X_RSS_IPV6_VXLAN, + BNX2X_RSS_TUNN_INNER_HDRS, }; struct bnx2x_config_rss_params { @@ -1105,8 +1110,10 @@ enum { BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, - BNX2X_F_UPDATE_TUNNEL_CLSS_EN, - BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE, + BNX2X_F_UPDATE_TUNNEL_INNER_RSS, }; /* Allowed Function states */ @@ -1171,19 +1178,23 @@ struct bnx2x_func_start_params { /* Function cos mode */ u8 network_cos_mode; - /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */ - u8 tunnel_mode; + /* UDP dest port for VXLAN */ + u16 vxlan_dst_port; - /* tunneling classification enablement */ - u8 tunn_clss_en; + /* UDP dest port for Geneve */ + u16 geneve_dst_port; - /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ - u8 gre_tunnel_type; + /* Enable inner Rx classifications for L2GRE packets */ + u8 inner_clss_l2gre; - /* Enables Inner GRE RSS on the function, depends on the client RSS - * capailities - */ - u8 inner_gre_rss_en; + /* Enable inner Rx classifications for L2-Geneve packets */ + u8 inner_clss_l2geneve; + + /* Enable inner Rx classification for vxlan packets */ + u8 inner_clss_vxlan; + + /* Enable RSS according to inner header */ + u8 inner_rss; /* Allows accepting of packets failing MF classification, possibly * only matching a given ethertype @@ -1200,6 +1211,11 @@ struct bnx2x_func_start_params { /* Prevent inner vlans from being added by FW */ u8 no_added_tags; + + /* Inner-to-Outer vlan priority mapping */ + u8 c2s_pri[MAX_VLAN_PRIORITIES]; + u8 c2s_pri_default; + u8 c2s_pri_valid; }; struct bnx2x_func_switch_update_params { @@ -1207,8 +1223,8 @@ struct bnx2x_func_switch_update_params { u16 vlan; u16 vlan_eth_type; u8 vlan_force_prio; - u8 tunnel_mode; - u8 gre_tunnel_type; + u16 vxlan_dst_port; + u16 geneve_dst_port; }; struct bnx2x_func_afex_update_params { @@ -1229,6 +1245,7 @@ struct bnx2x_func_tx_start_params { u8 dcb_enabled; u8 dcb_version; u8 dont_add_pri_0_en; + u8 dcb_outer_pri[MAX_TRAFFIC_TYPES]; }; struct bnx2x_func_set_timesync_params { @@ -1396,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, unsigned long *pstate, bnx2x_obj_type type, struct bnx2x_credit_pool_obj *vlans_pool); +void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool); + int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, @@ -1466,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, struct bnx2x_credit_pool_obj *p, u8 func_id, u8 func_num); +void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, + int base, int credit); /****************** RSS CONFIGURATION ****************/ void bnx2x_init_rss_config_obj(struct bnx2x *bp, @@ -1493,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, u8 *ind_table); +#define PF_MAC_CREDIT_E2(bp, func_num) \ + ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * 
VF_MAC_CREDIT_CNT) / \ + func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT) + +#define PF_VLAN_CREDIT_E2(bp, func_num) \ + ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \ + func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT) + #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index f67348d16..9d027348c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1,15 +1,17 @@ -/* bnx2x_sriov.c: Broadcom Everest network driver. +/* bnx2x_sriov.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior @@ -195,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, setup_p->gen_params.stat_id = vfq_stat_id(vf, q); setup_p->gen_params.fp_hsi = vf->fp_hsi; - /* Setup-op pause params: - * Nothing to do, the pause thresholds are set by default to 0 which - * effectively turns off the feature for this queue. We don't want - * one queue (VF) to interfering with another queue (another VF) - */ - if (vf->cfg_flags & VF_CFG_FW_FC) - BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", - vf->abs_vfid); /* Setup-op flags: * collect statistics, zero statistics, local-switching, security, * OV for Flex10, RSS and MCAST for leading @@ -358,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, } static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, - int qid, bool drv_only, bool mac) + int qid, bool drv_only, int type) { struct bnx2x_vlan_mac_ramrod_params ramrod; int rc; DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, - mac ? "MACs" : "VLANs"); + (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" : + (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs"); /* Prepare ramrod params */ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); - if (mac) { + if (type == BNX2X_VF_FILTER_VLAN_MAC) { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj); + } else if (type == BNX2X_VF_FILTER_MAC) { set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); } else { - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &ramrod.user_req.vlan_mac_flags); ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); } ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; @@ -391,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, &ramrod.ramrod_flags); if (rc) { BNX2X_ERR("Failed to delete all %s\n", - mac ? "MACs" : "VLANs"); + (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" : + (type == BNX2X_VF_FILTER_MAC) ? 
"MACs" : "VLANs"); return rc; } - /* Clear the vlan counters */ - if (!mac) - atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); - return 0; } @@ -412,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", vf->abs_vfid, filter->add ? "Adding" : "Deleting", - filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); + (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" : + (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN"); /* Prepare ramrod params */ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); - if (filter->type == BNX2X_VF_FILTER_VLAN) { - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &ramrod.user_req.vlan_mac_flags); + if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) { + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj); + ramrod.user_req.u.vlan.vlan = filter->vid; + memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + } else if (filter->type == BNX2X_VF_FILTER_VLAN) { ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); ramrod.user_req.u.vlan.vlan = filter->vid; } else { @@ -429,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; - /* Verify there are available vlan credits */ - if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && - (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= - vf_vlan_rules_cnt(vf))) { - BNX2X_ERR("No credits for vlan [%d >= %d]\n", - atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), - vf_vlan_rules_cnt(vf)); - return -ENOMEM; - } - set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); if (drv_only) set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); @@ -450,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, if (rc && rc != -EEXIST) { BNX2X_ERR("Failed to %s %s\n", filter->add ? "add" : "delete", - filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : - "VLAN"); + (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? + "VLAN-MAC" : + (filter->type == BNX2X_VF_FILTER_MAC) ? 
+ "MAC" : "VLAN"); return rc; } - /* Update the vlan counters */ - if (filter->type == BNX2X_VF_FILTER_VLAN) - bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, - &bnx2x_vfq(vf, qid, vlan_count)); - return 0; } @@ -511,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, if (rc) goto op_err; - /* Configure vlan0 for leading queue */ - if (!qid) { - struct bnx2x_vf_mac_vlan_filter filter; - - memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); - filter.type = BNX2X_VF_FILTER_VLAN; - filter.add = true; - filter.vid = 0; - rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); - if (rc) - goto op_err; - } - /* Schedule the configuration of any pending vlan filters */ - vf->cfg_flags |= VF_CFG_VLAN; bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_MSG_IOV); return 0; @@ -544,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, /* If needed, clean the filtering data base */ if ((qid == LEADING_IDX) && bnx2x_validate_vf_sp_objs(bp, vf, false)) { - rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, + BNX2X_VF_FILTER_VLAN_MAC); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, + BNX2X_VF_FILTER_VLAN); if (rc) goto op_err; - rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, + BNX2X_VF_FILTER_MAC); if (rc) goto op_err; } @@ -680,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) /* Remove filtering if feasible */ if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, - false, false); + false, + BNX2X_VF_FILTER_VLAN_MAC); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, + BNX2X_VF_FILTER_VLAN); if (rc) goto op_err; rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, - false, true); + false, + BNX2X_VF_FILTER_MAC); if (rc) goto op_err; rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); @@ -765,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); - if (vf->cfg_flags & VF_CFG_INT_SIMD) - val |= IGU_VF_CONF_SINGLE_ISR_EN; val &= ~IGU_VF_CONF_PARENT_MASK; val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); @@ -845,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) return 0; } -static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, - struct bnx2x_virtf *vf, - int new) -{ - int num = vf_vlan_rules_cnt(vf); - int diff = new - num; - bool rc = true; - - DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", - vf->abs_vfid, new, num); - - if (diff > 0) - rc = bp->vlans_pool.get(&bp->vlans_pool, diff); - else if (diff < 0) - rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); - - if (rc) - vf_vlan_rules_cnt(vf) = new; - else - DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", - vf->abs_vfid); -} - /* must be called after the number of PF queues and the number of VFs are * both known */ @@ -875,21 +833,13 @@ static void bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) { struct vf_pf_resc_request *resc = &vf->alloc_resc; - u16 vlan_count = 0; /* will be set only during VF-ACQUIRE */ resc->num_rxqs = 0; resc->num_txqs = 0; - /* no credit calculations for macs (just yet) */ - resc->num_mac_filters = 1; - - /* divvy up vlan rules */ - 
bnx2x_iov_re_set_vlan_filters(bp, vf, 0); - vlan_count = bp->vlans_pool.check(&bp->vlans_pool); - vlan_count = 1 << ilog2(vlan_count); - bnx2x_iov_re_set_vlan_filters(bp, vf, - vlan_count / BNX2X_NR_VIRTFN(bp)); + resc->num_mac_filters = VF_MAC_CREDIT_CNT; + resc->num_vlan_filters = VF_VLAN_CREDIT_CNT; /* no real limitation */ resc->num_mc_filters = 0; @@ -1338,6 +1288,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, mutex_init(&bp->vfdb->bulletin_mutex); + if (SHMEM2_HAS(bp, sriov_switch_mode)) + SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB); + return 0; failed: DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); @@ -1620,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) vf->filter_state = 0; vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); + bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0, + vf_vlan_rules_cnt(vf)); + bnx2x_init_credit_pool(&vf->vf_macs_pool, 0, + vf_mac_rules_cnt(vf)); + /* init mcast object - This object will be re-initialized * during VF-ACQUIRE with the proper cl_id and cid. * It needs to be initialized here so that it can be safely @@ -2032,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); - /* Save a vlan filter for the Hypervisor */ return ((req_resc->num_rxqs <= rxq_cnt) && (req_resc->num_txqs <= txq_cnt) && (req_resc->num_sbs <= vf_sb_count(vf)) && (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && - (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); + (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); } /* CORE VF API */ @@ -2091,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, vf_sb_count(vf) = resc->num_sbs; vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); vf_txq_count(vf) = resc->num_txqs ? 
: bnx2x_vf_max_queue_cnt(bp, vf); - if (resc->num_mac_filters) - vf_mac_rules_cnt(vf) = resc->num_mac_filters; - /* Add an additional vlan filter credit for the hypervisor */ - bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); DP(BNX2X_MSG_IOV, "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", vf_sb_count(vf), vf_rxq_count(vf), vf_txq_count(vf), vf_mac_rules_cnt(vf), - vf_vlan_rules_visible_cnt(vf)); + vf_vlan_rules_cnt(vf)); /* Initialize the queues */ if (!vf->vfqs) { @@ -2133,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) { struct bnx2x_func_init_params func_init = {0}; - u16 flags = 0; int i; /* the sb resources are initialized at this point, do the @@ -2160,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) /* reset IGU VF statistics: MSIX */ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); - /* vf init */ - if (vf->cfg_flags & VF_CFG_STATS) - flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); - - if (vf->cfg_flags & VF_CFG_TPA) - flags |= FUNC_FLG_TPA; - - if (is_vf_multi(vf)) - flags |= FUNC_FLG_RSS; - /* function setup */ - func_init.func_flgs = flags; func_init.pf_id = BP_FUNC(bp); func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); - func_init.fw_stat_map = vf->fw_stat_map; - func_init.spq_map = vf->spq_map; - func_init.spq_prod = 0; bnx2x_func_init(bp, &func_init); /* Enable the vf */ @@ -2589,8 +2527,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); for_each_vf(bp, vfidx) { - bulletin = BP_VF_BULLETIN(bp, vfidx); - if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) + bulletin = BP_VF_BULLETIN(bp, vfidx); + if (bulletin->valid_bitmap & (1 << VLAN_VALID)) bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); } } @@ -2808,20 +2746,58 @@ out: return rc; } -int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) +static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp, + struct bnx2x_virtf *vf, bool accept) +{ + struct bnx2x_rx_mode_ramrod_params rx_ramrod; + unsigned long accept_flags; + + /* need to remove/add the VF's accept_any_vlan bit */ + accept_flags = bnx2x_leading_vfq(vf, accept_flags); + if (accept) + set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + else + clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, + accept_flags); + bnx2x_leading_vfq(vf, accept_flags) = accept_flags; + bnx2x_config_rx_mode(bp, &rx_ramrod); +} + +static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf, + u16 vlan, bool add) { - struct bnx2x_queue_state_params q_params = {NULL}; struct bnx2x_vlan_mac_ramrod_params ramrod_param; - struct bnx2x_queue_update_params *update_params; + unsigned long ramrod_flags = 0; + int rc = 0; + + /* configure the new vlan to device */ + memset(&ramrod_param, 0, sizeof(ramrod_param)); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj); + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.user_req.u.vlan.vlan = vlan; + ramrod_param.user_req.cmd = add ? 
BNX2X_VLAN_MAC_ADD + : BNX2X_VLAN_MAC_DEL; + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc) { + BNX2X_ERR("failed to configure vlan\n"); + return -EINVAL; + } + + return 0; +} + +int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) +{ struct pf_vf_bulletin_content *bulletin = NULL; - struct bnx2x_rx_mode_ramrod_params rx_ramrod; struct bnx2x *bp = netdev_priv(dev); struct bnx2x_vlan_mac_obj *vlan_obj; unsigned long vlan_mac_flags = 0; unsigned long ramrod_flags = 0; struct bnx2x_virtf *vf = NULL; - unsigned long accept_flags; - int rc; + int i, rc; if (vlan > 4095) { BNX2X_ERR("illegal vlan value %d\n", vlan); @@ -2850,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) bulletin->valid_bitmap &= ~(1 << VLAN_VALID); bulletin->vlan = vlan; + /* Post update on VF's bulletin board */ + rc = bnx2x_post_vf_bulletin(bp, vfidx); + if (rc) + BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); mutex_unlock(&bp->vfdb->bulletin_mutex); /* is vf initialized and queue set up? */ @@ -2876,84 +2856,76 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) goto out; } - /* need to remove/add the VF's accept_any_vlan bit */ - accept_flags = bnx2x_leading_vfq(vf, accept_flags); - if (vlan) - clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); - else - set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); - - bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, - accept_flags); - bnx2x_leading_vfq(vf, accept_flags) = accept_flags; - bnx2x_config_rx_mode(bp, &rx_ramrod); + /* clear accept_any_vlan when HV forces vlan, otherwise + * according to VF capabilities + */ + if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER)) + bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan); - /* configure the new vlan to device */ - memset(&ramrod_param, 0, sizeof(ramrod_param)); - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - ramrod_param.vlan_mac_obj = vlan_obj; - ramrod_param.ramrod_flags = ramrod_flags; - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &ramrod_param.user_req.vlan_mac_flags); - ramrod_param.user_req.u.vlan.vlan = vlan; - ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; - rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc) { - BNX2X_ERR("failed to configure vlan\n"); - rc = -EINVAL; + rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true); + if (rc) goto out; - } - /* send queue update ramrod to configure default vlan and silent - * vlan removal + /* send queue update ramrods to configure default vlan and + * silent vlan removal */ - __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - q_params.cmd = BNX2X_Q_CMD_UPDATE; - q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); - update_params = &q_params.params.update; - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, - &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, - &update_params->update_flags); - if (vlan == 0) { - /* if vlan is 0 then we want to leave the VF traffic - * untagged, and leave the incoming traffic untouched - * (i.e. do not remove any vlan tags). - */ - __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, - &update_params->update_flags); - __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, - &update_params->update_flags); - } else { - /* configure default vlan to vf queue and set silent - * vlan removal (the vf remains unaware of this vlan). 
- */ - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + for_each_vfq(vf, i) { + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_queue_update_params *update_params; + + q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj); + + /* validate the Q is UP */ + if (bnx2x_get_q_logical_state(bp, q_params.q_obj) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + continue; + + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, &update_params->update_flags); - update_params->def_vlan = vlan; - update_params->silent_removal_value = - vlan & VLAN_VID_MASK; - update_params->silent_removal_mask = VLAN_VID_MASK; - } + if (vlan == 0) { + /* if vlan is 0 then we want to leave the VF traffic + * untagged, and leave the incoming traffic untouched + * (i.e. do not remove any vlan tags). + */ + __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + } else { + /* configure default vlan to vf queue and set silent + * vlan removal (the vf remains unaware of this vlan). + */ + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + update_params->def_vlan = vlan; + update_params->silent_removal_value = + vlan & VLAN_VID_MASK; + update_params->silent_removal_mask = VLAN_VID_MASK; + } - /* Update the Queue state */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Failed to configure default VLAN\n"); - goto out; + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure default VLAN queue %d\n", + i); + goto out; + } } - - - /* clear the flag indicating that this VF needs its vlan - * (will only be set if the HV configured the Vlan before vf was - * up and we were called because the VF came up later - */ out: - vf->cfg_flags &= ~VF_CFG_VLAN; bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + if (rc) + DP(BNX2X_MSG_IOV, + "updated VF[%d] vlan configuration (vlan = %d)\n", + vfidx, vlan); + return rc; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 66ee62a04..670a581ff 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -1,15 +1,17 @@ -/* bnx2x_sriov.h: Broadcom Everest network driver. +/* bnx2x_sriov.h: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 
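For the PF_MAC_CREDIT_E2()/PF_VLAN_CREDIT_E2() macros introduced in bnx2x_sp.h above, the split of the path-wide CAM credit is easier to see with numbers plugged in. Everything below is illustrative except GET_NUM_VFS_PER_PATH (fixed at 64 in bnx2x_sriov.h later in this patch) and VF_MAC_CREDIT_CNT (1):

/* Worked example for PF_MAC_CREDIT_E2(bp, func_num), assuming
 * MAX_MAC_CREDIT_E2 = 272, two PFs on the path (func_num = 2) and
 * 16 VFs behind this PF (GET_NUM_VFS_PER_PF) - assumed values only.
 *
 *   carved out for all path VFs:  64 * VF_MAC_CREDIT_CNT       =  64
 *   this PF's share of the rest:  (272 - 64) / 2               = 104
 *   plus its own VFs' guarantee:  104 + 16 * VF_MAC_CREDIT_CNT = 120
 *
 * Each VF is guaranteed its credit up front, while the PF pool holds
 * its share of the remainder plus the credits reserved for the VFs it
 * parents.
 */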
* * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior @@ -75,7 +77,10 @@ struct bnx2x_vf_queue { /* VLANs object */ struct bnx2x_vlan_mac_obj vlan_obj; - atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ + + /* VLAN-MACs object */ + struct bnx2x_vlan_mac_obj vlan_mac_obj; + unsigned long accept_flags; /* last accept flags configured */ /* Queue Slow-path State object */ @@ -103,8 +108,10 @@ struct bnx2x_virtf; struct bnx2x_vf_mac_vlan_filter { int type; -#define BNX2X_VF_FILTER_MAC 1 -#define BNX2X_VF_FILTER_VLAN 2 +#define BNX2X_VF_FILTER_MAC BIT(0) +#define BNX2X_VF_FILTER_VLAN BIT(1) +#define BNX2X_VF_FILTER_VLAN_MAC \ + (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/ bool add; u8 *mac; @@ -119,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters { /* vf context */ struct bnx2x_virtf { u16 cfg_flags; -#define VF_CFG_STATS 0x0001 -#define VF_CFG_FW_FC 0x0002 -#define VF_CFG_TPA 0x0004 -#define VF_CFG_INT_SIMD 0x0008 -#define VF_CACHE_LINE 0x0010 -#define VF_CFG_VLAN 0x0020 -#define VF_CFG_STATS_COALESCE 0x0040 -#define VF_CFG_EXT_BULLETIN 0x0080 +#define VF_CFG_STATS_COALESCE 0x1 +#define VF_CFG_EXT_BULLETIN 0x2 +#define VF_CFG_VLAN_FILTER 0x4 u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO * IFLA_VF_LINK_STATE_ENABLE * IFLA_VF_LINK_STATE_DISABLE @@ -140,9 +142,8 @@ struct bnx2x_virtf { bool flr_clnup_stage; /* true during flr cleanup */ /* dma */ - dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ + dma_addr_t fw_stat_map; u16 stats_stride; - dma_addr_t spq_map; dma_addr_t bulletin_map; /* Allocated resources counters. Before the VF is acquired, the @@ -163,8 +164,6 @@ struct bnx2x_virtf { #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) - /* Hide a single vlan filter credit for the hypervisor */ -#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1) u8 sb_count; /* actual number of SBs */ u8 igu_base_id; /* base igu status block id */ @@ -207,6 +206,9 @@ struct bnx2x_virtf { enum channel_tlvs op_current; u8 fp_hsi; + + struct bnx2x_credit_pool_obj vf_vlans_pool; + struct bnx2x_credit_pool_obj vf_macs_pool; }; #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) @@ -230,6 +232,12 @@ struct bnx2x_virtf { #define FW_VF_HANDLE(abs_vfid) \ (abs_vfid + FW_PF_MAX_HANDLE) +#define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */ +#define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? 
(bp)->vfdb->sriov.total \ + : 0) +#define VF_MAC_CREDIT_CNT 1 +#define VF_VLAN_CREDIT_CNT 2 /* VLAN0 + 'real' VLAN */ + /* locking and unlocking the channel mutex */ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv); @@ -273,6 +281,10 @@ struct bnx2x_vf_sp { struct eth_classify_rules_ramrod_data e2; } vlan_rdata; + union { + struct eth_classify_rules_ramrod_data e2; + } vlan_mac_rdata; + union { struct eth_filter_rules_ramrod_data e2; } rx_mode_rdata; @@ -536,8 +548,14 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx); int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state); +int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add); #else /* CONFIG_BNX2X_SRIOV */ +#define GET_NUM_VFS_PER_PATH(bp) 0 +#define GET_NUM_VFS_PER_PF(bp) 0 +#define VF_MAC_CREDIT_CNT 0 +#define VF_VLAN_CREDIT_CNT 0 + static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj) {} static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} @@ -604,5 +622,7 @@ struct pf_vf_bulletin_content; static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, bool support_long) {} +static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; } + #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 69d699f07..7e0919aa4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1,6 +1,8 @@ -/* bnx2x_stats.c: Broadcom Everest network driver. +/* bnx2x_stats.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 965539a9d..b2644ed13 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -1,6 +1,8 @@ -/* bnx2x_stats.h: Broadcom Everest network driver. +/* bnx2x_stats.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 06b8c0d8f..1374e5394 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -1,15 +1,17 @@ -/* bnx2x_vfpf.c: Broadcom Everest network driver. +/* bnx2x_vfpf.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 
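With bnx2x_vfpf_update_vlan() exported above (and stubbed to a no-op when CONFIG_BNX2X_SRIOV is off), a VF can forward 8021q filter requests to its PF rather than touching the CAM itself. A sketch of how an ndo_vlan_rx_add_vid callback might sit on top of it; the function name is hypothetical, and treating queue 0 as the filter carrier is an assumption:

#include <linux/netdevice.h>

static int example_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* VFs cannot program classification rules directly; relay the
	 * request over the VF->PF mailbox against queue 0.
	 */
	if (IS_VF(bp))
		return bnx2x_vfpf_update_vlan(bp, vid, 0, true);

	/* A PF would drive its own vlan_obj via bnx2x_config_vlan_mac() */
	return 0;
}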
* * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior @@ -245,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) req->resc_request.num_sbs = bp->igu_sb_cnt; req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS; req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS; + req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS; /* pf 2 vf bulletin board address */ req->bulletin_addr = bp->pf2vf_bulletin_mapping; @@ -255,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* Bulletin support for bulletin board with length > legacy length */ req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN; + /* vlan filtering is supported */ + req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER; /* add list termination tlv */ bnx2x_add_tlv(bp, req, @@ -373,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; + bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters; + strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, sizeof(bp->fw_ver)); @@ -546,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, BNX2X_FILTER_MAC_PENDING, &vf->filter_state, BNX2X_OBJ_TYPE_RX_TX, - &bp->macs_pool); + &vf->vf_macs_pool); /* vlan */ bnx2x_init_vlan_obj(bp, &q->vlan_obj, cl_id, q->cid, func_id, @@ -555,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, BNX2X_FILTER_VLAN_PENDING, &vf->filter_state, BNX2X_OBJ_TYPE_RX_TX, - &bp->vlans_pool); - + &vf->vf_vlans_pool); + /* vlan-mac */ + bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, vlan_mac_rdata), + bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata), + BNX2X_FILTER_VLAN_MAC_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &vf->vf_macs_pool, + &vf->vf_vlans_pool); /* mcast */ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, q->cid, func_id, func_id, @@ -723,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID; if (set) - req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC; + req->filters[0].flags |= VFPF_Q_FILTER_SET; /* sample bulletin board for new mac */ bnx2x_sample_bulletin(bp); @@ -911,6 +927,67 @@ out: return 0; } +/* request pf to add a vlan for the vf */ +int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) +{ + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc = 0; + + if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) { + DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n"); + return 0; + } + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED; + req->vf_qid = vf_qid; + req->n_mac_vlan_filters = 1; + + req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID; + + if (add) + req->filters[0].flags |= 
VFPF_Q_FILTER_SET; + + /* sample bulletin board for hypervisor vlan */ + bnx2x_sample_bulletin(bp); + + if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { + BNX2X_ERR("Hypervisor will dicline the request, avoiding\n"); + rc = -EINVAL; + goto out; + } + + req->filters[0].vlan_tag = vid; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del", + vid); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) { int mode = bp->rx_mode; @@ -934,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + if (mode == BNX2X_RX_MODE_PROMISC) + req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN; } + if (bp->accept_any_vlan) + req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN; + req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; req->vf_qid = 0; @@ -1188,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | PFVF_CAP_TPA | - PFVF_CAP_TPA_UPDATE); + PFVF_CAP_TPA_UPDATE | + PFVF_CAP_VLAN_FILTER); bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, sizeof(resp->pfdev_info.fw_ver)); @@ -1203,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_max_queue_cnt(bp, vf); resc->num_sbs = vf_sb_count(vf); resc->num_mac_filters = vf_mac_rules_cnt(vf); - resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf); + resc->num_vlan_filters = vf_vlan_rules_cnt(vf); resc->num_mc_filters = 0; if (status == PFVF_STATUS_SUCCESS) { @@ -1370,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN; } + if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) { + DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n", + vf->abs_vfid); + vf->cfg_flags |= VF_CFG_VLAN_FILTER; + } else { + vf->cfg_flags &= ~VF_CFG_VLAN_FILTER; + } + out: /* response */ bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); @@ -1382,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, int rc; /* record ghost addresses from vf message */ - vf->spq_map = init->spq_addr; vf->fw_stat_map = init->stats_addr; vf->stats_stride = init->stats_stride; rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); @@ -1578,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, if ((msg_filter->flags & type_flag) != type_flag) continue; - if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { + memset(&fl->filters[j], 0, sizeof(fl->filters[j])); + if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) { fl->filters[j].mac = msg_filter->mac; - fl->filters[j].type = BNX2X_VF_FILTER_MAC; - } else { + fl->filters[j].type |= BNX2X_VF_FILTER_MAC; + } + if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) { fl->filters[j].vid = msg_filter->vlan_tag; - fl->filters[j].type = BNX2X_VF_FILTER_VLAN; + fl->filters[j].type |= 
BNX2X_VF_FILTER_VLAN; } - fl->filters[j].add = - (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? - true : false; + fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET); fl->count++; + j++; } if (!fl->count) kfree(fl); @@ -1598,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, return 0; } +static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters, + u32 flags) +{ + int i, cnt = 0; + + for (i = 0; i < filters->n_mac_vlan_filters; i++) + if ((filters->filters[i].flags & flags) == flags) + cnt++; + + return cnt; +} + static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, struct vfpf_q_mac_vlan_filter *filter) { @@ -1629,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID +#define VFPF_VLAN_MAC_FILTER (VFPF_VLAN_FILTER | VFPF_MAC_FILTER) static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) { @@ -1639,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) /* check for any mac/vlan changes */ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { - /* build mac list */ struct bnx2x_vf_mac_vlan_filters *fl = NULL; + /* build vlan-mac list */ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, - VFPF_MAC_FILTER); + VFPF_VLAN_MAC_FILTER); if (rc) goto op_err; if (fl) { - /* set mac list */ + /* set vlan-mac list */ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, msg->vf_qid, false); @@ -1657,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) goto op_err; } - /* build vlan list */ + /* build mac list */ fl = NULL; rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, - VFPF_VLAN_FILTER); + VFPF_MAC_FILTER); if (rc) goto op_err; if (fl) { - /* set vlan list */ + /* set mac list */ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, msg->vf_qid, false); if (rc) goto op_err; } + } if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { @@ -1687,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); } - /* A packet arriving the vf's mac should be accepted - * with any vlan, unless a vlan has already been - * configured. + /* any_vlan is not configured if HV is forcing VLAN + * any_vlan is configured if + * 1. VF does not support vlan filtering + * OR + * 2. VF supports vlan filtering and explicitly requested it */ - if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) && + (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) || + msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN)) __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); /* set rx-mode */ @@ -1727,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp, * since queue was not set up. */ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { - /* once a mac was set by ndo can only accept a single mac... */ - if (filters->n_mac_vlan_filters > 1) { - BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", - vf->abs_vfid); - rc = -EPERM; - goto response; + struct vfpf_q_mac_vlan_filter *filter = NULL; + int i; + + for (i = 0; i < filters->n_mac_vlan_filters; i++) { + if (!(filters->filters[i].flags & + VFPF_Q_FILTER_DEST_MAC_VALID)) + continue; + + /* once a mac was set by ndo can only accept + * a single mac... 
+ */ + if (filter) { + BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n", + vf->abs_vfid, + filters->n_mac_vlan_filters); + rc = -EPERM; + goto response; + } + + filter = &filters->filters[i]; } /* ...and only the mac set by the ndo */ - if (filters->n_mac_vlan_filters == 1 && - !ether_addr_equal(filters->filters->mac, bulletin->mac)) { + if (filter && + !ether_addr_equal(filter->mac, bulletin->mac)) { BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", vf->abs_vfid); @@ -1759,17 +1882,14 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp, /* if vlan was set by hypervisor we don't allow guest to config vlan */ if (bulletin->valid_bitmap & 1 << VLAN_VALID) { - int i; - /* search for vlan filters */ - for (i = 0; i < filters->n_mac_vlan_filters; i++) { - if (filters->filters[i].flags & - VFPF_Q_FILTER_VLAN_TAG_VALID) { - BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", - vf->abs_vfid); - rc = -EPERM; - goto response; - } + + if (bnx2x_vf_filters_contain(filters, + VFPF_Q_FILTER_VLAN_TAG_VALID)) { + BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", + vf->abs_vfid); + rc = -EPERM; + goto response; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index b86479fc0..64f2b52c5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -1,16 +1,22 @@ -/* bnx2x_vfpf.h: Broadcom Everest network driver. +/* bnx2x_vfpf.h: Qlogic Everest network driver. * * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * under the terms of the GNU General Public License version 2 (the “GPL”), + * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following + * added to such license: * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written - * consent. + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is + * not derived from this software. The special exception does not apply to any + * modifications of the software. 
* * Maintained by: Ariel Elior * Written by: Ariel Elior @@ -64,6 +70,8 @@ struct hw_sb_info { #define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 #define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 +#define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020 + #define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) #define BULLETIN_CONTENT_LEGACY_SIZE (32) #define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ @@ -127,6 +135,7 @@ struct vfpf_acquire_tlv { u8 fp_hsi_ver; u8 caps; #define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0) +#define VF_CAP_SUPPORT_VLAN_FILTER (1 << 1) } vfdev_info; struct vf_pf_resc_request resc_request; @@ -168,10 +177,12 @@ struct pfvf_acquire_resp_tlv { struct pf_vf_pfdev_info { u32 chip_num; u32 pf_cap; -#define PFVF_CAP_RSS 0x00000001 -#define PFVF_CAP_DHC 0x00000002 -#define PFVF_CAP_TPA 0x00000004 -#define PFVF_CAP_TPA_UPDATE 0x00000008 +#define PFVF_CAP_RSS 0x00000001 +#define PFVF_CAP_DHC 0x00000002 +#define PFVF_CAP_TPA 0x00000004 +#define PFVF_CAP_TPA_UPDATE 0x00000008 +#define PFVF_CAP_VLAN_FILTER 0x00000010 + char fw_ver[32]; u16 db_size; u8 indices_per_sb; @@ -288,7 +299,7 @@ struct vfpf_q_mac_vlan_filter { u32 flags; #define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 #define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 -#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ +#define VFPF_Q_FILTER_SET 0x100 /* set/clear */ u8 mac[ETH_ALEN]; u16 vlan_tag; }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 17c145fdf..b69dc58fa 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -192,6 +192,7 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) struct drv_ctl_info info; struct drv_ctl_io *io = &info.data.io; + memset(&info, 0, sizeof(struct drv_ctl_info)); info.cmd = DRV_CTL_CTX_WR_CMD; io->cid_addr = cid_addr; io->offset = off; @@ -206,6 +207,7 @@ static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) struct drv_ctl_info info; struct drv_ctl_io *io = &info.data.io; + memset(&info, 0, sizeof(struct drv_ctl_info)); info.cmd = DRV_CTL_CTXTBL_WR_CMD; io->offset = off; io->dma_addr = addr; @@ -219,6 +221,7 @@ static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) struct drv_ctl_info info; struct drv_ctl_l2_ring *ring = &info.data.ring; + memset(&info, 0, sizeof(struct drv_ctl_info)); if (start) info.cmd = DRV_CTL_START_L2_CMD; else @@ -236,6 +239,7 @@ static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) struct drv_ctl_info info; struct drv_ctl_io *io = &info.data.io; + memset(&info, 0, sizeof(struct drv_ctl_info)); info.cmd = DRV_CTL_IO_WR_CMD; io->offset = off; io->data = val; @@ -249,13 +253,14 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) struct drv_ctl_info info; struct drv_ctl_io *io = &info.data.io; + memset(&info, 0, sizeof(struct drv_ctl_info)); info.cmd = DRV_CTL_IO_RD_CMD; io->offset = off; ethdev->drv_ctl(dev->netdev, &info); return io->data; } -static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg) +static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state) { struct cnic_local *cp = dev->cnic_priv; struct cnic_eth_dev *ethdev = cp->ethdev; @@ -263,6 +268,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg) struct fcoe_capabilities *fcoe_cap = &info.data.register_data.fcoe_features; + memset(&info, 0, sizeof(struct drv_ctl_info)); if (reg) { info.cmd = 
DRV_CTL_ULP_REGISTER_CMD; if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap) @@ -272,6 +278,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg) } info.data.ulp_type = ulp_type; + info.drv_state = state; ethdev->drv_ctl(dev->netdev, &info); } @@ -286,6 +293,7 @@ static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count) struct cnic_eth_dev *ethdev = cp->ethdev; struct drv_ctl_info info; + memset(&info, 0, sizeof(struct drv_ctl_info)); info.cmd = cmd; info.data.credit.credit_count = count; ethdev->drv_ctl(dev->netdev, &info); @@ -591,7 +599,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, mutex_unlock(&cnic_lock); - cnic_ulp_ctl(dev, ulp_type, true); + cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE); return 0; @@ -636,7 +644,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); - cnic_ulp_ctl(dev, ulp_type, false); + if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) + cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED); + else + cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE); return 0; } @@ -4267,6 +4278,7 @@ static void cnic_delete_task(struct work_struct *work) cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); + memset(&info, 0, sizeof(struct drv_ctl_info)); info.cmd = DRV_CTL_ISCSI_STOPPED_CMD; cp->ethdev->drv_ctl(dev->netdev, &info); } @@ -5433,6 +5445,23 @@ static void cnic_free_dev(struct cnic_dev *dev) kfree(dev); } +static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev, + struct cnic_fc_npiv_tbl *npiv_tbl) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + int ret; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2x is down */ + + if (!BNX2X_CHIP_IS_E2_PLUS(bp)) + return -EINVAL; + + ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl); + return ret; +} + static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, struct pci_dev *pdev) { @@ -5451,6 +5480,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, cdev->register_device = cnic_register_device; cdev->unregister_device = cnic_unregister_device; cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; + cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl; cp = cdev->cnic_priv; cp->dev = cdev; diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index ef6125b0e..789e5c7e9 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -15,8 +15,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.21" -#define CNIC_MODULE_RELDATE "January 29, 2015" +#define CNIC_MODULE_VERSION "2.5.22" +#define CNIC_MODULE_RELDATE "July 20, 2015" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 @@ -151,6 +151,11 @@ struct drv_ctl_register_data { struct drv_ctl_info { int cmd; + int drv_state; +#define DRV_NOP 0 +#define DRV_ACTIVE 1 +#define DRV_INACTIVE 2 +#define DRV_UNLOADED 3 union { struct drv_ctl_spq_credit credit; struct drv_ctl_io io; @@ -161,6 +166,15 @@ struct drv_ctl_info { } data; }; +#define MAX_NPIV_ENTRIES 64 +#define FC_NPIV_WWN_SIZE 8 + +struct cnic_fc_npiv_tbl { + u8 wwpn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE]; + u8 wwnn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE]; + u32 count; +}; + struct cnic_ops { struct module *cnic_owner; /* Calls to these functions are protected by RCU. 
When @@ -226,6 +240,8 @@ struct cnic_eth_dev { int (*drv_submit_kwqes_16)(struct net_device *, struct kwqe_16 *[], u32); int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); + int (*drv_get_fc_npiv_tbl)(struct net_device *, + struct cnic_fc_npiv_tbl *); unsigned long reserved1[2]; union drv_info_to_mcp *addr_drv_info_to_mcp; }; @@ -314,6 +330,7 @@ struct cnic_dev { struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type); int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type, char *data, u16 data_size); + int (*get_fc_npiv_tbl)(struct cnic_dev *, struct cnic_fc_npiv_tbl *); unsigned long flags; #define CNIC_F_CNIC_UP 1 #define CNIC_F_BNX2_CLASS 3 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 09ff09f82..5e3cd76cb 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -907,9 +907,10 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, } bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - - if (mode == GENET_POWER_PASSIVE) + if (mode == GENET_POWER_PASSIVE) { + bcmgenet_phy_power_set(priv->dev, true); bcmgenet_mii_reset(priv->dev); + } } /* ioctl handle special commands that are not present in ethtool. */ @@ -1684,6 +1685,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); } +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + static int init_umac(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; @@ -1724,15 +1743,8 @@ static int init_umac(struct bcmgenet_priv *priv) /* Enable Tx default queue 16 interrupts */ int0_enable |= UMAC_IRQ_TXDMA_DONE; - /* Monitor cable plug/unplugged event for internal PHY */ - if (phy_is_internal(priv->phydev)) { - int0_enable |= UMAC_IRQ_LINK_EVENT; - } else if (priv->ext_phy) { - int0_enable |= UMAC_IRQ_LINK_EVENT; - } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) - int0_enable |= UMAC_IRQ_LINK_EVENT; - + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { reg = bcmgenet_bp_mc_get(priv); reg |= BIT(priv->hw_params->bp_in_en_shift); @@ -2405,6 +2417,23 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) return IRQ_HANDLED; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcmgenet_poll_controller(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Invoke the main RX/TX interrupt handler */ + disable_irq(priv->irq0); + bcmgenet_isr0(priv->irq0, priv); + enable_irq(priv->irq0); + + /* And the interrupt handler for RX/TX priority queues */ + disable_irq(priv->irq1); + bcmgenet_isr1(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) { u32 reg; @@ -2629,6 +2658,9 @@ static void bcmgenet_netif_start(struct net_device *dev) netif_tx_start_all_queues(dev); + /* Monitor link interrupts now */ + 
bcmgenet_link_intr_enable(priv); + phy_start(priv->phydev); } @@ -2642,13 +2674,12 @@ static int bcmgenet_open(struct net_device *dev) netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); /* Turn on the clock */ - if (!IS_ERR(priv->clk)) - clk_prepare_enable(priv->clk); + clk_prepare_enable(priv->clk); /* If this is an internal GPHY, power it back on now, before UniMAC is * brought out of reset as absolutely no UniMAC activity is allowed */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); /* take MAC out of reset */ @@ -2667,7 +2698,7 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_ENERGY_DET_MASK; bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); @@ -2703,23 +2734,24 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq0; } - /* Re-configure the port multiplexer towards the PHY device */ - bcmgenet_mii_config(priv->dev, false); - - phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, - priv->phy_interface); + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } bcmgenet_netif_start(dev); return 0; +err_irq1: + free_irq(priv->irq1, priv); err_irq0: - free_irq(priv->irq0, dev); + free_irq(priv->irq0, priv); err_fini_dma: bcmgenet_fini_dma(priv); err_clk_disable: - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); return ret; } @@ -2773,11 +2805,10 @@ static int bcmgenet_close(struct net_device *dev) free_irq(priv->irq0, priv); free_irq(priv->irq1, priv); - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); return ret; } @@ -2953,6 +2984,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_set_mac_address = bcmgenet_set_mac_addr, .ndo_do_ioctl = bcmgenet_ioctl, .ndo_set_features = bcmgenet_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcmgenet_poll_controller, +#endif }; /* Array of GENET hardware parameters/characteristics */ @@ -3137,6 +3171,7 @@ static const struct of_device_id bcmgenet_match[] = { { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, { }, }; +MODULE_DEVICE_TABLE(of, bcmgenet_match); static int bcmgenet_probe(struct platform_device *pdev) { @@ -3226,11 +3261,12 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->version = pd->genet_version; priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); - if (IS_ERR(priv->clk)) + if (IS_ERR(priv->clk)) { dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); + priv->clk = NULL; + } - if (!IS_ERR(priv->clk)) - clk_prepare_enable(priv->clk); + clk_prepare_enable(priv->clk); bcmgenet_set_hw_params(priv); @@ -3241,8 +3277,10 @@ static int bcmgenet_probe(struct platform_device *pdev) INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); - if (IS_ERR(priv->clk_wol)) + if (IS_ERR(priv->clk_wol)) { dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); + priv->clk_wol = NULL; + } priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); if (IS_ERR(priv->clk_eee)) { @@ -3268,8 +3306,7 @@ static int bcmgenet_probe(struct platform_device *pdev) netif_carrier_off(dev); /* Turn off the main clock, WOL clock 
is handled separately */ - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); err = register_netdev(dev); if (err) @@ -3278,8 +3315,7 @@ static int bcmgenet_probe(struct platform_device *pdev) return err; err_clk_disable: - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); err: free_netdev(dev); return err; @@ -3331,7 +3367,7 @@ static int bcmgenet_suspend(struct device *d) if (device_may_wakeup(d) && priv->wolopts) { ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); clk_prepare_enable(priv->clk_wol); - } else if (phy_is_internal(priv->phydev)) { + } else if (priv->internal_phy) { ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); } @@ -3360,7 +3396,7 @@ static int bcmgenet_resume(struct device *d) /* If this is an internal GPHY, power it back on now, before UniMAC is * brought out of reset as absolutely no UniMAC activity is allowed */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); bcmgenet_umac_reset(priv); @@ -3375,14 +3411,14 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(priv->phydev); /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev, false); + bcmgenet_mii_config(priv->dev); /* disable ethernet MAC while updating its registers */ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_ENERGY_DET_MASK; bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 6159deab8..c739f7ebc 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -593,6 +593,7 @@ struct bcmgenet_priv { /* MDIO bus variables */ wait_queue_head_t wq; struct phy_device *phydev; + bool internal_phy; struct device_node *phy_dn; struct device_node *mdio_dn; struct mii_bus *mii_bus; @@ -670,7 +671,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_config(struct net_device *dev); +int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); void bcmgenet_mii_reset(struct net_device *dev); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index adf23d2ac..8bdfe5375 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -163,10 +163,26 @@ void bcmgenet_mii_setup(struct net_device *dev) phy_print_status(phydev); } + +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + if (dev && dev->phydev && status) + status->link = dev->phydev->link; + + return 0; +} + +/* Perform a voluntary PHY software reset, since the EPHY is very finicky about + * not doing it and will start corrupting packets + */ void bcmgenet_mii_reset(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + if (GENET_IS_V4(priv)) + return; + if (priv->phydev) { phy_init_hw(priv->phydev); phy_start_aneg(priv->phydev); @@ -226,9 +242,13 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) reg = 
bcmgenet_sys_readl(priv, SYS_PORT_CTRL); reg |= LED_ACT_SOURCE_MAC; bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); + + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + fixed_phy_set_link_update(priv->phydev, + bcmgenet_fixed_phy_link_update); } -int bcmgenet_mii_config(struct net_device *dev, bool init) +int bcmgenet_mii_config(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -238,10 +258,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) u32 port_ctrl; u32 reg; - priv->ext_phy = !phy_is_internal(priv->phydev) && + priv->ext_phy = !priv->internal_phy && (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) priv->phy_interface = PHY_INTERFACE_MODE_NA; switch (priv->phy_interface) { @@ -259,7 +279,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { phy_name = "internal PHY"; bcmgenet_internal_phy_setup(dev); } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { @@ -321,13 +341,12 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } - if (init) - dev_info(kdev, "configuring instance for %s\n", phy_name); + dev_info_once(kdev, "configuring instance for %s\n", phy_name); return 0; } -static int bcmgenet_mii_probe(struct net_device *dev) +int bcmgenet_mii_probe(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct device_node *dn = priv->pdev->dev.of_node; @@ -345,22 +364,6 @@ static int bcmgenet_mii_probe(struct net_device *dev) priv->old_pause = -1; if (dn) { - if (priv->phydev) { - pr_info("PHY already attached\n"); - return 0; - } - - /* In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { - ret = of_phy_register_fixed_link(dn); - if (ret) - return ret; - - priv->phy_dn = of_node_get(dn); - } - phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, phy_flags, priv->phy_interface); if (!phydev) { @@ -386,7 +389,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) * PHY speed which is needed for bcmgenet_mii_config() to configure * things appropriately. */ - ret = bcmgenet_mii_config(dev, true); + ret = bcmgenet_mii_config(dev); if (ret) { phy_disconnect(priv->phydev); return ret; @@ -397,14 +400,11 @@ static int bcmgenet_mii_probe(struct net_device *dev) /* The internal PHY has its link interrupts routed to the * Ethernet MAC ISRs */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; else priv->mii_bus->irq[phydev->addr] = PHY_POLL; - pr_info("attached PHY at address %d [%s]\n", - phydev->addr, phydev->drv->name); - return 0; } @@ -490,7 +490,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; + const char *phy_mode_str = NULL; + struct phy_device *phydev = NULL; char *compat; + int phy_mode; int ret; compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); @@ -513,17 +516,43 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) /* Fetch the PHY phandle */ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
+ */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + /* Get the link mode */ - priv->phy_interface = of_get_phy_mode(dn); + phy_mode = of_get_phy_mode(dn); + priv->phy_interface = phy_mode; - return 0; -} + /* We need to specifically look up whether this PHY interface is internal + * or not *before* we even try to probe the PHY driver over MDIO as we + * may have shut down the internal PHY for power saving purposes. + */ + if (phy_mode < 0) { + ret = of_property_read_string(dn, "phy-mode", &phy_mode_str); + if (ret < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return ret; + } -static int bcmgenet_fixed_phy_link_update(struct net_device *dev, - struct fixed_phy_status *status) -{ - if (dev && dev->phydev && status) - status->link = dev->phydev->link; + priv->phy_interface = PHY_INTERFACE_MODE_NA; + if (!strcasecmp(phy_mode_str, "internal")) + priv->internal_phy = true; + } + + /* Make sure we initialize MoCA PHYs with a link down */ + if (phy_mode == PHY_INTERFACE_MODE_MOCA) { + phydev = of_phy_find_device(dn); + if (phydev) + phydev->link = 0; + } return 0; } @@ -574,18 +603,15 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) .asym_pause = 0, }; - phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL); if (!phydev || IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); return -ENODEV; } - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) { - ret = fixed_phy_set_link_update( - phydev, bcmgenet_fixed_phy_link_update); - if (!ret) - phydev->link = 0; - } + /* Make sure we initialize MoCA PHYs with a link down */ + phydev->link = 0; + } priv->phydev = phydev; @@ -614,10 +640,6 @@ int bcmgenet_mii_init(struct net_device *dev) return ret; ret = bcmgenet_mii_bus_init(priv); - if (ret) - goto out_free; - - ret = bcmgenet_mii_probe(dev); if (ret) goto out; @@ -626,7 +648,6 @@ int bcmgenet_mii_init(struct net_device *dev) out: of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); -out_free: kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); return ret; diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b7a0f7879..9e59663a6 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar) } /* Flush FLI data fifo. */ -static u32 +static int bfa_flash_fifo_flush(void __iomem *pci_bar) { u32 i; @@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar) } /* Read flash status. */ -static u32 +static int bfa_flash_status_read(void __iomem *pci_bar) { union bfa_flash_dev_status_reg dev_status; - u32 status; + int status; u32 ret_status; int i; @@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar) } /* Start flash read operation. 
*/ -static u32 +static int bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, char *buf) { - u32 status; + int status; /* len must be multiple of 4 and not exceeding fifo size */ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) @@ -1703,7 +1703,8 @@ static enum bfa_status bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, u32 len) { - u32 n, status; + u32 n; + int status; u32 off, l, s, residue, fifo_sz; residue = len; diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 5d0753cc7..04b0d16b2 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, q0->rcb->id = 0; q0->rx_packets = q0->rx_bytes = 0; q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; + q0->rxbuf_map_failed = 0; bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); @@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, : rx_cfg->q1_buf_size; q1->rx_packets = q1->rx_bytes = 0; q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; + q1->rxbuf_map_failed = 0; bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, &hqpt_mem[i], &hsqpt_mem[i], diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index e0e797f2e..c438d032e 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -587,6 +587,7 @@ struct bna_rxq { u64 rx_bytes; u64 rx_packets_with_error; u64 rxbuf_alloc_failed; + u64 rxbuf_map_failed; }; /* RxQ pair */ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index c1c13fd6c..c3cfe8da3 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -399,7 +399,13 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) } dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, - unmap_q->map_size, DMA_FROM_DEVICE); + unmap_q->map_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + put_page(page); + BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); + rcb->rxq->rxbuf_map_failed++; + goto finishing; + } unmap->page = page; unmap->page_offset = page_offset; @@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) rcb->rxq->rxbuf_alloc_failed++; goto finishing; } + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); + rcb->rxq->rxbuf_map_failed++; + goto finishing; + } unmap->skb = skb; dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); @@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) unmap = head_unmap; dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); + return NETDEV_TX_OK; + } BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); txqent->vector[0].length = htons(len); dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); @@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, 0, size, DMA_TO_DEVICE); + if
(dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, + tcb->producer_index); + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); + return NETDEV_TX_OK; + } + dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); txqent->vector[vect_id].length = htons(size); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index faedbf247..f4ed816b9 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -175,6 +175,7 @@ struct bnad_drv_stats { u64 tx_skb_headlen_zero; u64 tx_skb_frag_zero; u64 tx_skb_len_mismatch; + u64 tx_skb_map_failed; u64 hw_stats_updates; u64 netif_rx_dropped; @@ -189,6 +190,7 @@ struct bnad_drv_stats { u64 rx_unmap_q_alloc_failed; u64 rxbuf_alloc_failed; + u64 rxbuf_map_failed; }; /* Complete driver stats */ diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 3385c279a..d95098223 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "tx_skb_headlen_zero", "tx_skb_frag_zero", "tx_skb_len_mismatch", + "tx_skb_map_failed", "hw_stats_updates", "netif_rx_dropped", @@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "tx_unmap_q_alloc_failed", "rx_unmap_q_alloc_failed", "rxbuf_alloc_failed", + "rxbuf_map_failed", "mac_stats_clr_cnt", "mac_frame_64", @@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) rx_packets_with_error; buf[bi++] = rcb->rxq-> rxbuf_alloc_failed; + buf[bi++] = rcb->rxq->rxbuf_map_failed; buf[bi++] = rcb->producer_index; buf[bi++] = rcb->consumer_index; } @@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) rx_packets_with_error; buf[bi++] = rcb->rxq-> rxbuf_alloc_failed; + buf[bi++] = rcb->rxq->rxbuf_map_failed; buf[bi++] = rcb->producer_index; buf[bi++] = rcb->consumer_index; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index bf9eb2ecf..88c1e1a83 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2774,8 +2774,7 @@ static const struct macb_config emac_config = { static const struct macb_config zynqmp_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | - MACB_CAPS_JUMBO, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -2783,8 +2782,7 @@ static const struct macb_config zynqmp_config = { }; static const struct macb_config zynq_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | - MACB_CAPS_NO_GIGABIT_HALF, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 1895b6b2a..6e1faea00 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -399,7 +399,7 @@ #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 #define MACB_CAPS_MACB_IS_GEM 0x80000000 -#define MACB_CAPS_JUMBO 0x00000008 +#define MACB_CAPS_JUMBO 0x00000010 /* 
Bit manipulation macros */ #define MACB_BIT(name) \ diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 02e23e6f1..8fb84e69c 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -3,7 +3,7 @@ # config NET_VENDOR_CAVIUM - tristate "Cavium ethernet drivers" + bool "Cavium ethernet drivers" depends on PCI default y ---help--- @@ -34,6 +34,8 @@ config THUNDER_NIC_VF config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT + select PHYLIB + select MDIO_OCTEON ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 5824becd9..8e0039ab0 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -816,10 +816,9 @@ static int setup_glist(struct lio *lio) INIT_LIST_HEAD(&lio->glist); for (i = 0; i < lio->tx_qsize; i++) { - g = kmalloc(sizeof(*g), GFP_KERNEL); + g = kzalloc(sizeof(*g), GFP_KERNEL); if (!g) break; - memset(g, 0, sizeof(struct octnic_gather)); g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 8aee25090..d3950b20f 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -135,6 +135,7 @@ #define NICVF_TX_TIMEOUT (50 * HZ) struct nicvf_cq_poll { + struct nicvf *nicvf; u8 cq_idx; /* Completion queue index */ struct napi_struct napi; }; @@ -190,10 +191,10 @@ enum tx_stats_reg_offset { }; struct nicvf_hw_stats { - u64 rx_bytes_ok; - u64 rx_ucast_frames_ok; - u64 rx_bcast_frames_ok; - u64 rx_mcast_frames_ok; + u64 rx_bytes; + u64 rx_ucast_frames; + u64 rx_bcast_frames; + u64 rx_mcast_frames; u64 rx_fcs_errors; u64 rx_l2_errors; u64 rx_drop_red; @@ -204,6 +205,31 @@ struct nicvf_hw_stats { u64 rx_drop_mcast; u64 rx_drop_l3_bcast; u64 rx_drop_l3_mcast; + u64 rx_bgx_truncated_pkts; + u64 rx_jabber_errs; + u64 rx_fcs_errs; + u64 rx_bgx_errs; + u64 rx_prel2_errs; + u64 rx_l2_hdr_malformed; + u64 rx_oversize; + u64 rx_undersize; + u64 rx_l2_len_mismatch; + u64 rx_l2_pclp; + u64 rx_ip_ver_errs; + u64 rx_ip_csum_errs; + u64 rx_ip_hdr_malformed; + u64 rx_ip_payload_malformed; + u64 rx_ip_ttl_errs; + u64 rx_l3_pclp; + u64 rx_l4_malformed; + u64 rx_l4_csum_errs; + u64 rx_udp_len_errs; + u64 rx_l4_port_errs; + u64 rx_tcp_flag_errs; + u64 rx_tcp_offset_errs; + u64 rx_l4_pclp; + u64 rx_truncated_pkts; + u64 tx_bytes_ok; u64 tx_ucast_frames_ok; u64 tx_bcast_frames_ok; @@ -222,6 +248,7 @@ struct nicvf_drv_stats { u64 rx_frames_1518; u64 rx_frames_jumbo; u64 rx_drops; + /* Tx */ u64 tx_frames_ok; u64 tx_drops; @@ -231,13 +258,24 @@ struct nicvf_drv_stats { }; struct nicvf { + struct nicvf *pnicvf; struct net_device *netdev; struct pci_dev *pdev; u8 vf_id; u8 node; - u8 tns_mode; + u8 tns_mode:1; + u8 sqs_mode:1; + u8 loopback_supported:1; u16 mtu; struct queue_set *qs; +#define MAX_SQS_PER_VF_SINGLE_NODE 5 +#define MAX_SQS_PER_VF 11 + u8 sqs_id; + u8 sqs_count; /* Secondary Qset count */ + struct nicvf *snicvf[MAX_SQS_PER_VF]; + u8 rx_queues; + u8 tx_queues; + u8 max_queues; void __iomem *reg_base; bool link_up; u8 duplex; @@ -257,7 +295,7 @@ struct nicvf { u32 cq_coalesce_usecs; u32 msg_enable; - struct nicvf_hw_stats stats; + struct nicvf_hw_stats hw_stats; struct nicvf_drv_stats drv_stats; struct 
bgx_stats bgx_stats; struct work_struct reset_task; @@ -269,10 +307,9 @@ struct nicvf { char irq_name[NIC_VF_MSIX_VECTORS][20]; bool irq_allocated[NIC_VF_MSIX_VECTORS]; - bool pf_ready_to_rcv_msg; + /* VF <-> PF mailbox communication */ bool pf_acked; bool pf_nacked; - bool bgx_stats_acked; bool set_mac_pending; } ____cacheline_aligned_in_smp; @@ -304,14 +341,21 @@ struct nicvf { #define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */ #define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */ #define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */ -#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */ -#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */ +#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */ +#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */ +#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */ +#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */ +#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */ +#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ +#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ struct nic_cfg_msg { u8 msg; u8 vf_id; - u8 tns_mode; u8 node_id; + u8 tns_mode:1; + u8 sqs_mode:1; + u8 loopback_supported:1; u8 mac_addr[ETH_ALEN]; }; @@ -319,6 +363,7 @@ struct nic_cfg_msg { struct qs_cfg_msg { u8 msg; u8 num; + u8 sqs_count; u64 cfg; }; @@ -335,6 +380,7 @@ struct sq_cfg_msg { u8 msg; u8 qs_num; u8 sq_num; + bool sqs_mode; u64 cfg; }; @@ -394,6 +440,28 @@ struct bgx_link_status { u32 speed; }; +/* Get Extra Qset IDs */ +struct sqs_alloc { + u8 msg; + u8 vf_id; + u8 qs_count; +}; + +struct nicvf_ptr { + u8 msg; + u8 vf_id; + bool sqs_mode; + u8 sqs_id; + u64 nicvf; +}; + +/* Set interface in loopback mode */ +struct set_loopback { + u8 msg; + u8 vf_id; + bool enable; +}; + /* 128 bit shared memory between PF and each VF */ union nic_mbx { struct { u8 msg; } msg; @@ -408,6 +476,9 @@ union nic_mbx { struct rss_cfg_msg rss_cfg; struct bgx_stats_msg bgx_stats; struct bgx_link_status link_status; + struct sqs_alloc sqs_alloc; + struct nicvf_ptr nicvf; + struct set_loopback lbk; }; #define NIC_NODE_ID_MASK 0x03 diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 6e0c03169..c561fdcb7 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -22,12 +22,16 @@ struct nicpf { struct pci_dev *pdev; - u8 rev_id; u8 node; unsigned int flags; u8 num_vf_en; /* No of VF enabled */ bool vf_enabled[MAX_NUM_VFS_SUPPORTED]; void __iomem *reg_base; /* Register start address */ + u8 num_sqs_en; /* Secondary qsets enabled */ + u64 nicvf[MAX_NUM_VFS_SUPPORTED]; + u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF]; + u8 pqs_vf[MAX_NUM_VFS_SUPPORTED]; + bool sqs_used[MAX_NUM_VFS_SUPPORTED]; struct pkind_cfg pkind; #define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF)) #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) @@ -39,6 +43,7 @@ struct nicpf { u8 duplex[MAX_LMAC]; u32 speed[MAX_LMAC]; u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; + u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; u16 rss_ind_tbl_size; bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; @@ -49,6 +54,11 @@ struct nicpf { bool irq_allocated[NIC_PF_MSIX_VECTORS]; }; +static inline bool pass1_silicon(struct nicpf *nic) +{ + return nic->pdev->revision < 8; +} + /* Supported devices */ static const struct pci_device_id nic_id_table[] = { { 
PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) }, @@ -112,7 +122,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx) * when PF writes to MBOX(1), in next revisions when * PF writes to MBOX(0) */ - if (nic->rev_id == 0) { + if (pass1_silicon(nic)) { /* see the comment for nic_reg_write()/nic_reg_read() * functions above */ @@ -139,14 +149,19 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf) mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; - bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - - mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac); - if (mac) - ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac); + if (vf < MAX_LMAC) { + bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac); + if (mac) + ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac); + } + mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false; mbx.nic_cfg.node_id = nic->node; + + mbx.nic_cfg.loopback_supported = vf < MAX_LMAC; + nic_send_msg_to_vf(nic, vf, &mbx); } @@ -295,9 +310,6 @@ static void nic_init_hw(struct nicpf *nic) { int i; - /* Reset NIC, in case the driver is repeatedly inserted and removed */ - nic_reg_write(nic, NIC_PF_SOFT_RESET, 1); - /* Enable NIC HW block */ nic_reg_write(nic, NIC_PF_CFG, 0x3); @@ -329,6 +341,10 @@ static void nic_init_hw(struct nicpf *nic) /* Timer config */ nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK); + + /* Enable VLAN ethertype matching and stripping */ + nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7, + (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q); } /* Channel parse index configuration */ @@ -381,8 +397,18 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */ /* Leave RSS_SIZE as '0' to disable RSS */ - nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), - (vnic << 24) | (padd << 16) | (rssi_base + rssi)); + if (pass1_silicon(nic)) { + nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), + (vnic << 24) | (padd << 16) | + (rssi_base + rssi)); + } else { + /* Set MPI_ALG to '0' to disable MCAM parsing */ + nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), + (padd << 16)); + /* MPI index is same as CPI if MPI_ALG is not enabled */ + nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3), + (vnic << 24) | (rssi_base + rssi)); + } if ((rssi + 1) >= cfg->rq_cnt) continue; @@ -395,6 +421,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) rssi = ((cpi - cpi_base) & 0x38) >> 3; } nic->cpi_base[cfg->vf_id] = cpi_base; + nic->rssi_base[cfg->vf_id] = rssi_base; } /* Responds to VF with its RSS indirection table size */ @@ -420,23 +447,34 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) { u8 qset, idx = 0; u64 cpi_cfg, cpi_base, rssi_base, rssi; + u64 idx_addr; - cpi_base = nic->cpi_base[cfg->vf_id]; - cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3)); - rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset; + rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset; rssi = rssi_base; qset = cfg->vf_id; for (; rssi < (rssi_base + cfg->tbl_len); rssi++) { + u8 svf = cfg->ind_tbl[idx] >> 3; + + if (svf) + qset = nic->vf_sqs[cfg->vf_id][svf - 1]; + else + qset = cfg->vf_id; nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3), (qset << 3) | (cfg->ind_tbl[idx] & 0x7)); idx++; } +
cpi_base = nic->cpi_base[cfg->vf_id]; + if (pass1_silicon(nic)) + idx_addr = NIC_PF_CPI_0_2047_CFG; + else + idx_addr = NIC_PF_MPI_0_2047_CFG; + cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3)); cpi_cfg &= ~(0xFULL << 20); cpi_cfg |= (cfg->hash_bits << 20); - nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg); + nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg); } /* 4 level transmit side scheduler configuration @@ -452,19 +490,31 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1 */ -static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx) +static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, + struct sq_cfg_msg *sq) { u32 bgx, lmac, chan; u32 tl2, tl3, tl4; u32 rr_quantum; + u8 sq_idx = sq->sq_num; + u8 pqs_vnic; + + if (sq->sqs_mode) + pqs_vnic = nic->pqs_vf[vnic]; + else + pqs_vnic = vnic; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]); - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); /* 24 bytes for FCS, IPG and preamble */ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX); tl4 += sq_idx; + if (sq->sqs_mode) + tl4 += vnic * 8; + tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3); nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | ((u64)vnic << NIC_QS_ID_SHIFT) | @@ -485,6 +535,86 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx) nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00); } +/* Send primary nicvf pointer to secondary QS's VF */ +static void nic_send_pnicvf(struct nicpf *nic, int sqs) +{ + union nic_mbx mbx = {}; + + mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR; + mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]]; + nic_send_msg_to_vf(nic, sqs, &mbx); +} + +/* Send SQS's nicvf pointer to primary QS's VF */ +static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf) +{ + union nic_mbx mbx = {}; + int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id]; + + mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR; + mbx.nicvf.sqs_id = nicvf->sqs_id; + mbx.nicvf.nicvf = nic->nicvf[sqs_id]; + nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx); +} + +/* Find next available Qset that can be assigned as a + * secondary Qset to a VF.
+ */ +static int nic_nxt_avail_sqs(struct nicpf *nic) +{ + int sqs; + + for (sqs = 0; sqs < nic->num_sqs_en; sqs++) { + if (!nic->sqs_used[sqs]) + nic->sqs_used[sqs] = true; + else + continue; + return sqs + nic->num_vf_en; + } + return -1; +} + +/* Allocate additional Qsets for requested VF */ +static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs) +{ + union nic_mbx mbx = {}; + int idx, alloc_qs = 0; + int sqs_id; + + if (!nic->num_sqs_en) + goto send_mbox; + + for (idx = 0; idx < sqs->qs_count; idx++) { + sqs_id = nic_nxt_avail_sqs(nic); + if (sqs_id < 0) + break; + nic->vf_sqs[sqs->vf_id][idx] = sqs_id; + nic->pqs_vf[sqs_id] = sqs->vf_id; + alloc_qs++; + } + +send_mbox: + mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS; + mbx.sqs_alloc.vf_id = sqs->vf_id; + mbx.sqs_alloc.qs_count = alloc_qs; + nic_send_msg_to_vf(nic, sqs->vf_id, &mbx); +} + +static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) +{ + int bgx_idx, lmac_idx; + + if (lbk->vf_id > MAX_LMAC) + return -1; + + bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]); + lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]); + + bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable); + + return 0; +} + /* Interrupt handler to handle mailbox messages from VFs */ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) { @@ -492,6 +622,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) u64 *mbx_data; u64 mbx_addr; u64 reg_addr; + u64 cfg; int bgx, lmac; int i; int ret = 0; @@ -512,15 +643,24 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) switch (mbx.msg.msg) { case NIC_MBOX_MSG_READY: nic_mbx_send_ready(nic, vf); - nic->link[vf] = 0; - nic->duplex[vf] = 0; - nic->speed[vf] = 0; + if (vf < MAX_LMAC) { + nic->link[vf] = 0; + nic->duplex[vf] = 0; + nic->speed[vf] = 0; + } ret = 1; break; case NIC_MBOX_MSG_QS_CFG: reg_addr = NIC_PF_QSET_0_127_CFG | (mbx.qs.num << NIC_QS_ID_SHIFT); - nic_reg_write(nic, reg_addr, mbx.qs.cfg); + cfg = mbx.qs.cfg; + /* Check if its a secondary Qset */ + if (vf >= nic->num_vf_en) { + cfg = cfg & (~0x7FULL); + /* Assign this Qset to primary Qset's VF */ + cfg |= nic->pqs_vf[vf]; + } + nic_reg_write(nic, reg_addr, cfg); break; case NIC_MBOX_MSG_RQ_CFG: reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG | @@ -548,9 +688,11 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) (mbx.sq.qs_num << NIC_QS_ID_SHIFT) | (mbx.sq.sq_num << NIC_Q_NUM_SHIFT); nic_reg_write(nic, reg_addr, mbx.sq.cfg); - nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num); + nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq); break; case NIC_MBOX_MSG_SET_MAC: + if (vf >= nic->num_vf_en) + break; lmac = mbx.mac.vf_id; bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); @@ -577,10 +719,28 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence */ nic->vf_enabled[vf] = false; + if (vf >= nic->num_vf_en) + nic->sqs_used[vf - nic->num_vf_en] = false; + nic->pqs_vf[vf] = 0; break; + case NIC_MBOX_MSG_ALLOC_SQS: + nic_alloc_sqs(nic, &mbx.sqs_alloc); + goto unlock; + case NIC_MBOX_MSG_NICVF_PTR: + nic->nicvf[vf] = mbx.nicvf.nicvf; + break; + case NIC_MBOX_MSG_PNICVF_PTR: + nic_send_pnicvf(nic, vf); + goto unlock; + case NIC_MBOX_MSG_SNICVF_PTR: + nic_send_snicvf(nic, &mbx.nicvf); + goto unlock; case NIC_MBOX_MSG_BGX_STATS: nic_get_bgx_stats(nic, &mbx.bgx_stats); goto unlock; + case NIC_MBOX_MSG_LOOPBACK: + ret = 
nic_config_loopback(nic, &mbx.lbk); + break; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); @@ -606,8 +766,7 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) if (intr & (1ULL << vf)) { dev_dbg(&nic->pdev->dev, "Intr from VF %d\n", vf + (mbx * vf_per_mbx_reg)); - if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en) - break; + nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); nic_clear_mbx_intr(nic, vf, mbx); } @@ -713,9 +872,24 @@ static void nic_unregister_interrupts(struct nicpf *nic) nic_disable_msix(nic); } +static int nic_num_sqs_en(struct nicpf *nic, int vf_en) +{ + int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE; + u16 total_vf; + + /* Check if its a multi-node environment */ + if (nr_node_ids > 1) + sqs_per_vf = MAX_SQS_PER_VF; + + pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf); + return min(total_vf - vf_en, vf_en * sqs_per_vf); +} + static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) { int pos = 0; + int vf_en; int err; u16 total_vf_cnt; @@ -732,16 +906,20 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) if (!total_vf_cnt) return 0; - err = pci_enable_sriov(pdev, nic->num_vf_en); + vf_en = nic->num_vf_en; + nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en); + vf_en += nic->num_sqs_en; + + err = pci_enable_sriov(pdev, vf_en); if (err) { dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n", - nic->num_vf_en); + vf_en); nic->num_vf_en = 0; return err; } dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n", - nic->num_vf_en); + vf_en); nic->flags |= NIC_SRIOV_ENABLED; return 0; @@ -841,8 +1019,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_release_regions; } - pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id); - nic->node = nic_get_node_id(pdev); nic_set_lmac_vf_mapping(nic); diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index 58197bb2f..dd536be20 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -85,7 +85,11 @@ #define NIC_PF_ECC3_DBE_INT_W1S (0x2708) #define NIC_PF_ECC3_DBE_ENA_W1C (0x2710) #define NIC_PF_ECC3_DBE_ENA_W1S (0x2718) +#define NIC_PF_MCAM_0_191_ENA (0x100000) +#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000) +#define NIC_PF_MCAM_CTRL (0x120000) #define NIC_PF_CPI_0_2047_CFG (0x200000) +#define NIC_PF_MPI_0_2047_CFG (0x210000) #define NIC_PF_RSSI_0_4097_RQ (0x220000) #define NIC_PF_LMAC_0_7_CFG (0x240000) #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index a4228e664..af54c1094 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -35,10 +35,10 @@ struct nicvf_stat { } static const struct nicvf_stat nicvf_hw_stats[] = { - NICVF_HW_STAT(rx_bytes_ok), - NICVF_HW_STAT(rx_ucast_frames_ok), - NICVF_HW_STAT(rx_bcast_frames_ok), - NICVF_HW_STAT(rx_mcast_frames_ok), + NICVF_HW_STAT(rx_bytes), + NICVF_HW_STAT(rx_ucast_frames), + NICVF_HW_STAT(rx_bcast_frames), + NICVF_HW_STAT(rx_mcast_frames), NICVF_HW_STAT(rx_fcs_errors), NICVF_HW_STAT(rx_l2_errors), NICVF_HW_STAT(rx_drop_red), @@ -49,6 +49,30 @@ static const struct nicvf_stat nicvf_hw_stats[] = { NICVF_HW_STAT(rx_drop_mcast), NICVF_HW_STAT(rx_drop_l3_bcast), 
NICVF_HW_STAT(rx_drop_l3_mcast), + NICVF_HW_STAT(rx_bgx_truncated_pkts), + NICVF_HW_STAT(rx_jabber_errs), + NICVF_HW_STAT(rx_fcs_errs), + NICVF_HW_STAT(rx_bgx_errs), + NICVF_HW_STAT(rx_prel2_errs), + NICVF_HW_STAT(rx_l2_hdr_malformed), + NICVF_HW_STAT(rx_oversize), + NICVF_HW_STAT(rx_undersize), + NICVF_HW_STAT(rx_l2_len_mismatch), + NICVF_HW_STAT(rx_l2_pclp), + NICVF_HW_STAT(rx_ip_ver_errs), + NICVF_HW_STAT(rx_ip_csum_errs), + NICVF_HW_STAT(rx_ip_hdr_malformed), + NICVF_HW_STAT(rx_ip_payload_malformed), + NICVF_HW_STAT(rx_ip_ttl_errs), + NICVF_HW_STAT(rx_l3_pclp), + NICVF_HW_STAT(rx_l4_malformed), + NICVF_HW_STAT(rx_l4_csum_errs), + NICVF_HW_STAT(rx_udp_len_errs), + NICVF_HW_STAT(rx_l4_port_errs), + NICVF_HW_STAT(rx_tcp_flag_errs), + NICVF_HW_STAT(rx_tcp_offset_errs), + NICVF_HW_STAT(rx_l4_pclp), + NICVF_HW_STAT(rx_truncated_pkts), NICVF_HW_STAT(tx_bytes_ok), NICVF_HW_STAT(tx_ucast_frames_ok), NICVF_HW_STAT(tx_bcast_frames_ok), @@ -125,10 +149,33 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl) nic->msg_enable = lvl; } +static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset) +{ + int stats, qidx; + int start_qidx = qset * MAX_RCV_QUEUES_PER_QS; + + for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { + for (stats = 0; stats < nicvf_n_queue_stats; stats++) { + sprintf(*data, "rxq%d: %s", qidx + start_qidx, + nicvf_queue_stats[stats].name); + *data += ETH_GSTRING_LEN; + } + } + + for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { + for (stats = 0; stats < nicvf_n_queue_stats; stats++) { + sprintf(*data, "txq%d: %s", qidx + start_qidx, + nicvf_queue_stats[stats].name); + *data += ETH_GSTRING_LEN; + } + } +} + static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) { struct nicvf *nic = netdev_priv(netdev); - int stats, qidx; + int stats; + int sqs; if (sset != ETH_SS_STATS) return; @@ -143,20 +190,12 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) data += ETH_GSTRING_LEN; } - for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { - for (stats = 0; stats < nicvf_n_queue_stats; stats++) { - sprintf(data, "rxq%d: %s", qidx, - nicvf_queue_stats[stats].name); - data += ETH_GSTRING_LEN; - } - } + nicvf_get_qset_strings(nic, &data, 0); - for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { - for (stats = 0; stats < nicvf_n_queue_stats; stats++) { - sprintf(data, "txq%d: %s", qidx, - nicvf_queue_stats[stats].name); - data += ETH_GSTRING_LEN; - } + for (sqs = 0; sqs < nic->sqs_count; sqs++) { + if (!nic->snicvf[sqs]) + continue; + nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1); } for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) { @@ -173,21 +212,58 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) static int nicvf_get_sset_count(struct net_device *netdev, int sset) { struct nicvf *nic = netdev_priv(netdev); + int qstats_count; + int sqs; if (sset != ETH_SS_STATS) return -EINVAL; + qstats_count = nicvf_n_queue_stats * + (nic->qs->rq_cnt + nic->qs->sq_cnt); + for (sqs = 0; sqs < nic->sqs_count; sqs++) { + struct nicvf *snic; + + snic = nic->snicvf[sqs]; + if (!snic) + continue; + qstats_count += nicvf_n_queue_stats * + (snic->qs->rq_cnt + snic->qs->sq_cnt); + } + return nicvf_n_hw_stats + nicvf_n_drv_stats + - (nicvf_n_queue_stats * - (nic->qs->rq_cnt + nic->qs->sq_cnt)) + + qstats_count + BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; } +static void nicvf_get_qset_stats(struct nicvf *nic, + struct ethtool_stats *stats, u64 **data) +{ + int stat, qidx; + + if (!nic) + return; + + for (qidx 
= 0; qidx < nic->qs->rq_cnt; qidx++) { + nicvf_update_rq_stats(nic, qidx); + for (stat = 0; stat < nicvf_n_queue_stats; stat++) + *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats) + [nicvf_queue_stats[stat].index]; + } + + for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { + nicvf_update_sq_stats(nic, qidx); + for (stat = 0; stat < nicvf_n_queue_stats; stat++) + *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats) + [nicvf_queue_stats[stat].index]; + } +} + static void nicvf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct nicvf *nic = netdev_priv(netdev); - int stat, qidx; + int stat; + int sqs; nicvf_update_stats(nic); @@ -195,22 +271,18 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, nicvf_update_lmac_stats(nic); for (stat = 0; stat < nicvf_n_hw_stats; stat++) - *(data++) = ((u64 *)&nic->stats) + *(data++) = ((u64 *)&nic->hw_stats) [nicvf_hw_stats[stat].index]; for (stat = 0; stat < nicvf_n_drv_stats; stat++) *(data++) = ((u64 *)&nic->drv_stats) [nicvf_drv_stats[stat].index]; - for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { - for (stat = 0; stat < nicvf_n_queue_stats; stat++) - *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) - [nicvf_queue_stats[stat].index]; - } + nicvf_get_qset_stats(nic, stats, &data); - for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { - for (stat = 0; stat < nicvf_n_queue_stats; stat++) - *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) - [nicvf_queue_stats[stat].index]; + for (sqs = 0; sqs < nic->sqs_count; sqs++) { + if (!nic->snicvf[sqs]) + continue; + nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data); } for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++) @@ -369,7 +441,7 @@ static int nicvf_get_rxnfc(struct net_device *dev, switch (info->cmd) { case ETHTOOL_GRXRINGS: - info->data = nic->qs->rq_cnt; + info->data = nic->rx_queues; ret = 0; break; case ETHTOOL_GRXFH: @@ -501,17 +573,15 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, struct nicvf_rss_info *rss = &nic->rss_info; int idx; - if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) { - rss->enable = false; - rss->hash_bits = 0; - return -EIO; - } - - /* We do not allow change in unsupported parameters */ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - rss->enable = true; + if (!rss->enable) { + netdev_err(nic->netdev, + "RSS is disabled, cannot change settings\n"); + return -EIO; + } + if (indir) { for (idx = 0; idx < rss->rss_size; idx++) rss->ind_tbl[idx] = indir[idx]; @@ -534,11 +604,11 @@ static void nicvf_get_channels(struct net_device *dev, memset(channel, 0, sizeof(*channel)); - channel->max_rx = MAX_RCV_QUEUES_PER_QS; - channel->max_tx = MAX_SND_QUEUES_PER_QS; + channel->max_rx = nic->max_queues; + channel->max_tx = nic->max_queues; - channel->rx_count = nic->qs->rq_cnt; - channel->tx_count = nic->qs->sq_cnt; + channel->rx_count = nic->rx_queues; + channel->tx_count = nic->tx_queues; } /* Set no of Tx, Rx queues to be used */ @@ -548,22 +618,34 @@ static int nicvf_set_channels(struct net_device *dev, struct nicvf *nic = netdev_priv(dev); int err = 0; bool if_up = netif_running(dev); + int cqcount; if (!channel->rx_count || !channel->tx_count) return -EINVAL; - if (channel->rx_count > MAX_RCV_QUEUES_PER_QS) + if (channel->rx_count > nic->max_queues) return -EINVAL; - if (channel->tx_count > MAX_SND_QUEUES_PER_QS) + if (channel->tx_count > nic->max_queues) return -EINVAL; if (if_up) nicvf_stop(dev); - nic->qs->rq_cnt = channel->rx_count; - nic->qs->sq_cnt = channel->tx_count; + 
cqcount = max(channel->rx_count, channel->tx_count); + + if (cqcount > MAX_CMP_QUEUES_PER_QS) { + nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS); + nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1; + } else { + nic->sqs_count = 0; + } + + nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS); + nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS); nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); - err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt); + nic->rx_queues = channel->rx_count; + nic->tx_queues = channel->tx_count; + err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues); if (err) return err; @@ -571,7 +653,7 @@ static int nicvf_set_channels(struct net_device *dev, nicvf_open(dev); netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", - nic->qs->sq_cnt, nic->qs->rq_cnt); + nic->tx_queues, nic->rx_queues); return err; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 3b90afb8c..a9377727c 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -28,7 +29,7 @@ static const struct pci_device_id nicvf_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_VF, - PCI_VENDOR_ID_CAVIUM, 0xA11E) }, + PCI_VENDOR_ID_CAVIUM, 0xA134) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF, PCI_VENDOR_ID_CAVIUM, 0xA11E) }, @@ -50,6 +51,14 @@ module_param(cpi_alg, int, S_IRUGO); MODULE_PARM_DESC(cpi_alg, "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); +static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) +{ + if (nic->sqs_mode) + return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS); + else + return qidx; +} + static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, struct sk_buff *skb) { @@ -105,7 +114,6 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) } /* VF -> PF mailbox communication */ - static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx) { u64 *msg = (u64 *)mbx; @@ -147,26 +155,15 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) */ static int nicvf_check_pf_ready(struct nicvf *nic) { - int timeout = 5000, sleep = 20; union nic_mbx mbx = {}; mbx.msg.msg = NIC_MBOX_MSG_READY; - - nic->pf_ready_to_rcv_msg = false; - - nicvf_write_to_mbx(nic, &mbx); - - while (!nic->pf_ready_to_rcv_msg) { - msleep(sleep); - if (nic->pf_ready_to_rcv_msg) - break; - timeout -= sleep; - if (!timeout) { - netdev_err(nic->netdev, - "PF didn't respond to READY msg\n"); - return 0; - } + if (nicvf_send_msg_to_pf(nic, &mbx)) { + netdev_err(nic->netdev, + "PF didn't respond to READY msg\n"); + return 0; } + return 1; } @@ -197,13 +194,15 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg); switch (mbx.msg.msg) { case NIC_MBOX_MSG_READY: - nic->pf_ready_to_rcv_msg = true; + nic->pf_acked = true; nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; nic->node = mbx.nic_cfg.node_id; if (!nic->set_mac_pending) ether_addr_copy(nic->netdev->dev_addr, mbx.nic_cfg.mac_addr); + nic->sqs_mode = mbx.nic_cfg.sqs_mode; + nic->loopback_supported = mbx.nic_cfg.loopback_supported; nic->link_up = false; nic->duplex = 0; nic->speed = 0; @@ -221,7 +220,6 @@ static void 
nicvf_handle_mbx_intr(struct nicvf *nic) case NIC_MBOX_MSG_BGX_STATS: nicvf_read_bgx_stats(nic, &mbx.bgx_stats); nic->pf_acked = true; - nic->bgx_stats_acked = true; break; case NIC_MBOX_MSG_BGX_LINK_CHANGE: nic->pf_acked = true; @@ -242,6 +240,26 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) netif_tx_stop_all_queues(nic->netdev); } break; + case NIC_MBOX_MSG_ALLOC_SQS: + nic->sqs_count = mbx.sqs_alloc.qs_count; + nic->pf_acked = true; + break; + case NIC_MBOX_MSG_SNICVF_PTR: + /* Primary VF: make note of secondary VF's pointer + * to be used while packet transmission. + */ + nic->snicvf[mbx.nicvf.sqs_id] = + (struct nicvf *)mbx.nicvf.nicvf; + nic->pf_acked = true; + break; + case NIC_MBOX_MSG_PNICVF_PTR: + /* Secondary VF/Qset: make note of primary VF's pointer + * to be used while packet reception, to handover packet + * to primary VF's netdev. + */ + nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf; + nic->pf_acked = true; + break; default: netdev_err(nic->netdev, "Invalid message from PF, msg 0x%x\n", mbx.msg.msg); @@ -326,7 +344,7 @@ static int nicvf_rss_init(struct nicvf *nic) nicvf_get_rss_size(nic); - if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) { + if (cpi_alg != CPI_ALG_NONE) { rss->enable = false; rss->hash_bits = 0; return 0; @@ -350,11 +368,100 @@ static int nicvf_rss_init(struct nicvf *nic) for (idx = 0; idx < rss->rss_size; idx++) rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, - nic->qs->rq_cnt); + nic->rx_queues); nicvf_config_rss(nic); return 1; } +/* Request PF to allocate additional Qsets */ +static void nicvf_request_sqs(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + int sqs; + int sqs_count = nic->sqs_count; + int rx_queues = 0, tx_queues = 0; + + /* Only primary VF should request */ + if (nic->sqs_mode || !nic->sqs_count) + return; + + mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS; + mbx.sqs_alloc.vf_id = nic->vf_id; + mbx.sqs_alloc.qs_count = nic->sqs_count; + if (nicvf_send_msg_to_pf(nic, &mbx)) { + /* No response from PF */ + nic->sqs_count = 0; + return; + } + + /* Return if no Secondary Qsets available */ + if (!nic->sqs_count) + return; + + if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS) + rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS; + if (nic->tx_queues > MAX_SND_QUEUES_PER_QS) + tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS; + + /* Set no of Rx/Tx queues in each of the SQsets */ + for (sqs = 0; sqs < nic->sqs_count; sqs++) { + mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR; + mbx.nicvf.vf_id = nic->vf_id; + mbx.nicvf.sqs_id = sqs; + nicvf_send_msg_to_pf(nic, &mbx); + + nic->snicvf[sqs]->sqs_id = sqs; + if (rx_queues > MAX_RCV_QUEUES_PER_QS) { + nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS; + rx_queues -= MAX_RCV_QUEUES_PER_QS; + } else { + nic->snicvf[sqs]->qs->rq_cnt = rx_queues; + rx_queues = 0; + } + + if (tx_queues > MAX_SND_QUEUES_PER_QS) { + nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS; + tx_queues -= MAX_SND_QUEUES_PER_QS; + } else { + nic->snicvf[sqs]->qs->sq_cnt = tx_queues; + tx_queues = 0; + } + + nic->snicvf[sqs]->qs->cq_cnt = + max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt); + + /* Initialize secondary Qset's queues and its interrupts */ + nicvf_open(nic->snicvf[sqs]->netdev); + } + + /* Update stack with actual Rx/Tx queue count allocated */ + if (sqs_count != nic->sqs_count) + nicvf_set_real_num_queues(nic->netdev, + nic->tx_queues, nic->rx_queues); +} + +/* Send this Qset's nicvf pointer to PF. 
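
The nicvf_request_sqs() loop above hands whatever does not fit in the primary Qset out to the granted secondary Qsets, one full Qset's worth at a time. A cut-down userspace restatement of that distribution, with plain arrays standing in for the snicvf[] pointers and the same assumed 8-queue per-Qset caps:

#include <stdio.h>

#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_SND_QUEUES_PER_QS 8

int main(void)
{
        int rx_queues = 20, tx_queues = 12;     /* totals requested by the stack */
        int sqs_count = 2;                      /* secondary Qsets granted by the PF */
        int rq_cnt[8], sq_cnt[8], sqs;

        /* Primary Qset already holds the first 8; hand out the remainder */
        rx_queues = (rx_queues > MAX_RCV_QUEUES_PER_QS) ?
                    rx_queues - MAX_RCV_QUEUES_PER_QS : 0;
        tx_queues = (tx_queues > MAX_SND_QUEUES_PER_QS) ?
                    tx_queues - MAX_SND_QUEUES_PER_QS : 0;

        for (sqs = 0; sqs < sqs_count; sqs++) {
                rq_cnt[sqs] = (rx_queues > MAX_RCV_QUEUES_PER_QS) ?
                              MAX_RCV_QUEUES_PER_QS : rx_queues;
                rx_queues -= rq_cnt[sqs];
                sq_cnt[sqs] = (tx_queues > MAX_SND_QUEUES_PER_QS) ?
                              MAX_SND_QUEUES_PER_QS : tx_queues;
                tx_queues -= sq_cnt[sqs];
                /* rx=20,tx=12: SQS0 gets 8 RQs/4 SQs, SQS1 gets 4 RQs/0 SQs */
                printf("SQS%d: rq=%d sq=%d\n", sqs, rq_cnt[sqs], sq_cnt[sqs]);
        }
        return 0;
}
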
+ * PF inturn sends primary VF's nicvf struct to secondary Qsets/VFs + * so that packets received by these Qsets can use primary VF's netdev + */ +static void nicvf_send_vf_struct(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR; + mbx.nicvf.sqs_mode = nic->sqs_mode; + mbx.nicvf.nicvf = (u64)nic; + nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_get_primary_vf_struct(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR; + nicvf_send_msg_to_pf(nic, &mbx); +} + int nicvf_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues) { @@ -429,6 +536,34 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, } } +static inline void nicvf_set_rxhash(struct net_device *netdev, + struct cqe_rx_t *cqe_rx, + struct sk_buff *skb) +{ + u8 hash_type; + u32 hash; + + if (!(netdev->features & NETIF_F_RXHASH)) + return; + + switch (cqe_rx->rss_alg) { + case RSS_ALG_TCP_IP: + case RSS_ALG_UDP_IP: + hash_type = PKT_HASH_TYPE_L4; + hash = cqe_rx->rss_tag; + break; + case RSS_ALG_IP: + hash_type = PKT_HASH_TYPE_L3; + hash = cqe_rx->rss_tag; + break; + default: + hash_type = PKT_HASH_TYPE_NONE; + hash = 0; + } + + skb_set_hash(skb, hash, hash_type); +} + static void nicvf_rcv_pkt_handler(struct net_device *netdev, struct napi_struct *napi, struct cmp_queue *cq, @@ -437,6 +572,15 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, struct sk_buff *skb; struct nicvf *nic = netdev_priv(netdev); int err = 0; + int rq_idx; + + rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx); + + if (nic->sqs_mode) { + /* Use primary VF's 'nicvf' struct */ + nic = nic->pnicvf; + netdev = nic->netdev; + } /* Check for errors */ err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); @@ -456,9 +600,17 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, skb->data, skb->len, true); } + /* If error packet, drop it here */ + if (err) { + dev_kfree_skb_any(skb); + return; + } + nicvf_set_rx_frame_cnt(nic, skb); - skb_record_rx_queue(skb, cqe_rx->rq_idx); + nicvf_set_rxhash(netdev, cqe_rx, skb); + + skb_record_rx_queue(skb, rq_idx); if (netdev->hw_features & NETIF_F_RXCSUM) { /* HW by default verifies TCP/UDP/SCTP checksums */ skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -468,6 +620,11 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, skb->protocol = eth_type_trans(skb, netdev); + /* Check for stripped VLAN */ + if (cqe_rx->vlan_found && cqe_rx->vlan_stripped) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + ntohs((__force __be16)cqe_rx->vlan_tci)); + if (napi && (netdev->features & NETIF_F_GRO)) napi_gro_receive(napi, skb); else @@ -549,8 +706,11 @@ loop: done: /* Wakeup TXQ if its stopped earlier due to SQ full */ if (tx_done) { - txq = netdev_get_tx_queue(netdev, cq_idx); - if (netif_tx_queue_stopped(txq)) { + netdev = nic->pnicvf->netdev; + txq = netdev_get_tx_queue(netdev, + nicvf_netdev_qidx(nic, cq_idx)); + nic = nic->pnicvf; + if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { netif_tx_start_queue(txq); nic->drv_stats.txq_wake++; if (netif_msg_tx_err(nic)) @@ -624,11 +784,20 @@ static void nicvf_handle_qs_err(unsigned long data) nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); } +static void nicvf_dump_intr_status(struct nicvf *nic) +{ + if (netif_msg_intr(nic)) + netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n", + nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT)); +} + static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq) { struct nicvf *nic = (struct nicvf 
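
nicvf_netdev_qidx() above gives every Qset a disjoint window of the primary netdev's queue numbers; the transmit path (nicvf_sq_append_skb(), later in this patch) inverts the mapping with a divide and a modulo. A standalone illustration, again assuming 8 queues per Qset:

#include <stdio.h>

#define MAX_CMP_QUEUES_PER_QS 8

static int netdev_qidx(int sqs_mode, int sqs_id, int qidx)
{
        /* Primary Qset owns 0..7, SQS0 owns 8..15, SQS1 owns 16..23, ... */
        return sqs_mode ? qidx + (sqs_id + 1) * MAX_CMP_QUEUES_PER_QS : qidx;
}

int main(void)
{
        int global = netdev_qidx(1, 1, 3);      /* SQS1, local queue 3 -> 19 */

        /* Transmit inverts this: Qset = global/8 - 1, local = global%8 */
        printf("global=%d qset=%d local=%d\n",
               global, global / MAX_CMP_QUEUES_PER_QS - 1,
               global % MAX_CMP_QUEUES_PER_QS);
        return 0;
}
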
*)nicvf_irq; u64 intr; + nicvf_dump_intr_status(nic); + intr = nicvf_reg_read(nic, NIC_VF_INT); /* Check for spurious interrupt */ if (!(intr & NICVF_INTR_MBOX_MASK)) @@ -639,59 +808,58 @@ static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq) return IRQ_HANDLED; } -static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq) +static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq) +{ + struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq; + struct nicvf *nic = cq_poll->nicvf; + int qidx = cq_poll->cq_idx; + + nicvf_dump_intr_status(nic); + + /* Disable interrupts */ + nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); + + /* Schedule NAPI */ + napi_schedule(&cq_poll->napi); + + /* Clear interrupt */ + nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); + + return IRQ_HANDLED; +} + +static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq) { - u64 qidx, intr, clear_intr = 0; - u64 cq_intr, rbdr_intr, qs_err_intr; struct nicvf *nic = (struct nicvf *)nicvf_irq; - struct queue_set *qs = nic->qs; - struct nicvf_cq_poll *cq_poll = NULL; + u8 qidx; - intr = nicvf_reg_read(nic, NIC_VF_INT); - if (netif_msg_intr(nic)) - netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n", - nic->netdev->name, intr); - - qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK; - if (qs_err_intr) { - /* Disable Qset err interrupt and schedule softirq */ - nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); - tasklet_hi_schedule(&nic->qs_err_task); - clear_intr |= qs_err_intr; - } - /* Disable interrupts and start polling */ - cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT; - for (qidx = 0; qidx < qs->cq_cnt; qidx++) { - if (!(cq_intr & (1 << qidx))) - continue; - if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx)) + nicvf_dump_intr_status(nic); + + /* Disable RBDR interrupt and schedule softirq */ + for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) { + if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx)) continue; + nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); + tasklet_hi_schedule(&nic->rbdr_task); + /* Clear interrupt */ + nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx); + } - nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); - clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT); + return IRQ_HANDLED; +} - cq_poll = nic->napi[qidx]; - /* Schedule NAPI */ - if (cq_poll) - napi_schedule(&cq_poll->napi); - } +static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq) +{ + struct nicvf *nic = (struct nicvf *)nicvf_irq; - /* Handle RBDR interrupts */ - rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT; - if (rbdr_intr) { - /* Disable RBDR interrupt and schedule softirq */ - for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { - if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx)) - continue; - nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); - tasklet_hi_schedule(&nic->rbdr_task); - clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT); - } - } + nicvf_dump_intr_status(nic); + + /* Disable Qset err interrupt and schedule softirq */ + nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); + tasklet_hi_schedule(&nic->qs_err_task); + nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0); - /* Clear interrupts */ - nicvf_reg_write(nic, NIC_VF_INT, clear_intr); return IRQ_HANDLED; } @@ -725,7 +893,7 @@ static void nicvf_disable_msix(struct nicvf *nic) static int nicvf_register_interrupts(struct nicvf *nic) { - int irq, free, ret = 0; + int irq, ret = 0; int vector; for_each_cq_irq(irq) @@ -740,44 +908,42 @@ static int nicvf_register_interrupts(struct nicvf *nic) 
sprintf(nic->irq_name[irq], "NICVF%d RBDR%d", nic->vf_id, irq - NICVF_INTR_ID_RBDR); - /* Register all interrupts except mailbox */ - for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) { + /* Register CQ interrupts */ + for (irq = 0; irq < nic->qs->cq_cnt; irq++) { vector = nic->msix_entries[irq].vector; ret = request_irq(vector, nicvf_intr_handler, - 0, nic->irq_name[irq], nic); + 0, nic->irq_name[irq], nic->napi[irq]); if (ret) - break; + goto err; nic->irq_allocated[irq] = true; } - for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) { + /* Register RBDR interrupt */ + for (irq = NICVF_INTR_ID_RBDR; + irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) { vector = nic->msix_entries[irq].vector; - ret = request_irq(vector, nicvf_intr_handler, + ret = request_irq(vector, nicvf_rbdr_intr_handler, 0, nic->irq_name[irq], nic); if (ret) - break; + goto err; nic->irq_allocated[irq] = true; } + /* Register QS error interrupt */ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "NICVF%d Qset error", nic->vf_id); - if (!ret) { - vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector; - irq = NICVF_INTR_ID_QS_ERR; - ret = request_irq(vector, nicvf_intr_handler, - 0, nic->irq_name[irq], nic); - if (!ret) - nic->irq_allocated[irq] = true; - } + irq = NICVF_INTR_ID_QS_ERR; + ret = request_irq(nic->msix_entries[irq].vector, + nicvf_qs_err_intr_handler, + 0, nic->irq_name[irq], nic); + if (!ret) + nic->irq_allocated[irq] = true; - if (ret) { - netdev_err(nic->netdev, "Request irq failed\n"); - for (free = 0; free < irq; free++) - free_irq(nic->msix_entries[free].vector, nic); - return ret; - } +err: + if (ret) + netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq); - return 0; + return ret; } static void nicvf_unregister_interrupts(struct nicvf *nic) @@ -786,8 +952,14 @@ static void nicvf_unregister_interrupts(struct nicvf *nic) /* Free registered interrupts */ for (irq = 0; irq < nic->num_vec; irq++) { - if (nic->irq_allocated[irq]) + if (!nic->irq_allocated[irq]) + continue; + + if (irq < NICVF_INTR_ID_SQ) + free_irq(nic->msix_entries[irq].vector, nic->napi[irq]); + else free_irq(nic->msix_entries[irq].vector, nic); + nic->irq_allocated[irq] = false; } @@ -852,13 +1024,26 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) netdev_warn(netdev, "%s: Transmit ring full, stopping SQ%d\n", netdev->name, qid); - return NETDEV_TX_BUSY; } return NETDEV_TX_OK; } +static inline void nicvf_free_cq_poll(struct nicvf *nic) +{ + struct nicvf_cq_poll *cq_poll; + int qidx; + + for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) { + cq_poll = nic->napi[qidx]; + if (!cq_poll) + continue; + nic->napi[qidx] = NULL; + kfree(cq_poll); + } +} + int nicvf_stop(struct net_device *netdev) { int irq, qidx; @@ -871,6 +1056,17 @@ int nicvf_stop(struct net_device *netdev) nicvf_send_msg_to_pf(nic, &mbx); netif_carrier_off(netdev); + netif_tx_stop_all_queues(nic->netdev); + + /* Teardown secondary qsets first */ + if (!nic->sqs_mode) { + for (qidx = 0; qidx < nic->sqs_count; qidx++) { + if (!nic->snicvf[qidx]) + continue; + nicvf_stop(nic->snicvf[qidx]->netdev); + nic->snicvf[qidx] = NULL; + } + } /* Disable RBDR & QS error interrupts */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { @@ -893,7 +1089,6 @@ int nicvf_stop(struct net_device *netdev) cq_poll = nic->napi[qidx]; if (!cq_poll) continue; - nic->napi[qidx] = NULL; napi_synchronize(&cq_poll->napi); /* CQ intr is enabled while napi_complete, * so disable it now @@ -902,7 +1097,6 @@ int nicvf_stop(struct net_device *netdev) 
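
The registration rework above passes each CQ's nicvf_cq_poll as the request_irq() cookie, so nicvf_intr_handler() receives its queue context directly instead of scanning NIC_VF_INT for set bits. A userspace sketch of that dev_id pattern; all names here are illustrative, not driver symbols:

#include <stdio.h>

struct cq_poll {                /* mirrors nicvf_cq_poll's role */
        int cq_idx;
        const char *owner;
};

/* Plays the part of irq_handler_t: the registered cookie comes back as-is */
static int cq_intr_handler(int irq, void *cookie)
{
        struct cq_poll *cq = cookie;

        printf("irq %d -> CQ%d on %s: disable intr, schedule NAPI\n",
               irq, cq->cq_idx, cq->owner);
        return 1;               /* IRQ_HANDLED */
}

int main(void)
{
        struct cq_poll polls[2] = { { 0, "nicvf0" }, { 1, "nicvf0" } };

        /* driver does: request_irq(vector, handler, 0, name, nic->napi[irq]) */
        cq_intr_handler(40, &polls[0]);
        cq_intr_handler(41, &polls[1]);
        return 0;
}
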
nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); napi_disable(&cq_poll->napi); netif_napi_del(&cq_poll->napi); - kfree(cq_poll); } netif_tx_disable(netdev); @@ -918,6 +1112,12 @@ int nicvf_stop(struct net_device *netdev) nicvf_unregister_interrupts(nic); + nicvf_free_cq_poll(nic); + + /* Clear multiqset info */ + nic->pnicvf = nic; + nic->sqs_count = 0; + return 0; } @@ -944,6 +1144,7 @@ int nicvf_open(struct net_device *netdev) goto napi_del; } cq_poll->cq_idx = qidx; + cq_poll->nicvf = nic; netif_napi_add(netdev, &cq_poll->napi, nicvf_poll, NAPI_POLL_WEIGHT); napi_enable(&cq_poll->napi); @@ -972,10 +1173,16 @@ int nicvf_open(struct net_device *netdev) /* Configure CPI alorithm */ nic->cpi_alg = cpi_alg; - nicvf_config_cpi(nic); + if (!nic->sqs_mode) + nicvf_config_cpi(nic); + + nicvf_request_sqs(nic); + if (nic->sqs_mode) + nicvf_get_primary_vf_struct(nic); /* Configure receive side scaling */ - nicvf_rss_init(nic); + if (!nic->sqs_mode) + nicvf_rss_init(nic); err = nicvf_register_interrupts(nic); if (err) @@ -1011,6 +1218,8 @@ int nicvf_open(struct net_device *netdev) cleanup: nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); nicvf_unregister_interrupts(nic); + tasklet_kill(&nic->qs_err_task); + tasklet_kill(&nic->rbdr_task); napi_del: for (qidx = 0; qidx < qs->cq_cnt; qidx++) { cq_poll = nic->napi[qidx]; @@ -1018,9 +1227,8 @@ napi_del: continue; napi_disable(&cq_poll->napi); netif_napi_del(&cq_poll->napi); - kfree(cq_poll); - nic->napi[qidx] = NULL; } + nicvf_free_cq_poll(nic); return err; } @@ -1077,7 +1285,6 @@ void nicvf_update_lmac_stats(struct nicvf *nic) { int stat = 0; union nic_mbx mbx = {}; - int timeout; if (!netif_running(nic->netdev)) return; @@ -1087,14 +1294,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic) /* Rx stats */ mbx.bgx_stats.rx = 1; while (stat < BGX_RX_STATS_COUNT) { - nic->bgx_stats_acked = 0; mbx.bgx_stats.idx = stat; - nicvf_send_msg_to_pf(nic, &mbx); - timeout = 0; - while ((!nic->bgx_stats_acked) && (timeout < 10)) { - msleep(2); - timeout++; - } + if (nicvf_send_msg_to_pf(nic, &mbx)) + return; stat++; } @@ -1103,14 +1305,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic) /* Tx stats */ mbx.bgx_stats.rx = 0; while (stat < BGX_TX_STATS_COUNT) { - nic->bgx_stats_acked = 0; mbx.bgx_stats.idx = stat; - nicvf_send_msg_to_pf(nic, &mbx); - timeout = 0; - while ((!nic->bgx_stats_acked) && (timeout < 10)) { - msleep(2); - timeout++; - } + if (nicvf_send_msg_to_pf(nic, &mbx)) + return; stat++; } } @@ -1118,7 +1315,7 @@ void nicvf_update_lmac_stats(struct nicvf *nic) void nicvf_update_stats(struct nicvf *nic) { int qidx; - struct nicvf_hw_stats *stats = &nic->stats; + struct nicvf_hw_stats *stats = &nic->hw_stats; struct nicvf_drv_stats *drv_stats = &nic->drv_stats; struct queue_set *qs = nic->qs; @@ -1127,14 +1324,16 @@ void nicvf_update_stats(struct nicvf *nic) #define GET_TX_STATS(reg) \ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3)) - stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS); - stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST); - stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST); - stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST); + stats->rx_bytes = GET_RX_STATS(RX_OCTS); + stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST); + stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST); + stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST); stats->rx_fcs_errors = GET_RX_STATS(RX_FCS); stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR); stats->rx_drop_red = GET_RX_STATS(RX_RED); + stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS); stats->rx_drop_overrun = 
GET_RX_STATS(RX_ORUN); + stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS); stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST); stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST); stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); @@ -1146,9 +1345,6 @@ void nicvf_update_stats(struct nicvf *nic) stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); stats->tx_drops = GET_TX_STATS(TX_DROP); - drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok + - stats->rx_bcast_frames_ok + - stats->rx_mcast_frames_ok; drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok; @@ -1167,14 +1363,15 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct nicvf *nic = netdev_priv(netdev); - struct nicvf_hw_stats *hw_stats = &nic->stats; + struct nicvf_hw_stats *hw_stats = &nic->hw_stats; struct nicvf_drv_stats *drv_stats = &nic->drv_stats; nicvf_update_stats(nic); - stats->rx_bytes = hw_stats->rx_bytes_ok; + stats->rx_bytes = hw_stats->rx_bytes; stats->rx_packets = drv_stats->rx_frames_ok; stats->rx_dropped = drv_stats->rx_drops; + stats->multicast = hw_stats->rx_mcast_frames; stats->tx_bytes = hw_stats->tx_bytes_ok; stats->tx_packets = drv_stats->tx_frames_ok; @@ -1208,6 +1405,45 @@ static void nicvf_reset_task(struct work_struct *work) nic->netdev->trans_start = jiffies; } +static int nicvf_config_loopback(struct nicvf *nic, + netdev_features_t features) +{ + union nic_mbx mbx = {}; + + mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK; + mbx.lbk.vf_id = nic->vf_id; + mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0; + + return nicvf_send_msg_to_pf(nic, &mbx); +} + +static netdev_features_t nicvf_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nicvf *nic = netdev_priv(netdev); + + if ((features & NETIF_F_LOOPBACK) && + netif_running(netdev) && !nic->loopback_supported) + features &= ~NETIF_F_LOOPBACK; + + return features; +} + +static int nicvf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nicvf *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + nicvf_config_vlan_stripping(nic, features); + + if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) + return nicvf_config_loopback(nic, features); + + return 0; +} + static const struct net_device_ops nicvf_netdev_ops = { .ndo_open = nicvf_open, .ndo_stop = nicvf_stop, @@ -1216,6 +1452,8 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_set_mac_address = nicvf_set_mac_address, .ndo_get_stats64 = nicvf_get_stats64, .ndo_tx_timeout = nicvf_tx_timeout, + .ndo_fix_features = nicvf_fix_features, + .ndo_set_features = nicvf_set_features, }; static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1223,8 +1461,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct device *dev = &pdev->dev; struct net_device *netdev; struct nicvf *nic; - struct queue_set *qs; - int err; + int err, qcount; err = pci_enable_device(pdev); if (err) { @@ -1250,9 +1487,17 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_release_regions; } - netdev = alloc_etherdev_mqs(sizeof(struct nicvf), - MAX_RCV_QUEUES_PER_QS, - MAX_SND_QUEUES_PER_QS); + qcount = MAX_CMP_QUEUES_PER_QS; + + /* Restrict multiqset support only for host bound VFs */ + if (pdev->is_virtfn) { + /* Set max number of queues per VF */ + qcount = 
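
nicvf_set_features() above uses the usual netdev idiom of XOR-ing the requested and current feature masks and acting only on bits that changed. A minimal demonstration with stand-in flag values (the real NETIF_F_* bit positions differ):

#include <stdio.h>

#define F_VLAN_RX  (1u << 0)    /* stand-in for NETIF_F_HW_VLAN_CTAG_RX */
#define F_LOOPBACK (1u << 1)    /* stand-in for NETIF_F_LOOPBACK */

int main(void)
{
        unsigned int cur = F_VLAN_RX;                   /* netdev->features */
        unsigned int req = F_VLAN_RX | F_LOOPBACK;      /* requested set */
        unsigned int changed = cur ^ req;

        if (changed & F_VLAN_RX)
                printf("reprogram VLAN stripping\n");   /* skipped: bit unchanged */
        if (changed & F_LOOPBACK)
                printf("send NIC_MBOX_MSG_LOOPBACK to the PF\n");
        return 0;
}
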
roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS); + qcount = min(qcount, + (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS); + } + + netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount); if (!netdev) { err = -ENOMEM; goto err_release_regions; @@ -1265,6 +1510,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic = netdev_priv(netdev); nic->netdev = netdev; nic->pdev = pdev; + nic->pnicvf = nic; + nic->max_queues = qcount; /* MAP VF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); @@ -1278,20 +1525,31 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_free_netdev; - qs = nic->qs; - - err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt); - if (err) - goto err_free_netdev; - /* Check if PF is alive and get MAC address for this VF */ err = nicvf_register_misc_interrupt(nic); if (err) goto err_free_netdev; - netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_TSO | NETIF_F_GRO); - netdev->hw_features = netdev->features; + nicvf_send_vf_struct(nic); + + /* Check if this VF is in QS only mode */ + if (nic->sqs_mode) + return 0; + + err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues); + if (err) + goto err_unregister_interrupts; + + netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO | + NETIF_F_HW_VLAN_CTAG_RX); + + netdev->hw_features |= NETIF_F_RXHASH; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= NETIF_F_LOOPBACK; + + netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; @@ -1326,8 +1584,13 @@ static void nicvf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct nicvf *nic = netdev_priv(netdev); + struct net_device *pnetdev = nic->pnicvf->netdev; - unregister_netdev(netdev); + /* Check if this Qset is assigned to different VF. + * If yes, clean primary and all secondary Qsets. 
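
The probe path above sizes alloc_etherdev_mqs() by rounding the online CPU count up to whole Qsets and capping at the primary Qset plus MAX_SQS_PER_VF secondaries. Worked numbers, with the cap taken as 5 secondary Qsets purely for illustration (the real value lives in nic.h):

#include <stdio.h>

#define MAX_CMP_QUEUES_PER_QS 8
#define MAX_SQS_PER_VF 5        /* assumed for this sketch */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        int cpus = 20;          /* stand-in for num_online_cpus() */
        int qcount = roundup(cpus, MAX_CMP_QUEUES_PER_QS);      /* 24 */

        /* Never ask for more than primary + MAX_SQS_PER_VF Qsets can hold */
        qcount = min(qcount, (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
        printf("qcount=%d\n", qcount);  /* 24: three Qsets' worth of queues */
        return 0;
}
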
+ */ + if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) + unregister_netdev(pnetdev); nicvf_unregister_interrupts(nic); pci_set_drvdata(pdev, NULL); free_netdev(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index ca4240aa6..e404ea837 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -475,6 +475,27 @@ static void nicvf_reclaim_rbdr(struct nicvf *nic, return; } +void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features) +{ + u64 rq_cfg; + int sqs; + + rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0); + + /* Enable first VLAN stripping */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rq_cfg |= (1ULL << 25); + else + rq_cfg &= ~(1ULL << 25); + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); + + /* Configure Secondary Qsets, if any */ + for (sqs = 0; sqs < nic->sqs_count; sqs++) + if (nic->snicvf[sqs]) + nicvf_queue_reg_write(nic->snicvf[sqs], + NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); +} + /* Configures receive queue */ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) @@ -524,7 +545,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); nicvf_send_msg_to_pf(nic, &mbx); - nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00); + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); + if (!nic->sqs_mode) + nicvf_config_vlan_stripping(nic, nic->netdev->features); /* Enable Receive queue */ rq_cfg.ena = 1; @@ -598,6 +621,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; mbx.sq.qs_num = qs->vnic_id; mbx.sq.sq_num = qidx; + mbx.sq.sqs_mode = nic->sqs_mode; mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; nicvf_send_msg_to_pf(nic, &mbx); @@ -679,6 +703,7 @@ void nicvf_qset_config(struct nicvf *nic, bool enable) /* Send a mailbox msg to PF to config Qset */ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; mbx.qs.num = qs->vnic_id; + mbx.qs.sqs_count = nic->sqs_count; mbx.qs.cfg = 0; qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; @@ -759,6 +784,10 @@ int nicvf_set_qset_resources(struct nicvf *nic) qs->rbdr_len = RCV_BUF_COUNT; qs->sq_len = SND_QUEUE_LEN; qs->cq_len = CMP_QUEUE_LEN; + + nic->rx_queues = qs->rq_cnt; + nic->tx_queues = qs->sq_cnt; + return 0; } @@ -961,9 +990,6 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, /* Offload checksum calculation to HW */ if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb->protocol != htons(ETH_P_IP)) - return; - hdr->csum_l3 = 1; /* Enable IP csum calculation */ hdr->l3_offset = skb_network_offset(skb); hdr->l4_offset = skb_transport_offset(skb); @@ -1005,7 +1031,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, * them to SQ for transfer */ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, - int qentry, struct sk_buff *skb) + int sq_num, int qentry, struct sk_buff *skb) { struct tso_t tso; int seg_subdescs = 0, desc_cnt = 0; @@ -1065,7 +1091,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, /* Inform HW to xmit all TSO segments */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, - skb_get_queue_mapping(skb), desc_cnt); + sq_num, desc_cnt); nic->drv_stats.tx_tso++; return 1; } @@ -1076,10 +1102,24 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) int i, size; int subdesc_cnt; int sq_num, 
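
nicvf_config_vlan_stripping() above is a read-modify-write of one bit (bit 25) in NIC_QSET_RQ_GEN_CFG, replayed on every secondary Qset so stripping stays uniform across the VF group. The bit manipulation in isolation; the macro name is invented for this sketch, the patch uses the raw 1ULL << 25:

#include <stdio.h>
#include <stdint.h>

#define RQ_VLAN_STRIP (1ULL << 25)      /* per the patch; name illustrative */

static uint64_t set_vlan_strip(uint64_t rq_cfg, int enable)
{
        if (enable)
                rq_cfg |= RQ_VLAN_STRIP;
        else
                rq_cfg &= ~RQ_VLAN_STRIP;
        return rq_cfg;
}

int main(void)
{
        uint64_t cfg = set_vlan_strip(0, 1);

        printf("cfg=%#llx\n", (unsigned long long)cfg);                   /* 0x2000000 */
        printf("cfg=%#llx\n", (unsigned long long)set_vlan_strip(cfg, 0)); /* 0 */
        return 0;
}
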
qentry; - struct queue_set *qs = nic->qs; + struct queue_set *qs; struct snd_queue *sq; sq_num = skb_get_queue_mapping(skb); + if (sq_num >= MAX_SND_QUEUES_PER_QS) { + /* Get secondary Qset's SQ structure */ + i = sq_num / MAX_SND_QUEUES_PER_QS; + if (!nic->snicvf[i - 1]) { + netdev_warn(nic->netdev, + "Secondary Qset#%d's ptr not initialized\n", + i - 1); + return 1; + } + nic = (struct nicvf *)nic->snicvf[i - 1]; + sq_num = sq_num % MAX_SND_QUEUES_PER_QS; + } + + qs = nic->qs; sq = &qs->sq[sq_num]; subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); @@ -1090,7 +1130,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) /* Check if its a TSO packet */ if (skb_shinfo(skb)->gso_size) - return nicvf_sq_append_tso(nic, sq, qentry, skb); + return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb); /* Add SQ header subdesc */ nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); @@ -1126,6 +1166,8 @@ doorbell: return 1; append_fail: + /* Use original PCI dev for debug log */ + nic = nic->pnicvf; netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); return 0; } @@ -1371,10 +1413,11 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) { - struct cmp_queue_stats *stats = &cq->stats; + struct nicvf_hw_stats *stats = &nic->hw_stats; + struct nicvf_drv_stats *drv_stats = &nic->drv_stats; if (!cqe_rx->err_level && !cqe_rx->err_opcode) { - stats->rx.errop.good++; + drv_stats->rx_frames_ok++; return 0; } @@ -1384,111 +1427,78 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, nic->netdev->name, cqe_rx->err_level, cqe_rx->err_opcode); - switch (cqe_rx->err_level) { - case CQ_ERRLVL_MAC: - stats->rx.errlvl.mac_errs++; - break; - case CQ_ERRLVL_L2: - stats->rx.errlvl.l2_errs++; - break; - case CQ_ERRLVL_L3: - stats->rx.errlvl.l3_errs++; - break; - case CQ_ERRLVL_L4: - stats->rx.errlvl.l4_errs++; - break; - } - switch (cqe_rx->err_opcode) { case CQ_RX_ERROP_RE_PARTIAL: - stats->rx.errop.partial_pkts++; + stats->rx_bgx_truncated_pkts++; break; case CQ_RX_ERROP_RE_JABBER: - stats->rx.errop.jabber_errs++; + stats->rx_jabber_errs++; break; case CQ_RX_ERROP_RE_FCS: - stats->rx.errop.fcs_errs++; - break; - case CQ_RX_ERROP_RE_TERMINATE: - stats->rx.errop.terminate_errs++; + stats->rx_fcs_errs++; break; case CQ_RX_ERROP_RE_RX_CTL: - stats->rx.errop.bgx_rx_errs++; + stats->rx_bgx_errs++; break; case CQ_RX_ERROP_PREL2_ERR: - stats->rx.errop.prel2_errs++; - break; - case CQ_RX_ERROP_L2_FRAGMENT: - stats->rx.errop.l2_frags++; - break; - case CQ_RX_ERROP_L2_OVERRUN: - stats->rx.errop.l2_overruns++; - break; - case CQ_RX_ERROP_L2_PFCS: - stats->rx.errop.l2_pfcs++; - break; - case CQ_RX_ERROP_L2_PUNY: - stats->rx.errop.l2_puny++; + stats->rx_prel2_errs++; break; case CQ_RX_ERROP_L2_MAL: - stats->rx.errop.l2_hdr_malformed++; + stats->rx_l2_hdr_malformed++; break; case CQ_RX_ERROP_L2_OVERSIZE: - stats->rx.errop.l2_oversize++; + stats->rx_oversize++; break; case CQ_RX_ERROP_L2_UNDERSIZE: - stats->rx.errop.l2_undersize++; + stats->rx_undersize++; break; case CQ_RX_ERROP_L2_LENMISM: - stats->rx.errop.l2_len_mismatch++; + stats->rx_l2_len_mismatch++; break; case CQ_RX_ERROP_L2_PCLP: - stats->rx.errop.l2_pclp++; + stats->rx_l2_pclp++; break; case CQ_RX_ERROP_IP_NOT: - stats->rx.errop.non_ip++; + stats->rx_ip_ver_errs++; break; case CQ_RX_ERROP_IP_CSUM_ERR: - stats->rx.errop.ip_csum_err++; + stats->rx_ip_csum_errs++; break; case CQ_RX_ERROP_IP_MAL: - stats->rx.errop.ip_hdr_malformed++; + 
stats->rx_ip_hdr_malformed++; break; case CQ_RX_ERROP_IP_MALD: - stats->rx.errop.ip_payload_malformed++; + stats->rx_ip_payload_malformed++; break; case CQ_RX_ERROP_IP_HOP: - stats->rx.errop.ip_hop_errs++; - break; - case CQ_RX_ERROP_L3_ICRC: - stats->rx.errop.l3_icrc_errs++; + stats->rx_ip_ttl_errs++; break; case CQ_RX_ERROP_L3_PCLP: - stats->rx.errop.l3_pclp++; + stats->rx_l3_pclp++; break; case CQ_RX_ERROP_L4_MAL: - stats->rx.errop.l4_malformed++; + stats->rx_l4_malformed++; break; case CQ_RX_ERROP_L4_CHK: - stats->rx.errop.l4_csum_errs++; + stats->rx_l4_csum_errs++; break; case CQ_RX_ERROP_UDP_LEN: - stats->rx.errop.udp_len_err++; + stats->rx_udp_len_errs++; break; case CQ_RX_ERROP_L4_PORT: - stats->rx.errop.bad_l4_port++; + stats->rx_l4_port_errs++; break; case CQ_RX_ERROP_TCP_FLAG: - stats->rx.errop.bad_tcp_flag++; + stats->rx_tcp_flag_errs++; break; case CQ_RX_ERROP_TCP_OFFSET: - stats->rx.errop.tcp_offset_errs++; + stats->rx_tcp_offset_errs++; break; case CQ_RX_ERROP_L4_PCLP: - stats->rx.errop.l4_pclp++; + stats->rx_l4_pclp++; break; case CQ_RX_ERROP_RBDR_TRUNC: - stats->rx.errop.pkt_truncated++; + stats->rx_truncated_pkts++; break; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index f0937b7bf..fb4957d09 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -181,47 +181,6 @@ enum CQ_TX_ERROP_E { }; struct cmp_queue_stats { - struct rx_stats { - struct { - u64 mac_errs; - u64 l2_errs; - u64 l3_errs; - u64 l4_errs; - } errlvl; - struct { - u64 good; - u64 partial_pkts; - u64 jabber_errs; - u64 fcs_errs; - u64 terminate_errs; - u64 bgx_rx_errs; - u64 prel2_errs; - u64 l2_frags; - u64 l2_overruns; - u64 l2_pfcs; - u64 l2_puny; - u64 l2_hdr_malformed; - u64 l2_oversize; - u64 l2_undersize; - u64 l2_len_mismatch; - u64 l2_pclp; - u64 non_ip; - u64 ip_csum_err; - u64 ip_hdr_malformed; - u64 ip_payload_malformed; - u64 ip_hop_errs; - u64 l3_icrc_errs; - u64 l3_pclp; - u64 l4_malformed; - u64 l4_csum_errs; - u64 udp_len_err; - u64 bad_l4_port; - u64 bad_tcp_flag; - u64 tcp_offset_errs; - u64 l4_pclp; - u64 pkt_truncated; - } errop; - } rx; struct tx_stats { u64 good; u64 desc_fault; @@ -292,6 +251,7 @@ struct cmp_queue { void *desc; struct q_desc_mem dmem; struct cmp_queue_stats stats; + int irq; } ____cacheline_aligned_in_smp; struct snd_queue { @@ -347,6 +307,8 @@ struct queue_set { #define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) +void nicvf_config_vlan_stripping(struct nicvf *nic, + netdev_features_t features); int nicvf_set_qset_resources(struct nicvf *nic); int nicvf_config_data_transfer(struct nicvf *nic, bool enable); void nicvf_qset_config(struct nicvf *nic, bool enable); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index b961a89dc..180aa9fab 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -6,6 +6,7 @@ * as published by the Free Software Foundation. 
*/ +#include #include #include #include @@ -26,7 +27,7 @@ struct lmac { struct bgx *bgx; int dmac; - unsigned char mac[ETH_ALEN]; + u8 mac[ETH_ALEN]; bool link_up; int lmacid; /* ID within BGX */ int lmacid_bd; /* ID on board */ @@ -328,6 +329,37 @@ static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac) } } +/* Configure BGX LMAC in internal loopback mode */ +void bgx_lmac_internal_loopback(int node, int bgx_idx, + int lmac_idx, bool enable) +{ + struct bgx *bgx; + struct lmac *lmac; + u64 cfg; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (!bgx) + return; + + lmac = &bgx->lmac[lmac_idx]; + if (lmac->is_sgmii) { + cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL); + if (enable) + cfg |= PCS_MRX_CTL_LOOPBACK1; + else + cfg &= ~PCS_MRX_CTL_LOOPBACK1; + bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg); + } else { + cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1); + if (enable) + cfg |= SPU_CTL_LOOPBACK; + else + cfg &= ~SPU_CTL_LOOPBACK; + bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg); + } +} +EXPORT_SYMBOL(bgx_lmac_internal_loopback); + static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) { u64 cfg; @@ -835,18 +867,108 @@ static void bgx_get_qlm_mode(struct bgx *bgx) } } -static void bgx_init_of(struct bgx *bgx, struct device_node *np) +#ifdef CONFIG_ACPI + +static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) +{ + u8 mac[ETH_ALEN]; + int ret; + + ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev), + "mac-address", mac, ETH_ALEN); + if (ret) + goto out; + + if (!is_valid_ether_addr(mac)) { + ret = -EINVAL; + goto out; + } + + memcpy(dst, mac, ETH_ALEN); +out: + return ret; +} + +/* Currently only sets the MAC address. */ +static acpi_status bgx_acpi_register_phy(acpi_handle handle, + u32 lvl, void *context, void **rv) +{ + struct bgx *bgx = context; + struct acpi_device *adev; + + if (acpi_bus_get_device(handle, &adev)) + goto out; + + acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac); + + SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev); + + bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; +out: + bgx->lmac_count++; + return AE_OK; +} + +static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, + void *context, void **ret_val) { + struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; + struct bgx *bgx = context; + char bgx_sel[5]; + + snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id); + if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) { + pr_warn("Invalid link device\n"); + return AE_OK; + } + + if (strncmp(string.pointer, bgx_sel, 4)) + return AE_OK; + + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + bgx_acpi_register_phy, NULL, bgx, NULL); + + kfree(string.pointer); + return AE_CTRL_TERMINATE; +} + +static int bgx_init_acpi_phy(struct bgx *bgx) +{ + acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL); + return 0; +} + +#else + +static int bgx_init_acpi_phy(struct bgx *bgx) +{ + return -ENODEV; +} + +#endif /* CONFIG_ACPI */ + +#if IS_ENABLED(CONFIG_OF_MDIO) + +static int bgx_init_of_phy(struct bgx *bgx) +{ + struct device_node *np; struct device_node *np_child; u8 lmac = 0; + char bgx_sel[5]; + const char *mac; - for_each_child_of_node(np, np_child) { - struct device_node *phy_np; - const char *mac; + /* Get BGX node from DT */ + snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); + np = of_find_node_by_name(NULL, bgx_sel); + if (!np) + return -ENODEV; - phy_np = of_parse_phandle(np_child, "phy-handle", 0); - if (phy_np) - bgx->lmac[lmac].phydev = 
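
acpi_get_mac_address() above only accepts addresses that pass is_valid_ether_addr(), i.e. not all-zero and not multicast (which subsumes broadcast, since the multicast test is bit 0 of the first octet). A userspace restatement of that check:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static int valid_ether_addr(const unsigned char *a)
{
        static const unsigned char zero[ETH_ALEN];

        if (!memcmp(a, zero, ETH_ALEN))
                return 0;       /* all zeros */
        if (a[0] & 1)
                return 0;       /* multicast; broadcast ff:.. hits this too */
        return 1;
}

int main(void)
{
        unsigned char mac[ETH_ALEN] = { 0x02, 0xca, 0xfe, 0x00, 0x00, 0x01 };

        printf("valid=%d\n", valid_ether_addr(mac));    /* 1 */
        mac[0] |= 1;
        printf("valid=%d\n", valid_ether_addr(mac));    /* 0: multicast bit set */
        return 0;
}
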
of_phy_find_device(phy_np); + for_each_child_of_node(np, np_child) { + struct device_node *phy_np = of_parse_phandle(np_child, + "phy-handle", 0); + if (!phy_np) + continue; + bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); mac = of_get_mac_address(np_child); if (mac) @@ -855,9 +977,29 @@ static void bgx_init_of(struct bgx *bgx, struct device_node *np) SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); bgx->lmac[lmac].lmacid = lmac; lmac++; - if (lmac == MAX_LMAC_PER_BGX) + if (lmac == MAX_LMAC_PER_BGX) { + of_node_put(np_child); break; + } } + return 0; +} + +#else + +static int bgx_init_of_phy(struct bgx *bgx) +{ + return -ENODEV; +} + +#endif /* CONFIG_OF_MDIO */ + +static int bgx_init_phy(struct bgx *bgx) +{ + if (!acpi_disabled) + return bgx_init_acpi_phy(bgx); + + return bgx_init_of_phy(bgx); } static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -865,8 +1007,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err; struct device *dev = &pdev->dev; struct bgx *bgx = NULL; - struct device_node *np; - char bgx_sel[5]; u8 lmac; bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); @@ -902,10 +1042,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) bgx_vnic[bgx->bgx_id] = bgx; bgx_get_qlm_mode(bgx); - snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); - np = of_find_node_by_name(NULL, bgx_sel); - if (np) - bgx_init_of(bgx, np); + err = bgx_init_phy(bgx); + if (err) + goto err_enable; bgx_init_hw(bgx); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index ba4f53b7c..07b7ec66c 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -72,6 +72,7 @@ #define BGX_SPUX_CONTROL1 0x10000 #define SPU_CTL_LOW_POWER BIT_ULL(11) +#define SPU_CTL_LOOPBACK BIT_ULL(14) #define SPU_CTL_RESET BIT_ULL(15) #define BGX_SPUX_STATUS1 0x10008 #define SPU_STATUS1_RCV_LNK BIT_ULL(2) @@ -126,6 +127,7 @@ #define PCS_MRX_CTL_RST_AN BIT_ULL(9) #define PCS_MRX_CTL_PWR_DN BIT_ULL(11) #define PCS_MRX_CTL_AN_EN BIT_ULL(12) +#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14) #define PCS_MRX_CTL_RESET BIT_ULL(15) #define BGX_GMP_PCS_MRX_STATUS 0x30008 #define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) @@ -186,6 +188,8 @@ int bgx_get_lmac_count(int node, int bgx); const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid); void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac); void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); +void bgx_lmac_internal_loopback(int node, int bgx_idx, + int lmac_idx, bool enable); u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx); u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx); #define BGX_RX_STATS_COUNT 11 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 629f75d70..fa0c7b54e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -767,6 +767,11 @@ struct adapter { bool tid_release_task_busy; struct dentry *debugfs_root; + u32 use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */ + u32 trace_rss; /* 1 implies that different RSS flit per filter is + * used per filter else if 0 default RSS flit is + * used for all 4 filters. 
+ */ spinlock_t stats_lock; spinlock_t win0_lock ____cacheline_aligned_in_smp; @@ -1284,6 +1289,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op); int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, const u8 *fw_data, unsigned int size, int force); unsigned int t4_flash_cfg_addr(struct adapter *adapter); +int t4_check_fw_version(struct adapter *adap); int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); int t4_get_exprom_version(struct adapter *adapter, u32 *vers); @@ -1440,6 +1446,10 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); +int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, + int filter_index, int enable); +void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, + int filter_index, int *enabled); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); void t4_sge_decode_idma_state(struct adapter *adapter, int state); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 6074680bc..052c660ac 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -31,6 +31,15 @@ static const char * const dcb_ver_array[] = { "Auto Negotiated" }; +static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state) +{ + if (state == CXGB4_DCB_STATE_FW_ALLSYNCED || + state == CXGB4_DCB_STATE_HOST) + return true; + else + return false; +} + /* Initialize a port's Data Center Bridging state. Typically used after a * Link Down event. */ @@ -603,7 +612,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg) struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; - if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED || + if (!cxgb4_dcb_state_synced(dcb->state) || priority >= CXGB4_MAX_PRIORITY) *pfccfg = 0; else @@ -620,7 +629,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) struct adapter *adap = pi->adapter; int err; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED || + if (!cxgb4_dcb_state_synced(pi->dcb.state) || priority >= CXGB4_MAX_PRIORITY) return; @@ -732,7 +741,7 @@ static u8 cxgb4_getpfcstate(struct net_device *dev) { struct port_info *pi = netdev2pinfo(dev); - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return false; return pi->dcb.pfcen != 0; @@ -756,7 +765,7 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id, struct adapter *adap = pi->adapter; int i; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 0; for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { @@ -794,7 +803,9 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id, */ static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id) { - return __cxgb4_getapp(dev, app_idtype, app_id, 0); + /* Convert app_idtype to firmware format before querying */ + return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ? 
+ app_idtype : 3, app_id, 0); } /* Write a new Application User Priority Map for the specified Application ID @@ -808,7 +819,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, int i, err; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return -EINVAL; /* DCB info gets thrown away on link up */ @@ -896,10 +907,11 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev, struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; - if (dcb_subtype && !(dcb->msgs & dcb_subtype)) - return 0; + if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED) + if (dcb_subtype && !(dcb->msgs & dcb_subtype)) + return 0; - return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && + return (cxgb4_dcb_state_synced(dcb->state) && (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); } @@ -1057,7 +1069,7 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request) /* Can't enable DCB if we haven't successfully negotiated it. */ - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; /* There's currently no mechanism to allow for the firmware DCBX @@ -1080,7 +1092,7 @@ static int cxgb4_getpeer_app(struct net_device *dev, struct adapter *adap = pi->adapter; int i, err = 0; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; info->willing = 0; @@ -1114,7 +1126,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) struct adapter *adap = pi->adapter; int i, err = 0; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { @@ -1133,7 +1145,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) if (!pcmd.u.dcb.app_priority.protocolid) break; - table[i].selector = pcmd.u.dcb.app_priority.sel_field; + table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1); table[i].protocol = be16_to_cpu(pcmd.u.dcb.app_priority.protocolid); table[i].priority = @@ -1181,6 +1193,8 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg) for (i = 0; i < CXGB4_MAX_PRIORITY; i++) pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i]; + pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported; + return 0; } @@ -1198,6 +1212,8 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc) */ pfc->pfc_en = bitswap_1(pi->dcb.pfcen); + pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported; + return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index c3c7db418..0a87a3247 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -151,6 +151,45 @@ static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx) return 0; } +static int cim_la_show_t6(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Status Inst Data PC LS0Stat " + "LS0Addr LS0Data LS1Stat LS1Addr LS1Data\n"); + } else { + const u32 *p = v; + + seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n", + (p[9] >> 16) & 0xff, /* Status */ + p[9] & 0xffff, p[8] >> 16, /* Inst */ + p[8] & 0xffff, p[7] >> 16, /* Data */ + p[7] & 0xffff, p[6] >> 16, /* PC */ + p[2], p[1], p[0], /* LS0 Stat, Addr and Data */ + p[5], p[4], p[3]); /* LS1 Stat, Addr and Data */ + } + return 0; +} + +static int 
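
cim_la_show_t6() above prints logic-analyzer fields that straddle 32-bit capture words, pairing the low half of one word with the high half of the next (" %04x%04x", p[9] & 0xffff, p[8] >> 16). The straddle in isolation, with made-up word values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Two adjacent LA words; a 32-bit field spans their boundary */
        uint32_t hi_word = 0x1234abcd, lo_word = 0x5678ef01;

        /* Field = low 16 bits of hi_word ++ high 16 bits of lo_word */
        printf("%04x%04x\n", (unsigned)(hi_word & 0xffff),
               (unsigned)(lo_word >> 16));      /* prints abcd5678 */
        return 0;
}
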
cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Status Inst Data PC\n"); + } else { + const u32 *p = v; + + seq_printf(seq, " %02x %08x %08x %08x\n", + p[3] & 0xff, p[2], p[1], p[0]); + seq_printf(seq, " %02x %02x%06x %02x%06x %02x%06x\n", + (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, + p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); + seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x\n", + (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, + p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, + p[6] >> 16); + } + return 0; +} + static int cim_la_open(struct inode *inode, struct file *file) { int ret; @@ -162,9 +201,18 @@ static int cim_la_open(struct inode *inode, struct file *file) if (ret) return ret; - p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1, - cfg & UPDBGLACAPTPCONLY_F ? - cim_la_show_3in1 : cim_la_show); + if (is_t6(adap->params.chip)) { + /* +1 to account for integer division of CIMLA_SIZE/10 */ + p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1, + 10 * sizeof(u32), 1, + cfg & UPDBGLACAPTPCONLY_F ? + cim_la_show_pc_t6 : cim_la_show_t6); + } else { + p = seq_open_tab(file, adap->params.cim_la_size / 8, + 8 * sizeof(u32), 1, + cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 : + cim_la_show); + } if (!p) return -ENOMEM; @@ -298,11 +346,11 @@ static int cim_qcfg_show(struct seq_file *seq, void *v) if (is_t4(adap->params.chip)) { i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A, ARRAY_SIZE(obq_wr_t4), obq_wr_t4); - wr = obq_wr_t4; + wr = obq_wr_t4; } else { i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A, ARRAY_SIZE(obq_wr_t5), obq_wr_t5); - wr = obq_wr_t5; + wr = obq_wr_t5; } } if (i) @@ -1153,6 +1201,299 @@ static const struct file_operations mbox_debugfs_fops = { .write = mbox_write }; +static int mps_trc_show(struct seq_file *seq, void *v) +{ + int enabled, i; + struct trace_params tp; + unsigned int trcidx = (uintptr_t)seq->private & 3; + struct adapter *adap = seq->private - trcidx; + + t4_get_trace_filter(adap, &tp, trcidx, &enabled); + if (!enabled) { + seq_puts(seq, "tracer is disabled\n"); + return 0; + } + + if (tp.skip_ofst * 8 >= TRACE_LEN) { + dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n"); + return -EINVAL; + } + if (tp.port < 8) { + i = adap->chan_map[tp.port & 3]; + if (i >= MAX_NPORTS) { + dev_err(adap->pdev_dev, "tracer %u is assigned " + "to non-existing port\n", trcidx); + return -EINVAL; + } + seq_printf(seq, "tracer is capturing %s %s, ", + adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx"); + } else + seq_printf(seq, "tracer is capturing loopback %d, ", + tp.port - 8); + seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len, + tp.min_len); + seq_printf(seq, "packets captured %smatch filter\n", + tp.invert ? 
"do not " : ""); + + if (tp.skip_ofst) { + seq_puts(seq, "filter pattern: "); + for (i = 0; i < tp.skip_ofst * 2; i += 2) + seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]); + seq_putc(seq, '/'); + for (i = 0; i < tp.skip_ofst * 2; i += 2) + seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]); + seq_puts(seq, "@0\n"); + } + + seq_puts(seq, "filter pattern: "); + for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2) + seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]); + seq_putc(seq, '/'); + for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2) + seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]); + seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8); + return 0; +} + +static int mps_trc_open(struct inode *inode, struct file *file) +{ + return single_open(file, mps_trc_show, inode->i_private); +} + +static unsigned int xdigit2int(unsigned char c) +{ + return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10; +} + +#define TRC_PORT_NONE 0xff +#define TRC_RSS_ENABLE 0x33 +#define TRC_RSS_DISABLE 0x13 + +/* Set an MPS trace filter. Syntax is: + * + * disable + * + * to disable tracing, or + * + * interface qid= [snaplen=] [minlen=] [not] []... + * + * where interface is one of rxN, txN, or loopbackN, N = 0..3, qid can be one + * of the NIC's response qid obtained from sge_qinfo and pattern has the form + * + * [/][@] + * + * Up to 2 filter patterns can be specified. If 2 are supplied the first one + * must be anchored at 0. An omited mask is taken as a mask of 1s, an omitted + * anchor is taken as 0. + */ +static ssize_t mps_trc_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + int i, enable, ret; + u32 *data, *mask; + struct trace_params tp; + const struct inode *ino; + unsigned int trcidx; + char *s, *p, *word, *end; + struct adapter *adap; + u32 j; + + ino = file_inode(file); + trcidx = (uintptr_t)ino->i_private & 3; + adap = ino->i_private - trcidx; + + /* Don't accept input more than 1K, can't be anything valid except lots + * of whitespace. Well, use less. 
+ */ + if (count > 1024) + return -EFBIG; + p = s = kzalloc(count + 1, GFP_USER); + if (!s) + return -ENOMEM; + if (copy_from_user(s, buf, count)) { + count = -EFAULT; + goto out; + } + + if (s[count - 1] == '\n') + s[count - 1] = '\0'; + + enable = strcmp("disable", s) != 0; + if (!enable) + goto apply; + + /* enable or disable trace multi rss filter */ + if (adap->trace_rss) + t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE); + else + t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE); + + memset(&tp, 0, sizeof(tp)); + tp.port = TRC_PORT_NONE; + i = 0; /* counts pattern nibbles */ + + while (p) { + while (isspace(*p)) + p++; + word = strsep(&p, " "); + if (!*word) + break; + + if (!strncmp(word, "qid=", 4)) { + end = (char *)word + 4; + ret = kstrtouint(end, 10, &j); + if (ret) + goto out; + if (!adap->trace_rss) { + t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j); + continue; + } + + switch (trcidx) { + case 0: + t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j); + break; + case 1: + t4_write_reg(adap, + MPS_TRC_FILTER1_RSS_CONTROL_A, j); + break; + case 2: + t4_write_reg(adap, + MPS_TRC_FILTER2_RSS_CONTROL_A, j); + break; + case 3: + t4_write_reg(adap, + MPS_TRC_FILTER3_RSS_CONTROL_A, j); + break; + } + continue; + } + if (!strncmp(word, "snaplen=", 8)) { + end = (char *)word + 8; + ret = kstrtouint(end, 10, &j); + if (ret || j > 9600) { +inval: count = -EINVAL; + goto out; + } + tp.snap_len = j; + continue; + } + if (!strncmp(word, "minlen=", 7)) { + end = (char *)word + 7; + ret = kstrtouint(end, 10, &j); + if (ret || j > TFMINPKTSIZE_M) + goto inval; + tp.min_len = j; + continue; + } + if (!strcmp(word, "not")) { + tp.invert = !tp.invert; + continue; + } + if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) { + if (word[8] < '0' || word[8] > '3' || word[9]) + goto inval; + tp.port = word[8] - '0' + 8; + continue; + } + if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) { + if (word[2] < '0' || word[2] > '3' || word[3]) + goto inval; + tp.port = word[2] - '0' + 4; + if (adap->chan_map[tp.port & 3] >= MAX_NPORTS) + goto inval; + continue; + } + if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) { + if (word[2] < '0' || word[2] > '3' || word[3]) + goto inval; + tp.port = word[2] - '0'; + if (adap->chan_map[tp.port] >= MAX_NPORTS) + goto inval; + continue; + } + if (!isxdigit(*word)) + goto inval; + + /* we have found a trace pattern */ + if (i) { /* split pattern */ + if (tp.skip_len) /* too many splits */ + goto inval; + tp.skip_ofst = i / 16; + } + + data = &tp.data[i / 8]; + mask = &tp.mask[i / 8]; + j = i; + + while (isxdigit(*word)) { + if (i >= TRACE_LEN * 2) { + count = -EFBIG; + goto out; + } + *data = (*data << 4) + xdigit2int(*word++); + if (++i % 8 == 0) + data++; + } + if (*word == '/') { + word++; + while (isxdigit(*word)) { + if (j >= i) /* mask longer than data */ + goto inval; + *mask = (*mask << 4) + xdigit2int(*word++); + if (++j % 8 == 0) + mask++; + } + if (i != j) /* mask shorter than data */ + goto inval; + } else { /* no mask, use all 1s */ + for ( ; i - j >= 8; j += 8) + *mask++ = 0xffffffff; + if (i % 8) + *mask = (1 << (i % 8) * 4) - 1; + } + if (*word == '@') { + end = (char *)word + 1; + ret = kstrtouint(end, 10, &j); + if (*end && *end != '\n') + goto inval; + if (j & 7) /* doesn't start at multiple of 8 */ + goto inval; + j /= 8; + if (j < tp.skip_ofst) /* overlaps earlier pattern */ + goto inval; + if (j - tp.skip_ofst > 31) /* skip too big */ + goto inval; + tp.skip_len = j - tp.skip_ofst; + } + if (i % 8) { + *data <<= (8 - i % 8) * 
4; + *mask <<= (8 - i % 8) * 4; + i = (i + 15) & ~15; /* 8-byte align */ + } + } + + if (tp.port == TRC_PORT_NONE) + goto inval; + +apply: + i = t4_set_trace_filter(adap, &tp, trcidx, enable); + if (i) + count = i; +out: + kfree(s); + return count; +} + +static const struct file_operations mps_trc_debugfs_fops = { + .owner = THIS_MODULE, + .open = mps_trc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = mps_trc_write +}; + static ssize_t flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -1895,13 +2236,13 @@ static int sge_qinfo_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); - int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); + int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4); int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4); int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); int i, r = (uintptr_t)v - 1; - int toe_idx = r - eth_entries; - int rdma_idx = toe_idx - toe_entries; + int iscsi_idx = r - eth_entries; + int rdma_idx = iscsi_idx - iscsi_entries; int ciq_idx = rdma_idx - rdma_entries; int ctrl_idx = ciq_idx - ciq_entries; int fq_idx = ctrl_idx - ctrl_entries; @@ -1917,8 +2258,12 @@ do { \ seq_putc(seq, '\n'); \ } while (0) #define S(s, v) S3("s", s, v) +#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v) #define T(s, v) S3("u", s, tx[i].v) +#define TL(s, v) T3("lu", s, v) +#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v) #define R(s, v) S3("u", s, rx[i].v) +#define RL(s, v) R3("lu", s, v) if (r < eth_entries) { int base_qset = r * 4; @@ -1957,12 +2302,30 @@ do { \ R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); - } else if (toe_idx < toe_entries) { - const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4]; - const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4]; - int n = min(4, adap->sge.ofldqsets - 4 * toe_idx); + RL("RxPackets:", stats.pkts); + RL("RxCSO:", stats.rx_cso); + RL("VLANxtract:", stats.vlan_ex); + RL("LROmerged:", stats.lro_merged); + RL("LROpackets:", stats.lro_pkts); + RL("RxDrops:", stats.rx_drops); + TL("TSO:", tso); + TL("TxCSO:", tx_cso); + TL("VLANins:", vlan_ins); + TL("TxQFull:", q.stops); + TL("TxQRestarts:", q.restarts); + TL("TxMapErr:", mapping_err); + RL("FLAllocErr:", fl.alloc_failed); + RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLStarving:", fl.starving); + + } else if (iscsi_idx < iscsi_entries) { + const struct sge_ofld_rxq *rx = + &adap->sge.ofldrxq[iscsi_idx * 4]; + const struct sge_ofld_txq *tx = + &adap->sge.ofldtxq[iscsi_idx * 4]; + int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx); - S("QType:", "TOE"); + S("QType:", "iSCSI"); T("TxQ ID:", q.cntxt_id); T("TxQ size:", q.size); T("TxQ inuse:", q.in_use); @@ -1982,6 +2345,13 @@ do { \ R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); + RL("RxPackets:", stats.pkts); + RL("RxImmPkts:", stats.imm); + RL("RxNoMem:", stats.nomem); + RL("FLAllocErr:", fl.alloc_failed); + RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLStarving:", fl.starving); + } else if (rdma_idx < rdma_entries) { const struct sge_ofld_rxq *rx = &adap->sge.rdmarxq[rdma_idx * 4]; @@ -2004,6 +2374,13 @@ do { \ R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); + RL("RxPackets:", stats.pkts); + RL("RxImmPkts:", stats.imm); + RL("RxNoMem:", stats.nomem); + RL("FLAllocErr:", fl.alloc_failed); + 
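
The pattern parser in mps_trc_write() above accumulates hex nibbles into 32-bit words and left-justifies a partial final word before programming the filter. A cut-down of just that accumulation (pattern only; the mask and @anchor handling are omitted):

#include <ctype.h>
#include <stdio.h>
#include <stdint.h>

static unsigned int xdigit2int(unsigned char c)
{
        return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
}

int main(void)
{
        const char *word = "deadbeefcafe";      /* 12 nibbles -> 1.5 words */
        uint32_t data[2] = { 0 }, *p = data;
        int i = 0;

        while (isxdigit((unsigned char)*word)) {
                *p = (*p << 4) + xdigit2int((unsigned char)*word++);
                if (++i % 8 == 0)
                        p++;            /* word full, move to the next */
        }
        if (i % 8)
                *p <<= (8 - i % 8) * 4; /* left-justify the partial word */

        printf("%08x %08x\n", (unsigned)data[0],
               (unsigned)data[1]);      /* deadbeef cafe0000 */
        return 0;
}
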
RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLStarving:", fl.starving); + } else if (ciq_idx < ciq_entries) { const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4]; int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx); @@ -2019,6 +2396,9 @@ do { \ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); S3("u", "Intr pktcnt:", adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); + RL("RxAN:", stats.an); + RL("RxNoMem:", stats.nomem); + } else if (ctrl_idx < ctrl_entries) { const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4]; int n = min(4, adap->params.nports - 4 * ctrl_idx); @@ -2029,6 +2409,8 @@ do { \ T("TxQ inuse:", q.in_use); T("TxQ CIDX:", q.cidx); T("TxQ PIDX:", q.pidx); + TL("TxQFull:", q.stops); + TL("TxQRestarts:", q.restarts); } else if (fq_idx == 0) { const struct sge_rspq *evtq = &adap->sge.fw_evtq; @@ -2044,10 +2426,14 @@ do { \ adap->sge.counter_val[evtq->pktcnt_idx]); } #undef R +#undef RL #undef T +#undef TL #undef S +#undef R3 +#undef T3 #undef S3 -return 0; + return 0; } static int sge_queue_entries(const struct adapter *adap) @@ -2164,6 +2550,73 @@ static const struct file_operations mem_debugfs_fops = { .llseek = default_llseek, }; +static int tid_info_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + const struct tid_info *t = &adap->tids; + enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); + + if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) { + unsigned int sb; + + if (chip <= CHELSIO_T5) + sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4; + else + sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A); + + if (sb) { + seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1, + adap->tids.hash_base, + t->ntids - 1); + seq_printf(seq, ", in use: %u/%u\n", + atomic_read(&t->tids_in_use), + atomic_read(&t->hash_tids_in_use)); + } else if (adap->flags & FW_OFLD_CONN) { + seq_printf(seq, "TID range: %u..%u/%u..%u", + t->aftid_base, + t->aftid_end, + adap->tids.hash_base, + t->ntids - 1); + seq_printf(seq, ", in use: %u/%u\n", + atomic_read(&t->tids_in_use), + atomic_read(&t->hash_tids_in_use)); + } else { + seq_printf(seq, "TID range: %u..%u", + adap->tids.hash_base, + t->ntids - 1); + seq_printf(seq, ", in use: %u\n", + atomic_read(&t->hash_tids_in_use)); + } + } else if (t->ntids) { + seq_printf(seq, "TID range: 0..%u", t->ntids - 1); + seq_printf(seq, ", in use: %u\n", + atomic_read(&t->tids_in_use)); + } + + if (t->nstids) + seq_printf(seq, "STID range: %u..%u, in use: %u\n", + (!t->stid_base && + (chip <= CHELSIO_T5)) ? 
+ t->stid_base + 1 : t->stid_base, + t->stid_base + t->nstids - 1, t->stids_in_use); + if (t->natids) + seq_printf(seq, "ATID range: 0..%u, in use: %u\n", + t->natids - 1, t->atids_in_use); + seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base, + t->ftid_base + t->nftids - 1); + if (t->nsftids) + seq_printf(seq, "SFTID range: %u..%u in use: %u\n", + t->sftid_base, t->sftid_base + t->nsftids - 2, + t->sftids_in_use); + if (t->ntids) + seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n", + t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A), + t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A)); + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(tid_info); + static void add_debugfs_mem(struct adapter *adap, const char *name, unsigned int idx, unsigned int size_mb) { @@ -2227,6 +2680,290 @@ static const struct file_operations blocked_fl_fops = { .llseek = generic_file_llseek, }; +struct mem_desc { + unsigned int base; + unsigned int limit; + unsigned int idx; +}; + +static int mem_desc_cmp(const void *a, const void *b) +{ + return ((const struct mem_desc *)a)->base - + ((const struct mem_desc *)b)->base; +} + +static void mem_region_show(struct seq_file *seq, const char *name, + unsigned int from, unsigned int to) +{ + char buf[40]; + + string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf, + sizeof(buf)); + seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf); +} + +static int meminfo_show(struct seq_file *seq, void *v) +{ + static const char * const memory[] = { "EDC0:", "EDC1:", "MC:", + "MC0:", "MC1:"}; + static const char * const region[] = { + "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", + "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", + "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", + "TDDP region:", "TPT region:", "STAG region:", "RQ region:", + "RQUDP region:", "PBL region:", "TXPBL region:", + "DBVFIFO region:", "ULPRX state:", "ULPTX state:", + "On-chip queues:" + }; + + int i, n; + u32 lo, hi, used, alloc; + struct mem_desc avail[4]; + struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */ + struct mem_desc *md = mem; + struct adapter *adap = seq->private; + + for (i = 0; i < ARRAY_SIZE(mem); i++) { + mem[i].limit = 0; + mem[i].idx = i; + } + + /* Find and sort the populated memory ranges */ + i = 0; + lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (lo & EDRAM0_ENABLE_F) { + hi = t4_read_reg(adap, MA_EDRAM0_BAR_A); + avail[i].base = EDRAM0_BASE_G(hi) << 20; + avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20); + avail[i].idx = 0; + i++; + } + if (lo & EDRAM1_ENABLE_F) { + hi = t4_read_reg(adap, MA_EDRAM1_BAR_A); + avail[i].base = EDRAM1_BASE_G(hi) << 20; + avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20); + avail[i].idx = 1; + i++; + } + + if (is_t5(adap->params.chip)) { + if (lo & EXT_MEM0_ENABLE_F) { + hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + avail[i].base = EXT_MEM0_BASE_G(hi) << 20; + avail[i].limit = + avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20); + avail[i].idx = 3; + i++; + } + if (lo & EXT_MEM1_ENABLE_F) { + hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + avail[i].base = EXT_MEM1_BASE_G(hi) << 20; + avail[i].limit = + avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20); + avail[i].idx = 4; + i++; + } + } else { + if (lo & EXT_MEM_ENABLE_F) { + hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); + avail[i].base = EXT_MEM_BASE_G(hi) << 20; + avail[i].limit = + avail[i].base + (EXT_MEM_SIZE_G(hi) << 20); + avail[i].idx = 2; + i++; + } + } + if (!i) /* no memory available */ + return 0; + sort(avail, i, sizeof(struct 
mem_desc), mem_desc_cmp, NULL); + + (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A); + (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A); + (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A); + (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A); + (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A); + (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A); + (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A); + (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A); + (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A); + + /* the next few have explicit upper bounds */ + md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A); + md->limit = md->base - 1 + + t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) * + PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A)); + md++; + + md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A); + md->limit = md->base - 1 + + t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) * + PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A)); + md++; + + if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) { + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) { + hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4; + md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A); + } else { + hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A); + md->base = t4_read_reg(adap, + LE_DB_HASH_TBL_BASE_ADDR_A); + } + md->limit = 0; + } else { + md->base = 0; + md->idx = ARRAY_SIZE(region); /* hide it */ + } + md++; + +#define ulp_region(reg) do { \ + md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\ + (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \ +} while (0) + + ulp_region(RX_ISCSI); + ulp_region(RX_TDDP); + ulp_region(TX_TPT); + ulp_region(RX_STAG); + ulp_region(RX_RQ); + ulp_region(RX_RQUDP); + ulp_region(RX_PBL); + ulp_region(TX_PBL); +#undef ulp_region + md->base = 0; + md->idx = ARRAY_SIZE(region); + if (!is_t4(adap->params.chip)) { + u32 size = 0; + u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A); + u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A); + + if (is_t5(adap->params.chip)) { + if (sge_ctrl & VFIFO_ENABLE_F) + size = DBVFIFO_SIZE_G(fifo_size); + } else { + size = T6_DBVFIFO_SIZE_G(fifo_size); + } + + if (size) { + md->base = BASEADDR_G(t4_read_reg(adap, + SGE_DBVFIFO_BADDR_A)); + md->limit = md->base + (size << 2) - 1; + } + } + + md++; + + md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A); + md->limit = 0; + md++; + md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A); + md->limit = 0; + md++; + + md->base = adap->vres.ocq.start; + if (adap->vres.ocq.size) + md->limit = md->base + adap->vres.ocq.size - 1; + else + md->idx = ARRAY_SIZE(region); /* hide it */ + md++; + + /* add any address-space holes, there can be up to 3 */ + for (n = 0; n < i - 1; n++) + if (avail[n].limit < avail[n + 1].base) + (md++)->base = avail[n].limit; + if (avail[n].limit) + (md++)->base = avail[n].limit; + + n = md - mem; + sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL); + + for (lo = 0; lo < i; lo++) + mem_region_show(seq, memory[avail[lo].idx], avail[lo].base, + avail[lo].limit - 1); + + seq_putc(seq, '\n'); + for (i = 0; i < n; i++) { + if (mem[i].idx >= ARRAY_SIZE(region)) + continue; /* skip holes */ + if (!mem[i].limit) + mem[i].limit = i < n - 1 ? 
mem[i + 1].base - 1 : ~0; + mem_region_show(seq, region[mem[i].idx], mem[i].base, + mem[i].limit); + } + + seq_putc(seq, '\n'); + lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A); + hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1; + mem_region_show(seq, "uP RAM:", lo, hi); + + lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A); + hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1; + mem_region_show(seq, "uP Extmem2:", lo, hi); + + lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A); + seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n", + PMRXMAXPAGE_G(lo), + t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10, + (lo & PMRXNUMCHN_F) ? 2 : 1); + + lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A); + hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A); + seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n", + PMTXMAXPAGE_G(lo), + hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), + hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo)); + seq_printf(seq, "%u p-structs\n\n", + t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A)); + + for (i = 0; i < 4; i++) { + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) + lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4); + else + lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4); + if (is_t5(adap->params.chip)) { + used = T5_USED_G(lo); + alloc = T5_ALLOC_G(lo); + } else { + used = USED_G(lo); + alloc = ALLOC_G(lo); + } + /* For T6 these are MAC buffer groups */ + seq_printf(seq, "Port %d using %u pages out of %u allocated\n", + i, used, alloc); + } + for (i = 0; i < adap->params.arch.nchan; i++) { + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) + lo = t4_read_reg(adap, + MPS_RX_LPBK_BG_PG_CNT0_A + i * 4); + else + lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4); + if (is_t5(adap->params.chip)) { + used = T5_USED_G(lo); + alloc = T5_ALLOC_G(lo); + } else { + used = USED_G(lo); + alloc = ALLOC_G(lo); + } + /* For T6 these are MAC buffer groups */ + seq_printf(seq, + "Loopback %d using %u pages out of %u allocated\n", + i, used, alloc); + } + return 0; +} + +static int meminfo_open(struct inode *inode, struct file *file) +{ + return single_open(file, meminfo_show, inode->i_private); +} + +static const struct file_operations meminfo_fops = { + .owner = THIS_MODULE, + .open = meminfo_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; /* Add an array of Debug FS files. */ void add_debugfs_files(struct adapter *adap, @@ -2264,6 +3001,10 @@ int t4_setup_debugfs(struct adapter *adap) { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 }, { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 }, { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 }, + { "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 }, + { "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 }, + { "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 }, + { "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 }, { "l2t", &t4_l2t_fops, S_IRUSR, 0}, { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 }, { "rss", &rss_debugfs_fops, S_IRUSR, 0 }, @@ -2293,7 +3034,9 @@ int t4_setup_debugfs(struct adapter *adap) #if IS_ENABLED(CONFIG_IPV6) { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 }, #endif + { "tids", &tid_info_debugfs_fops, S_IRUSR, 0}, { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, + { "meminfo", &meminfo_fops, S_IRUSR, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. 
@@ -2341,6 +3084,10 @@ int t4_setup_debugfs(struct adapter *adap)
 	de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
 				      &flash_debugfs_fops, adap->params.sf_size);
+	debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+			    adap->debugfs_root, &adap->use_bd);
+	debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR,
+			    adap->debugfs_root, &adap->trace_rss);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 687acf71f..5eedb98ff 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -925,6 +925,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
 	const struct firmware *fw;
 	struct adapter *adap = netdev2adap(netdev);
 	unsigned int mbox = PCIE_FW_MASTER_M + 1;
+	u32 pcie_fw;
+	unsigned int master;
+	u8 master_vld = 0;
+
+	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+	master = PCIE_FW_MASTER_G(pcie_fw);
+	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+		master_vld = 1;
+	/* if csiostor is the master, return */
+	if (master_vld && (master != adap->pf)) {
+		dev_warn(adap->pdev_dev,
+			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+		return -EOPNOTSUPP;
+	}
 
 	ef->data[sizeof(ef->data) - 1] = '\0';
 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a65eca515..ccd46828f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1547,7 +1547,7 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
 		t->stid_tab[stid].data = data;
 		stid -= t->nstids;
 		stid += t->sftid_base;
-		t->stids_in_use++;
+		t->sftids_in_use++;
 	}
 	spin_unlock_bh(&t->stid_lock);
 	return stid;
@@ -1572,10 +1572,14 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
 	else
 		bitmap_release_region(t->stid_bmap, stid, 2);
 	t->stid_tab[stid].data = NULL;
-	if (family == PF_INET)
-		t->stids_in_use--;
-	else
-		t->stids_in_use -= 4;
+	if (stid < t->nstids) {
+		if (family == PF_INET)
+			t->stids_in_use--;
+		else
+			t->stids_in_use -= 4;
+	} else {
+		t->sftids_in_use--;
+	}
 	spin_unlock_bh(&t->stid_lock);
 }
 EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1653,20 +1657,25 @@ static void process_tid_release_list(struct work_struct *work)
  */
 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
 {
-	void *old;
 	struct sk_buff *skb;
 	struct adapter *adap = container_of(t, struct adapter, tids);
 
-	old = t->tid_tab[tid];
+	WARN_ON(tid >= t->ntids);
+
+	if (t->tid_tab[tid]) {
+		t->tid_tab[tid] = NULL;
+		if (t->hash_base && (tid >= t->hash_base))
+			atomic_dec(&t->hash_tids_in_use);
+		else
+			atomic_dec(&t->tids_in_use);
+	}
+
 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
 	if (likely(skb)) {
-		t->tid_tab[tid] = NULL;
 		mk_tid_release(skb, chan, tid);
 		t4_ofld_send(adap, skb);
 	} else
 		cxgb4_queue_tid_release(t, chan, tid);
-	if (old)
-		atomic_dec(&t->tids_in_use);
 }
 EXPORT_SYMBOL(cxgb4_remove_tid);
@@ -1701,9 +1710,11 @@ static int tid_init(struct tid_info *t)
 	spin_lock_init(&t->atid_lock);
 
 	t->stids_in_use = 0;
+	t->sftids_in_use = 0;
 	t->afree = NULL;
 	t->atids_in_use = 0;
 	atomic_set(&t->tids_in_use, 0);
+	atomic_set(&t->hash_tids_in_use, 0);
 
 	/* Setup the free list for atid_tab and clear the stid bitmap. */
 	if (natids) {
@@ -3656,6 +3667,10 @@ static int adap_init0(struct adapter *adap)
 	 */
 	t4_get_fw_version(adap, &adap->params.fw_vers);
 	t4_get_tp_version(adap, &adap->params.tp_vers);
+	ret = t4_check_fw_version(adap);
+	/* If firmware is too old (not supported by driver) force an update. */
+	if (ret == -EFAULT)
+		state = DEV_STATE_UNINIT;
 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
 		struct fw_info *fw_info;
 		struct fw_hdr *card_fw;
@@ -4550,6 +4565,27 @@ static void free_some_resources(struct adapter *adapter)
 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
 #define SEGMENT_SIZE 128
 
+static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+{
+	u16 device_id;
+
+	/* Retrieve adapter's device ID */
+	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+
+	switch (device_id >> 12) {
+	case CHELSIO_T4:
+		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+	case CHELSIO_T5:
+		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+	case CHELSIO_T6:
+		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+	default:
+		dev_err(&pdev->dev, "Device %d is not supported\n",
+			device_id);
+	}
+	return -EINVAL;
+}
+
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int func, i, err, s_qpp, qpp, num_seg;
@@ -4557,6 +4593,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bool highdma = false;
 	struct adapter *adapter = NULL;
 	void __iomem *regs;
+	u32 whoami, pl_rev;
+	enum chip_type chip;
 
 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -4585,7 +4623,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_unmap_bar0;
 
 	/* We control everything through one PF */
-	func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
+	whoami = readl(regs + PL_WHOAMI_A);
+	pl_rev = REV_G(readl(regs + PL_REV_A));
+	chip = get_chip_type(pdev, pl_rev);
+	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 	if (func != ent->driver_data) {
 		iounmap(regs);
 		pci_disable_device(pdev);
@@ -4676,8 +4718,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			err = -ENOMEM;
 			goto out_free_adapter;
 		}
-		t4_write_reg(adapter, SGE_STAT_CFG_A,
-			     STATSOURCE_T5_V(7) | STATMODE_V(0));
 	}
 
 	setup_memwin(adapter);
@@ -4689,6 +4729,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto out_unmap_bar;
 
+	/* configure SGE_STAT_CFG_A to read WC stats */
+	if (!is_t4(adapter->params.chip))
+		t4_write_reg(adapter, SGE_STAT_CFG_A,
+			     STATSOURCE_T5_V(7) | STATMODE_V(0));
+
 	for_each_port(adapter, i) {
 		struct net_device *netdev;
 
@@ -4756,7 +4801,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	cfg_queues(adapter);
 
-	adapter->l2t = t4_init_l2t();
+	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
 	if (!adapter->l2t) {
 		/* We tolerate a lack of L2T, giving up some functionality */
 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
@@ -4781,6 +4826,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		adapter->params.offload = 0;
 	}
 
+	if (is_offload(adapter)) {
+		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+			u32 hash_base, hash_reg;
+
+			if (chip <= CHELSIO_T5) {
+				hash_reg = LE_DB_TID_HASHBASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base / 4;
+			} else {
+				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base;
+			}
+		}
+	}
+
 	/* See what interrupts we'll be using */
 	if (msi > 1 && enable_msix(adapter) == 0)
 		adapter->flags |= USING_MSIX;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b27897d4f..c3a8be554 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -96,6 +96,7 @@ struct tid_info {
 	unsigned long *stid_bmap;
 	unsigned int nstids;
 	unsigned int stid_base;
+	unsigned int hash_base;
 
 	union aopen_entry *atid_tab;
 	unsigned int natids;
@@ -116,8 +117,12 @@ struct tid_info {
 
 	spinlock_t stid_lock;
 	unsigned int stids_in_use;
+	unsigned int sftids_in_use;
 
+	/* TIDs in the TCAM */
 	atomic_t tids_in_use;
+	/* TIDs in the HASH */
+	atomic_t hash_tids_in_use;
 };
 
 static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -147,7 +152,10 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
 				    unsigned int tid)
 {
 	t->tid_tab[tid] = data;
-	atomic_inc(&t->tids_in_use);
+	if (t->hash_base && (tid >= t->hash_base))
+		atomic_inc(&t->hash_tids_in_use);
+	else
+		atomic_inc(&t->tids_in_use);
 }
 
 int cxgb4_alloc_atid(struct tid_info *t, void *data);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 252efc293..ac27898c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -51,24 +51,17 @@
 #define VLAN_NONE 0xfff
 
 /* identifies sync vs async L2T_WRITE_REQs */
-#define F_SYNC_WR    (1 << 12)
-
-enum {
-	L2T_STATE_VALID,	/* entry is up to date */
-	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
-	L2T_STATE_RESOLVING,	/* entry needs address resolution */
-	L2T_STATE_SYNC_WRITE,	/* synchronous write of entry underway */
-
-	/* when state is one of the below the entry is not hashed */
-	L2T_STATE_SWITCHING,	/* entry is being used by a switching filter */
-	L2T_STATE_UNUSED	/* entry not in use */
-};
+#define SYNC_WR_S    12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F    SYNC_WR_V(1)
 
 struct l2t_data {
+	unsigned int l2t_start;     /* start index of our piece of the L2T */
+	unsigned int l2t_size;      /* number of entries in l2tab */
 	rwlock_t lock;
 	atomic_t nfree;             /* number of free entries */
 	struct l2t_entry *rover;    /* starting point for next allocation */
-	struct l2t_entry l2tab[L2T_SIZE];
+	struct l2t_entry l2tab[0];  /* MUST BE LAST */
 };
 
 static inline unsigned int vlan_prio(const struct l2t_entry *e)
 {
@@ -85,29 +78,36 @@ static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
 /*
  * To avoid having to check address families we do not allow v4 and v6
  * neighbors to be on the same hash chain.  We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
+ * half of available hash buckets and v6 in the second.  We need at least two
+ * entries in our L2T for this scheme to work.
  */
 enum {
-	L2T_SZ_HALF = L2T_SIZE / 2,
-	L2T_HASH_MASK = L2T_SZ_HALF - 1
+	L2T_MIN_HASH_BUCKETS = 2,
 };
 
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+				    int ifindex)
 {
-	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+	unsigned int l2t_size_half = d->l2t_size / 2;
+
+	return jhash_2words(*key, ifindex, 0) % l2t_size_half;
 }
 
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+				     int ifindex)
 {
+	unsigned int l2t_size_half = d->l2t_size / 2;
 	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
 
-	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+	return (l2t_size_half +
+		(jhash_2words(xor, ifindex, 0) % l2t_size_half));
 }
 
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+			      int addr_len, int ifindex)
 {
-	return addr_len == 4 ? arp_hash(addr, ifindex) :
-			       ipv6_hash(addr, ifindex);
+	return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+			       ipv6_hash(d, addr, ifindex);
 }
 
 /*
@@ -139,6 +139,8 @@ static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
  */
 static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 {
+	struct l2t_data *d = adap->l2t;
+	unsigned int l2t_idx = e->idx + d->l2t_start;
 	struct sk_buff *skb;
 	struct cpl_l2t_write_req *req;
 
@@ -150,10 +152,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 
 	INIT_TP_WR(req, 0);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
-					e->idx | (sync ? F_SYNC_WR : 0) |
+					l2t_idx | (sync ? SYNC_WR_F : 0) |
 					TID_QID_V(adap->sge.fw_evtq.abs_id)));
 	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
-	req->l2t_idx = htons(e->idx);
+	req->l2t_idx = htons(l2t_idx);
 	req->vlan = htons(e->vlan);
 	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
 		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
@@ -190,18 +192,19 @@ static void send_pending(struct adapter *adap, struct l2t_entry *e)
  */
 void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
 {
+	struct l2t_data *d = adap->l2t;
 	unsigned int tid = GET_TID(rpl);
-	unsigned int idx = tid & (L2T_SIZE - 1);
+	unsigned int l2t_idx = tid % L2T_SIZE;
 
 	if (unlikely(rpl->status != CPL_ERR_NONE)) {
 		dev_err(adap->pdev_dev,
 			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
-			rpl->status, idx);
+			rpl->status, l2t_idx);
 		return;
 	}
 
-	if (tid & F_SYNC_WR) {
-		struct l2t_entry *e = &adap->l2t->l2tab[idx];
+	if (tid & SYNC_WR_F) {
+		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
 
 		spin_lock(&e->lock);
 		if (e->state != L2T_STATE_SWITCHING) {
@@ -276,7 +279,7 @@ static struct l2t_entry *alloc_l2e(struct l2t_data *d)
 		return NULL;
 
 	/* there's definitely a free entry */
-	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
 		if (atomic_read(&e->refcnt) == 0)
 			goto found;
 
@@ -368,7 +371,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
 	int addr_len = neigh->tbl->key_len;
 	u32 *addr = (u32 *)neigh->primary_key;
 	int ifidx = neigh->dev->ifindex;
-	int hash = addr_hash(addr, addr_len, ifidx);
+	int hash = addr_hash(d, addr, addr_len, ifidx);
 
 	if (neigh->dev->flags & IFF_LOOPBACK)
 		lport = netdev2pinfo(physdev)->tx_chan + 4;
@@ -481,7 +484,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
 	int addr_len = neigh->tbl->key_len;
 	u32 *addr = (u32 *) neigh->primary_key;
 	int ifidx = neigh->dev->ifindex;
-	int hash = addr_hash(addr, addr_len, ifidx);
+	int hash = addr_hash(d, addr, addr_len, ifidx);
 
 	read_lock_bh(&d->lock);
 	for (e = d->l2tab[hash].first; e; e = e->next)
@@ -554,20 +557,30 @@ int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
 	return write_l2e(adap, e, 0);
 }
 
-struct l2t_data *t4_init_l2t(void)
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
 {
+	unsigned int l2t_size;
 	int i;
 	struct l2t_data *d;
 
-	d = t4_alloc_mem(sizeof(*d));
+	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+		return NULL;
+	l2t_size = l2t_end - l2t_start + 1;
+	if (l2t_size < L2T_MIN_HASH_BUCKETS)
+		return NULL;
+
+	d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
 	if (!d)
 		return NULL;
 
+	d->l2t_start = l2t_start;
+	d->l2t_size = l2t_size;
+
 	d->rover = d->l2tab;
-	atomic_set(&d->nfree, L2T_SIZE);
+	atomic_set(&d->nfree, l2t_size);
 	rwlock_init(&d->lock);
 
-	for (i = 0; i < L2T_SIZE; ++i) {
+	for (i = 0; i < d->l2t_size; ++i) {
 		d->l2tab[i].idx = i;
 		d->l2tab[i].state = L2T_STATE_UNUSED;
 		spin_lock_init(&d->l2tab[i].lock);
@@ -578,9 +591,9 @@ struct l2t_data *t4_init_l2t(void)
 
 static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
 {
-	struct l2t_entry *l2tab = seq->private;
+	struct l2t_data *d = seq->private;
 
-	return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+	return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
 }
 
 static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
@@ -620,6 +633,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
 			 "Ethernet address  VLAN/P LP State Users Port\n");
 	else {
 		char ip[60];
+		struct l2t_data *d = seq->private;
 		struct l2t_entry *e = v;
 
 		spin_lock_bh(&e->lock);
@@ -628,7 +642,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
 		else
 			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
 		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
-			   e->idx, ip, e->dmac,
+			   e->idx + d->l2t_start, ip, e->dmac,
 			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
 			   l2e_state(e), atomic_read(&e->refcnt),
 			   e->neigh ? e->neigh->dev->name : "");
@@ -652,7 +666,7 @@ static int l2t_seq_open(struct inode *inode, struct file *file)
 		struct adapter *adap = inode->i_private;
 		struct seq_file *seq = file->private_data;
 
-		seq->private = adap->l2t->l2tab;
+		seq->private = adap->l2t;
 	}
 	return rc;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index a30126ce9..b38dc526a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,20 @@
 #include
 #include
 
+enum { L2T_SIZE = 4096 };     /* # of L2T entries */
+
+enum {
+	L2T_STATE_VALID,	/* entry is up to date */
+	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
+	L2T_STATE_RESOLVING,	/* entry needs address resolution */
+	L2T_STATE_SYNC_WRITE,	/* synchronous write of entry underway */
+	L2T_STATE_NOARP,	/* Netdev down or removed */
+
+	/* when state is one of the below the entry is not hashed */
+	L2T_STATE_SWITCHING,	/* entry is being used by a switching filter */
+	L2T_STATE_UNUSED	/* entry not in use */
+};
+
 struct adapter;
 struct l2t_data;
 struct neighbour;
@@ -56,7 +70,7 @@ struct cpl_l2t_write_rpl;
 */
 struct l2t_entry {
 	u16 state;			/* entry state */
-	u16 idx;			/* entry index */
+	u16 idx;			/* entry index within in-memory table */
 	u32 addr[4];			/* next hop IP or IPv6 address */
 	int ifindex;			/* neighbor's net_device's ifindex */
 	struct neighbour *neigh;	/* associated neighbour */
@@ -104,7 +118,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
 int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
 			 u8 port, u8 *eth_addr);
-struct l2t_data *t4_init_l2t(void);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
 
 extern const struct file_operations t4_l2t_fops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 942db078f..9162746d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -807,7 +807,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
 	 * message or, if we're doing a Large Send Offload, an LSO CPL message
 	 * with an embedded TX Packet Write CPL message.
 	 */
-	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
 	if (skb_shinfo(skb)->gso_size)
 		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
 			  sizeof(struct cpl_tx_pkt_lso_core) +
@@ -1137,7 +1137,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
 */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	u32 wr_mid;
+	u32 wr_mid, ctrl0;
 	u64 cntrl, *end;
 	int qidx, credits;
 	unsigned int flits, ndesc;
@@ -1274,9 +1274,15 @@ out_free:	dev_kfree_skb_any(skb);
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 	}
 
-	cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
-			   TXPKT_INTF_V(pi->tx_chan) |
-			   TXPKT_PF_V(adap->pf));
+	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+		TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+	if (is_t4(adap->params.chip))
+		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+	else
+		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+	cpl->ctrl0 = htonl(ctrl0);
 	cpl->pack = htons(0);
 	cpl->len = htons(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1418,18 +1424,17 @@ static void restart_ctrlq(unsigned long data)
 		struct fw_wr_hdr *wr;
 		unsigned int ndesc = skb->priority;     /* previously saved */
 
-		/*
-		 * Write descriptors and free skbs outside the lock to limit
+		written += ndesc;
+		/* Write descriptors and free skbs outside the lock to limit
 		 * wait times.  q->full is still set so new skbs will be queued.
 		 */
+		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+		txq_advance(&q->q, ndesc);
 		spin_unlock(&q->sendq.lock);
 
-		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
 		inline_tx_skb(skb, &q->q, wr);
 		kfree_skb(skb);
 
-		written += ndesc;
-		txq_advance(&q->q, ndesc);
 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
 			unsigned long old = q->q.stops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2b52aae7e..44806253c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -37,6 +37,7 @@
 #include "t4_regs.h"
 #include "t4_values.h"
 #include "t4fw_api.h"
+#include "t4fw_version.h"
 
 /**
  *	t4_wait_op_done_val - wait until an operation is completed
@@ -345,6 +346,43 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
 			       FW_CMD_MAX_TIMEOUT);
 }
 
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+	u32 edc_ecc_err_addr_reg;
+	u32 rdata_reg;
+
+	if (is_t4(adap->params.chip)) {
+		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+		return 0;
+	}
+	if (idx != 0 && idx != 1) {
+		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+		return 0;
+	}
+
+	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
+	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
+
+	CH_WARN(adap,
+		"edc%d err addr 0x%x: 0x%x.\n",
+		idx, edc_ecc_err_addr_reg,
+		t4_read_reg(adap, edc_ecc_err_addr_reg));
+	CH_WARN(adap,
+		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+		rdata_reg,
+		(unsigned long long)t4_read_reg64(adap, rdata_reg),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
+
+	return 0;
+}
+
 /**
  *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *	@adap: the adapter
@@ -1322,9 +1360,10 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 	};
 
 	static const unsigned int t6_reg_ranges[] = {
-		0x1008, 0x114c,
+		0x1008, 0x1124,
+		0x1138, 0x114c,
 		0x1180, 0x11b4,
-		0x11fc, 0x1250,
+		0x11fc, 0x1254,
 		0x1280, 0x133c,
 		0x1800, 0x18fc,
 		0x3000, 0x302c,
@@ -1345,18 +1384,18 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 		0x5a80, 0x5a9c,
 		0x5b94, 0x5bfc,
 		0x5c10, 0x5ec0,
-		0x5ec8, 0x5ec8,
+		0x5ec8, 0x5ecc,
 		0x6000, 0x6040,
-		0x6058, 0x6154,
+		0x6058, 0x619c,
 		0x7700, 0x7798,
 		0x77c0, 0x7880,
 		0x78cc, 0x78fc,
 		0x7b00, 0x7c54,
 		0x7d00, 0x7efc,
-		0x8dc0, 0x8de0,
+		0x8dc0, 0x8de4,
 		0x8df8, 0x8e84,
 		0x8ea0, 0x8f88,
-		0x8fb8, 0x911c,
+		0x8fb8, 0x9124,
 		0x9400, 0x9470,
 		0x9600, 0x971c,
 		0x9800, 0x9808,
@@ -1371,20 +1410,21 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 		0x9f00, 0x9f6c,
 		0x9f80, 0xa020,
 		0xd004, 0xd03c,
+		0xd100, 0xd118,
+		0xd200, 0xd31c,
 		0xdfc0, 0xdfe0,
 		0xe000, 0xf008,
 		0x11000, 0x11014,
-		0x11048, 0x11110,
-		0x11118, 0x1117c,
-		0x11190, 0x11260,
+		0x11048, 0x1117c,
+		0x11190, 0x11270,
 		0x11300, 0x1130c,
-		0x12000, 0x1205c,
+		0x12000, 0x1206c,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
 		0x1908c, 0x19124,
 		0x19150, 0x191b0,
 		0x191d0, 0x191e8,
-		0x19238, 0x192b8,
+		0x19238, 0x192bc,
 		0x193f8, 0x19474,
 		0x19490, 0x194cc,
 		0x194f0, 0x194f8,
@@ -1461,12 +1501,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 		0x1ff00, 0x1ff84,
 		0x1ffc0, 0x1ffc8,
 		0x30000, 0x30070,
-		0x30100, 0x3015c,
-		0x30190, 0x301d0,
-		0x30200, 0x30318,
+		0x30100, 0x301d0,
+		0x30200, 0x30320,
 		0x30400, 0x3052c,
 		0x30540, 0x3061c,
-		0x30800, 0x3088c,
+		0x30800, 0x30890,
 		0x308c0, 0x30908,
 		0x30910, 0x309b8,
 		0x30a00, 0x30a04,
@@ -1539,12 +1578,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 		0x33c24, 0x33c50,
 		0x33cf0, 0x33cfc,
 		0x34000, 0x34070,
-		0x34100, 0x3415c,
-		0x34190, 0x341d0,
-		0x34200, 0x34318,
+		0x34100, 0x341d0,
+		0x34200, 0x34320,
 		0x34400, 0x3452c,
 		0x34540, 0x3461c,
-		0x34800, 0x3488c,
+		0x34800, 0x34890,
 		0x348c0, 0x34908,
 		0x34910, 0x349b8,
 		0x34a00, 0x34a04,
@@ -2129,6 +2167,61 @@ int t4_get_exprom_version(struct adapter *adap, u32 *vers)
 	return 0;
 }
 
+/**
+ *	t4_check_fw_version - check if the FW is supported with this driver
+ *	@adap: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if there's an exact match, a negative error if the version could not
+ *	be read or there's a major version mismatch.
+ */
+int t4_check_fw_version(struct adapter *adap)
+{
+	int ret, major, minor, micro;
+	int exp_major, exp_minor, exp_micro;
+	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+	if (ret)
+		return ret;
+
+	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
+	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
+	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
+
+	switch (chip_version) {
+	case CHELSIO_T4:
+		exp_major = T4FW_MIN_VERSION_MAJOR;
+		exp_minor = T4FW_MIN_VERSION_MINOR;
+		exp_micro = T4FW_MIN_VERSION_MICRO;
+		break;
+	case CHELSIO_T5:
+		exp_major = T5FW_MIN_VERSION_MAJOR;
+		exp_minor = T5FW_MIN_VERSION_MINOR;
+		exp_micro = T5FW_MIN_VERSION_MICRO;
+		break;
+	case CHELSIO_T6:
+		exp_major = T6FW_MIN_VERSION_MAJOR;
+		exp_minor = T6FW_MIN_VERSION_MINOR;
+		exp_micro = T6FW_MIN_VERSION_MICRO;
+		break;
+	default:
+		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
+			adap->chip);
+		return -EINVAL;
+	}
+
+	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
+	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
+		dev_err(adap->pdev_dev,
+			"Card has firmware version %u.%u.%u, minimum "
+			"supported firmware is %u.%u.%u.\n", major, minor,
+			micro, exp_major, exp_minor, exp_micro);
+		return -EFAULT;
+	}
+	return 0;
+}
+
 /* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
@@ -3281,6 +3374,8 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
 	if (v & ECC_CE_INT_CAUSE_F) {
 		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
 
+		t4_edc_err_read(adapter, idx);
+
 		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
 		if (printk_ratelimit())
 			dev_warn(adapter->pdev_dev,
@@ -3488,7 +3583,9 @@ int t4_slow_intr_handler(struct adapter *adapter)
 void t4_intr_enable(struct adapter *adapter)
 {
 	u32 val = 0;
-	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
 		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
@@ -3513,7 +3610,9 @@ void t4_intr_enable(struct adapter *adapter)
 */
 void t4_intr_disable(struct adapter *adapter)
 {
-	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
@@ -3687,6 +3786,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
 	return 0;
 }
 
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+	return (adap->flags & FW_OK) || !adap->use_bd;
+}
+
 /**
  *	t4_fw_tp_pio_rw - Access TP PIO through LDST
  *	@adap: the adapter
@@ -3730,7 +3834,7 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
 */
 void t4_read_rss_key(struct adapter *adap, u32 *key)
 {
-	if (adap->flags & FW_OK)
+	if (t4_use_ldst(adap))
 		t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
 	else
 		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3760,7 +3864,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
 	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
 		rss_key_addr_cnt = 32;
 
-	if (adap->flags & FW_OK)
+	if (t4_use_ldst(adap))
 		t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
 	else
 		t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3789,7 +3893,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
 			   u32 *valp)
 {
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, valp, 1,
 				TP_RSS_PF0_CONFIG_A + index, 1);
 	else
@@ -3829,7 +3933,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
 
 	/* Grab the VFL/VFH values ...
 	 */
-	if (adapter->flags & FW_OK) {
+	if (t4_use_ldst(adapter)) {
 		t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
 		t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
 	} else {
@@ -3850,7 +3954,7 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
 {
 	u32 pfmap;
 
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
 	else
 		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3868,7 +3972,7 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
 {
 	u32 pfmask;
 
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
 	else
 		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3924,43 +4028,25 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
 */
 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
 {
-	/* T6 and later has 2 channels */
-	if (adap->params.arch.nchan == NCHAN) {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_cong_drops, 8,
-				 TP_MIB_TNL_CNG_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_tx_drops, 4,
-				 TP_MIB_TNL_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_vlan_drops, 4,
-				 TP_MIB_OFD_VLN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp6_in_errs, 4,
-				 TP_MIB_TCP_V6IN_ERR_0_A);
-	} else {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_cong_drops, 2,
-				 TP_MIB_TNL_CNG_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_chan_drops, 2,
-				 TP_MIB_OFD_CHN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_vlan_drops, 2,
-				 TP_MIB_OFD_VLN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
-	}
+	int nchan = adap->params.arch.nchan;
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
 	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
 			 &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
 }
@@ -3974,16 +4060,13 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
 */
 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
 {
-	/* T6 and later has 2 channels */
-	if (adap->params.arch.nchan == NCHAN) {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-				 8, TP_MIB_CPL_IN_REQ_0_A);
-	} else {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-				 2, TP_MIB_CPL_IN_REQ_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
-				 2, TP_MIB_CPL_OUT_RSP_0_A);
-	}
+	int nchan = adap->params.arch.nchan;
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+			 nchan, TP_MIB_CPL_IN_REQ_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+			 nchan, TP_MIB_CPL_OUT_RSP_0_A);
+
 }
 
 /**
@@ -4237,6 +4320,119 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
 	}
 }
 
+/**
+ *	t4_set_trace_filter - configure one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the desired trace filter parameters
+ *	@idx: which filter to configure
+ *	@enable: whether to enable or disable the filter
+ *
+ *	Configures one of the tracing filters available in HW.  If @enable is
+ *	%0 @tp is not examined and may be %NULL.  The user is responsible for
+ *	setting the single/multiple trace mode by writing to the MPS_TRC_CFG_A
+ *	register.
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+			int idx, int enable)
+{
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg, cfg;
+	u32 multitrc = TRCMULTIFILTER_F;
+
+	if (!enable) {
+		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+		return 0;
+	}
+
+	cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
+	if (cfg & TRCMULTIFILTER_F) {
+		/* If multiple tracers are enabled, then maximum
+		 * capture size is 2.5KB (FIFO size of a single channel)
+		 * minus 2 flits for CPL_TRACE_PKT header.
+		 */
+		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
+			return -EINVAL;
+	} else {
+		/* If multiple tracers are disabled, to avoid deadlocks a
+		 * maximum packet capture size of 9600 bytes is recommended.
+		 * Also in this mode, only trace0 can be enabled and running.
+		 */
+		multitrc = 0;
+		if (tp->snap_len > 9600 || idx)
+			return -EINVAL;
+	}
+
+	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
+	    tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
+	    tp->min_len > TFMINPKTSIZE_M)
+		return -EINVAL;
+
+	/* stop the tracer we'll be changing */
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+
+	idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
+	data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		t4_write_reg(adap, data_reg, tp->data[i]);
+		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+	}
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
+		     TFCAPTUREMAX_V(tp->snap_len) |
+		     TFMINPKTSIZE_V(tp->min_len));
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
+		     TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
+		     (is_t4(adap->params.chip) ?
+		     TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
+		     T5_TFPORT_V(tp->port) | T5_TFEN_F |
+		     T5_TFINVERTMATCH_V(tp->invert)));
+
+	return 0;
+}
+
+/**
+ *	t4_get_trace_filter - query one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the current trace filter parameters
+ *	@idx: which trace filter to query
+ *	@enabled: non-zero if the filter is enabled
+ *
+ *	Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+			 int *enabled)
+{
+	u32 ctla, ctlb;
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg;
+
+	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
+	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
+
+	if (is_t4(adap->params.chip)) {
+		*enabled = !!(ctla & TFEN_F);
+		tp->port = TFPORT_G(ctla);
+		tp->invert = !!(ctla & TFINVERTMATCH_F);
+	} else {
+		*enabled = !!(ctla & T5_TFEN_F);
+		tp->port = T5_TFPORT_G(ctla);
+		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
+	}
+	tp->snap_len = TFCAPTUREMAX_G(ctlb);
+	tp->min_len = TFMINPKTSIZE_G(ctlb);
+	tp->skip_ofst = TFOFFSET_G(ctla);
+	tp->skip_len = TFLENGTH_G(ctla);
+
+	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
+	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+	}
+}
+
 /**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
@@ -6294,7 +6490,7 @@ int t4_init_tp_params(struct adapter *adap)
 	/* Cache the adapter's Compressed Filter Mode and global Incress
 	 * Configuration.
*/ - if (adap->flags & FW_OK) { + if (t4_use_ldst(adap)) { t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1, TP_VLAN_PRI_MAP_A, 1); t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index c8488f430..640369df8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -47,7 +47,6 @@ enum { TCB_SIZE = 128, /* TCB size */ NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ - L2T_SIZE = 4096, /* # of L2T entries */ PM_NSTATS = 5, /* # of PM stats */ MBOX_LEN = 64, /* mailbox size in bytes */ TRACE_LEN = 112, /* length of trace data and mask */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 132cb8fc0..b99144afd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -660,6 +660,9 @@ struct cpl_tx_pkt { #define TXPKT_OVLAN_IDX_S 12 #define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S) +#define TXPKT_T5_OVLAN_IDX_S 12 +#define TXPKT_T5_OVLAN_IDX_V(x) ((x) << TXPKT_T5_OVLAN_IDX_S) + #define TXPKT_INTF_S 16 #define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index d7ca10692..03ed00c49 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -142,6 +142,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ + CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ @@ -155,6 +157,27 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */ + CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */ + + /* T6 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x6001), + CH_PCI_ID_TABLE_FENTRY(0x6002), + CH_PCI_ID_TABLE_FENTRY(0x6003), + CH_PCI_ID_TABLE_FENTRY(0x6004), + CH_PCI_ID_TABLE_FENTRY(0x6005), + CH_PCI_ID_TABLE_FENTRY(0x6006), + CH_PCI_ID_TABLE_FENTRY(0x6007), + CH_PCI_ID_TABLE_FENTRY(0x6009), + CH_PCI_ID_TABLE_FENTRY(0x600d), + CH_PCI_ID_TABLE_FENTRY(0x6010), + CH_PCI_ID_TABLE_FENTRY(0x6011), + CH_PCI_ID_TABLE_FENTRY(0x6014), + CH_PCI_ID_TABLE_FENTRY(0x6015), CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 375a82557..fc3044c8a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -136,6 +136,20 @@ #define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \ & INGPACKBOUNDARY_M) +#define VFIFO_ENABLE_S 10 +#define VFIFO_ENABLE_V(x) ((x) << VFIFO_ENABLE_S) +#define VFIFO_ENABLE_F VFIFO_ENABLE_V(1U) + +#define SGE_DBVFIFO_BADDR_A 0x1138 + +#define 
DBVFIFO_SIZE_S 6 +#define DBVFIFO_SIZE_M 0xfffU +#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M) + +#define T6_DBVFIFO_SIZE_S 0 +#define T6_DBVFIFO_SIZE_M 0x1fffU +#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M) + #define GLOBALENABLE_S 0 #define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S) #define GLOBALENABLE_F GLOBALENABLE_V(1U) @@ -303,6 +317,8 @@ #define SGE_FL_BUFFER_SIZE7_A 0x1060 #define SGE_FL_BUFFER_SIZE8_A 0x1064 +#define SGE_IMSG_CTXT_BADDR_A 0x1088 +#define SGE_FLM_CACHE_BADDR_A 0x108c #define SGE_INGRESS_RX_THRESHOLD_A 0x10a0 #define THRESHOLD_0_S 24 @@ -338,6 +354,11 @@ #define EGRTHRESHOLDPACKING_G(x) \ (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M) +#define T6_EGRTHRESHOLDPACKING_S 16 +#define T6_EGRTHRESHOLDPACKING_M 0xffU +#define T6_EGRTHRESHOLDPACKING_G(x) \ + (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M) + #define SGE_TIMESTAMP_LO_A 0x1098 #define SGE_TIMESTAMP_HI_A 0x109c @@ -352,6 +373,7 @@ #define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M) #define SGE_DBFIFO_STATUS_A 0x10a4 +#define SGE_DBVFIFO_SIZE_A 0x113c #define HP_INT_THRESH_S 28 #define HP_INT_THRESH_M 0xfU @@ -864,6 +886,10 @@ /* registers for module MA */ #define MA_EDRAM0_BAR_A 0x77c0 +#define EDRAM0_BASE_S 16 +#define EDRAM0_BASE_M 0xfffU +#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M) + #define EDRAM0_SIZE_S 0 #define EDRAM0_SIZE_M 0xfffU #define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S) @@ -871,6 +897,10 @@ #define MA_EDRAM1_BAR_A 0x77c4 +#define EDRAM1_BASE_S 16 +#define EDRAM1_BASE_M 0xfffU +#define EDRAM1_BASE_G(x) (((x) >> EDRAM1_BASE_S) & EDRAM1_BASE_M) + #define EDRAM1_SIZE_S 0 #define EDRAM1_SIZE_M 0xfffU #define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S) @@ -878,6 +908,11 @@ #define MA_EXT_MEMORY_BAR_A 0x77c8 +#define EXT_MEM_BASE_S 16 +#define EXT_MEM_BASE_M 0xfffU +#define EXT_MEM_BASE_V(x) ((x) << EXT_MEM_BASE_S) +#define EXT_MEM_BASE_G(x) (((x) >> EXT_MEM_BASE_S) & EXT_MEM_BASE_M) + #define EXT_MEM_SIZE_S 0 #define EXT_MEM_SIZE_M 0xfffU #define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S) @@ -885,6 +920,10 @@ #define MA_EXT_MEMORY1_BAR_A 0x7808 +#define EXT_MEM1_BASE_S 16 +#define EXT_MEM1_BASE_M 0xfffU +#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M) + #define EXT_MEM1_SIZE_S 0 #define EXT_MEM1_SIZE_M 0xfffU #define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S) @@ -892,6 +931,10 @@ #define MA_EXT_MEMORY0_BAR_A 0x77c8 +#define EXT_MEM0_BASE_S 16 +#define EXT_MEM0_BASE_M 0xfffU +#define EXT_MEM0_BASE_G(x) (((x) >> EXT_MEM0_BASE_S) & EXT_MEM0_BASE_M) + #define EXT_MEM0_SIZE_S 0 #define EXT_MEM0_SIZE_M 0xfffU #define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S) @@ -973,6 +1016,10 @@ /* registers for module CIM */ #define CIM_BOOT_CFG_A 0x7b00 +#define CIM_SDRAM_BASE_ADDR_A 0x7b14 +#define CIM_SDRAM_ADDR_SIZE_A 0x7b18 +#define CIM_EXTMEM2_BASE_ADDR_A 0x7b1c +#define CIM_EXTMEM2_ADDR_SIZE_A 0x7b20 #define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290 #define BOOTADDR_M 0xffffff00U @@ -1231,6 +1278,33 @@ #define TP_OUT_CONFIG_A 0x7d04 #define TP_GLOBAL_CONFIG_A 0x7d08 +#define TP_CMM_TCB_BASE_A 0x7d10 +#define TP_CMM_MM_BASE_A 0x7d14 +#define TP_CMM_TIMER_BASE_A 0x7d18 +#define TP_PMM_TX_BASE_A 0x7d20 +#define TP_PMM_RX_BASE_A 0x7d28 +#define TP_PMM_RX_PAGE_SIZE_A 0x7d2c +#define TP_PMM_RX_MAX_PAGE_A 0x7d30 +#define TP_PMM_TX_PAGE_SIZE_A 0x7d34 +#define TP_PMM_TX_MAX_PAGE_A 0x7d38 +#define TP_CMM_MM_MAX_PSTRUCT_A 0x7e6c + +#define PMRXNUMCHN_S 31 +#define PMRXNUMCHN_V(x) ((x) << PMRXNUMCHN_S) +#define 
PMRXNUMCHN_F PMRXNUMCHN_V(1U) + +#define PMTXNUMCHN_S 30 +#define PMTXNUMCHN_M 0x3U +#define PMTXNUMCHN_G(x) (((x) >> PMTXNUMCHN_S) & PMTXNUMCHN_M) + +#define PMTXMAXPAGE_S 0 +#define PMTXMAXPAGE_M 0x1fffffU +#define PMTXMAXPAGE_G(x) (((x) >> PMTXMAXPAGE_S) & PMTXMAXPAGE_M) + +#define PMRXMAXPAGE_S 0 +#define PMRXMAXPAGE_M 0x1fffffU +#define PMRXMAXPAGE_G(x) (((x) >> PMRXMAXPAGE_S) & PMRXMAXPAGE_M) + #define DBGLAMODE_S 14 #define DBGLAMODE_M 0x3U #define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M) @@ -1338,6 +1412,9 @@ #define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M) #define TP_RSS_LKP_TABLE_A 0x7dec +#define TP_CMM_MM_RX_FLST_BASE_A 0x7e60 +#define TP_CMM_MM_TX_FLST_BASE_A 0x7e64 +#define TP_CMM_MM_PS_FLST_BASE_A 0x7e68 #define LKPTBLROWVLD_S 31 #define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S) @@ -1483,6 +1560,11 @@ #define TP_MIB_RQE_DFR_PKT_A 0x64 #define ULP_TX_INT_CAUSE_A 0x8dcc +#define ULP_TX_TPT_LLIMIT_A 0x8dd4 +#define ULP_TX_TPT_ULIMIT_A 0x8dd8 +#define ULP_TX_PBL_LLIMIT_A 0x8ddc +#define ULP_TX_PBL_ULIMIT_A 0x8de0 +#define ULP_TX_ERR_TABLE_BASE_A 0x8e04 #define PBL_BOUND_ERR_CH3_S 31 #define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S) @@ -1804,6 +1886,9 @@ #define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U) #define MPS_TRC_RSS_CONTROL_A 0x9808 +#define MPS_TRC_FILTER1_RSS_CONTROL_A 0x9ff4 +#define MPS_TRC_FILTER2_RSS_CONTROL_A 0x9ffc +#define MPS_TRC_FILTER3_RSS_CONTROL_A 0xa004 #define MPS_T5_TRC_RSS_CONTROL_A 0xa00c #define RSSCONTROL_S 16 @@ -1812,6 +1897,59 @@ #define QUEUENUMBER_S 0 #define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S) +#define TFINVERTMATCH_S 24 +#define TFINVERTMATCH_V(x) ((x) << TFINVERTMATCH_S) +#define TFINVERTMATCH_F TFINVERTMATCH_V(1U) + +#define TFEN_S 22 +#define TFEN_V(x) ((x) << TFEN_S) +#define TFEN_F TFEN_V(1U) + +#define TFPORT_S 18 +#define TFPORT_M 0xfU +#define TFPORT_V(x) ((x) << TFPORT_S) +#define TFPORT_G(x) (((x) >> TFPORT_S) & TFPORT_M) + +#define TFLENGTH_S 8 +#define TFLENGTH_M 0x1fU +#define TFLENGTH_V(x) ((x) << TFLENGTH_S) +#define TFLENGTH_G(x) (((x) >> TFLENGTH_S) & TFLENGTH_M) + +#define TFOFFSET_S 0 +#define TFOFFSET_M 0x1fU +#define TFOFFSET_V(x) ((x) << TFOFFSET_S) +#define TFOFFSET_G(x) (((x) >> TFOFFSET_S) & TFOFFSET_M) + +#define T5_TFINVERTMATCH_S 25 +#define T5_TFINVERTMATCH_V(x) ((x) << T5_TFINVERTMATCH_S) +#define T5_TFINVERTMATCH_F T5_TFINVERTMATCH_V(1U) + +#define T5_TFEN_S 23 +#define T5_TFEN_V(x) ((x) << T5_TFEN_S) +#define T5_TFEN_F T5_TFEN_V(1U) + +#define T5_TFPORT_S 18 +#define T5_TFPORT_M 0x1fU +#define T5_TFPORT_V(x) ((x) << T5_TFPORT_S) +#define T5_TFPORT_G(x) (((x) >> T5_TFPORT_S) & T5_TFPORT_M) + +#define MPS_TRC_FILTER_MATCH_CTL_A_A 0x9810 +#define MPS_TRC_FILTER_MATCH_CTL_B_A 0x9820 + +#define TFMINPKTSIZE_S 16 +#define TFMINPKTSIZE_M 0x1ffU +#define TFMINPKTSIZE_V(x) ((x) << TFMINPKTSIZE_S) +#define TFMINPKTSIZE_G(x) (((x) >> TFMINPKTSIZE_S) & TFMINPKTSIZE_M) + +#define TFCAPTUREMAX_S 0 +#define TFCAPTUREMAX_M 0x3fffU +#define TFCAPTUREMAX_V(x) ((x) << TFCAPTUREMAX_S) +#define TFCAPTUREMAX_G(x) (((x) >> TFCAPTUREMAX_S) & TFCAPTUREMAX_M) + +#define MPS_TRC_FILTER0_MATCH_A 0x9c00 +#define MPS_TRC_FILTER0_DONT_CARE_A 0x9c80 +#define MPS_TRC_FILTER1_MATCH_A 0x9d00 + #define TP_RSS_CONFIG_A 0x7df0 #define TNL4TUPENIPV6_S 31 @@ -2247,12 +2385,32 @@ #define MATCHSRAM_V(x) ((x) << MATCHSRAM_S) #define MATCHSRAM_F MATCHSRAM_V(1U) +#define MPS_RX_PG_RSV0_A 0x11010 +#define MPS_RX_PG_RSV4_A 0x11020 #define MPS_RX_PERR_INT_CAUSE_A 0x11074 +#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208 +#define 
MPS_RX_LPBK_BG_PG_CNT0_A 0x11218 #define MPS_CLS_TCAM_Y_L_A 0xf000 #define MPS_CLS_TCAM_DATA0_A 0xf000 #define MPS_CLS_TCAM_DATA1_A 0xf004 +#define USED_S 16 +#define USED_M 0x7ffU +#define USED_G(x) (((x) >> USED_S) & USED_M) + +#define ALLOC_S 0 +#define ALLOC_M 0x7ffU +#define ALLOC_G(x) (((x) >> ALLOC_S) & ALLOC_M) + +#define T5_USED_S 16 +#define T5_USED_M 0xfffU +#define T5_USED_G(x) (((x) >> T5_USED_S) & T5_USED_M) + +#define T5_ALLOC_S 0 +#define T5_ALLOC_M 0xfffU +#define T5_ALLOC_G(x) (((x) >> T5_ALLOC_S) & T5_ALLOC_M) + #define DMACH_S 0 #define DMACH_M 0xffffU #define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M) @@ -2410,8 +2568,21 @@ #define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U) #define ULP_RX_INT_CAUSE_A 0x19158 +#define ULP_RX_ISCSI_LLIMIT_A 0x1915c +#define ULP_RX_ISCSI_ULIMIT_A 0x19160 #define ULP_RX_ISCSI_TAGMASK_A 0x19164 #define ULP_RX_ISCSI_PSZ_A 0x19168 +#define ULP_RX_TDDP_LLIMIT_A 0x1916c +#define ULP_RX_TDDP_ULIMIT_A 0x19170 +#define ULP_RX_STAG_LLIMIT_A 0x1917c +#define ULP_RX_STAG_ULIMIT_A 0x19180 +#define ULP_RX_RQ_LLIMIT_A 0x19184 +#define ULP_RX_RQ_ULIMIT_A 0x19188 +#define ULP_RX_PBL_LLIMIT_A 0x1918c +#define ULP_RX_PBL_ULIMIT_A 0x19190 +#define ULP_RX_CTX_BASE_A 0x19194 +#define ULP_RX_RQUDP_LLIMIT_A 0x191a4 +#define ULP_RX_RQUDP_ULIMIT_A 0x191a8 #define ULP_RX_LA_CTL_A 0x1923c #define ULP_RX_LA_RDPTR_A 0x19240 #define ULP_RX_LA_RDDATA_A 0x19244 @@ -2473,6 +2644,10 @@ #define SOURCEPF_M 0x7U #define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M) +#define T6_SOURCEPF_S 9 +#define T6_SOURCEPF_M 0x7U +#define T6_SOURCEPF_G(x) (((x) >> T6_SOURCEPF_S) & T6_SOURCEPF_M) + #define PL_INT_CAUSE_A 0x1940c #define ULP_TX_S 27 @@ -2612,7 +2787,20 @@ #define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S) #define T6_LIPMISS_F T6_LIPMISS_V(1U) +#define LE_DB_CONFIG_A 0x19c04 +#define LE_DB_SERVER_INDEX_A 0x19c18 +#define LE_DB_SRVR_START_INDEX_A 0x19c18 +#define LE_DB_ACT_CNT_IPV4_A 0x19c20 +#define LE_DB_ACT_CNT_IPV6_A 0x19c24 +#define LE_DB_HASH_TID_BASE_A 0x19c30 +#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30 #define LE_DB_INT_CAUSE_A 0x19c3c +#define LE_DB_TID_HASHBASE_A 0x19df8 +#define T6_LE_DB_HASH_TID_BASE_A 0x19df8 + +#define HASHEN_S 20 +#define HASHEN_V(x) ((x) << HASHEN_S) +#define HASHEN_F HASHEN_V(1U) #define REQQPARERR_S 16 #define REQQPARERR_V(x) ((x) << REQQPARERR_S) @@ -2634,6 +2822,10 @@ #define LIP0_V(x) ((x) << LIP0_S) #define LIP0_F LIP0_V(1U) +#define BASEADDR_S 3 +#define BASEADDR_M 0x1fffffffU +#define BASEADDR_G(x) (((x) >> BASEADDR_S) & BASEADDR_M) + #define TCAMINTPERR_S 13 #define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S) #define TCAMINTPERR_F TCAMINTPERR_V(1U) @@ -2740,10 +2932,11 @@ #define EDC_H_BIST_DATA_PATTERN_A 0x50010 #define EDC_H_BIST_STATUS_RDATA_A 0x50028 +#define EDC_H_ECC_ERR_ADDR_A 0x50084 #define EDC_T51_BASE_ADDR 0x50800 -#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) -#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) +#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) +#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx) #define PL_VF_REV_A 0x4 #define PL_VF_WHOAMI_A 0x0 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index ab4674684..a32de30ea 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -762,8 +762,6 @@ enum fw_ldst_func_mod_index { struct fw_ldst_cmd { __be32 op_to_addrspace; -#define FW_LDST_CMD_ADDRSPACE_S 0 -#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S) 
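The *_S/*_M/*_V/*_G suffixes used throughout these register hunks follow one fixed pattern: _S is the field's bit shift, _M its unshifted mask, _V packs a value into position for a write, and _G extracts it from a read word. A self-contained sketch of the pattern using the EDRAM0 fields added above; the sample register value is invented, and EDRAM0_SIZE_G is defined here by analogy (only the _V form appears in the hunk):

#include <stdio.h>
#include <stdint.h>

/* Same pattern as the cxgb4 t4_regs.h definitions above. */
#define EDRAM0_BASE_S 16
#define EDRAM0_BASE_M 0xfffU
#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M)

#define EDRAM0_SIZE_S 0
#define EDRAM0_SIZE_M 0xfffU
#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
#define EDRAM0_SIZE_G(x) (((x) >> EDRAM0_SIZE_S) & EDRAM0_SIZE_M)

int main(void)
{
        uint32_t bar = 0x00400080;      /* made-up MA_EDRAM0_BAR_A value */

        /* _G pulls a field out of the register word ... */
        printf("base=0x%x size=0x%x\n",
               (unsigned)EDRAM0_BASE_G(bar), (unsigned)EDRAM0_SIZE_G(bar));
        /* ... and _V shifts a value back into place for a write. */
        printf("repacked size bits: 0x%x\n", (unsigned)EDRAM0_SIZE_V(0x80U));
        return 0;
}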
__be32 cycles_to_len16; union fw_ldst { struct fw_ldst_addrval { @@ -788,6 +786,13 @@ struct fw_ldst_cmd { __be16 vctl; __be16 rval; } mdio; + struct fw_ldst_cim_rq { + u8 req_first64[8]; + u8 req_second64[8]; + u8 resp_first64[8]; + u8 resp_second64[8]; + __be32 r3[2]; + } cim_rq; union fw_ldst_mps { struct fw_ldst_mps_rplc { __be16 fid_idx; @@ -828,9 +833,33 @@ struct fw_ldst_cmd { __be16 nset_pkd; __be32 data[12]; } pcie; + struct fw_ldst_i2c_deprecated { + u8 pid_pkd; + u8 base; + u8 boffset; + u8 data; + __be32 r9; + } i2c_deprecated; + struct fw_ldst_i2c { + u8 pid; + u8 did; + u8 boffset; + u8 blen; + __be32 r9; + __u8 data[48]; + } i2c; + struct fw_ldst_le { + __be32 index; + __be32 r9; + u8 val[33]; + u8 r11[7]; + } le; } u; }; +#define FW_LDST_CMD_ADDRSPACE_S 0 +#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S) + #define FW_LDST_CMD_MSG_S 31 #define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 32b213559..c4b262ca7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,18 +36,29 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x0D -#define T4FW_VERSION_MICRO 0x20 +#define T4FW_VERSION_MINOR 0x0E +#define T4FW_VERSION_MICRO 0x04 #define T4FW_VERSION_BUILD 0x00 +#define T4FW_MIN_VERSION_MAJOR 0x01 +#define T4FW_MIN_VERSION_MINOR 0x04 +#define T4FW_MIN_VERSION_MICRO 0x00 + #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0D -#define T5FW_VERSION_MICRO 0x20 +#define T5FW_VERSION_MINOR 0x0E +#define T5FW_VERSION_MICRO 0x04 #define T5FW_VERSION_BUILD 0x00 +#define T5FW_MIN_VERSION_MAJOR 0x00 +#define T5FW_MIN_VERSION_MINOR 0x00 +#define T5FW_MIN_VERSION_MICRO 0x00 + #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x0D -#define T6FW_VERSION_MICRO 0x2D +#define T6FW_VERSION_MINOR 0x0E +#define T6FW_VERSION_MICRO 0x04 #define T6FW_VERSION_BUILD 0x00 +#define T6FW_MIN_VERSION_MAJOR 0x00 +#define T6FW_MIN_VERSION_MINOR 0x00 +#define T6FW_MIN_VERSION_MICRO 0x00 #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index ad53e5ad2..fa3786a9d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1898,7 +1898,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) rspq->unhandled_irqs++; val = CIDXINC_V(work_done) | SEINTARM_V(intr_params); - if (is_t4(rspq->adapter->params.chip)) { + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!rspq->bar2_addr)) { t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, val | INGRESSQID_V((u32)rspq->cntxt_id)); @@ -1998,10 +2001,13 @@ static unsigned int process_intrq(struct adapter *adapter) } val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params); - if (is_t4(adapter->params.chip)) + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!intrq->bar2_addr)) { t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, val | INGRESSQID_V(intrq->cntxt_id)); - else { + } else { writel(val | INGRESSQID_V(intrq->bar2_qid), intrq->bar2_addr + SGE_UDB_GTS); wmb(); @@ -2662,8 +2668,22 @@ int t4vf_sge_init(struct adapter *adapter) * give it more Free List entries. 
(Note that the SGE's Egress * Congestion Threshold is in units of 2 Free List pointers.) */ - s->fl_starve_thres - = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1; + switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { + case CHELSIO_T4: + s->fl_starve_thres = + EGRTHRESHOLD_G(sge_params->sge_congestion_control); + break; + case CHELSIO_T5: + s->fl_starve_thres = + EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control); + break; + case CHELSIO_T6: + default: + s->fl_starve_thres = + T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control); + break; + } + s->fl_starve_thres = s->fl_starve_thres * 2 + 1; /* * Set up tasklet timers. diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 0db6dc9e9..63dd5fdac 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -619,7 +619,8 @@ int t4vf_get_sge_params(struct adapter *adapter) */ whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A); - pf = SOURCEPF_G(whoami); + pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? + SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); s_hps = (HOSTPAGESIZEPF0_S + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 84b6a2b46..8b53f7d4b 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -33,7 +33,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.1.1.83" +#define DRV_VERSION "2.3.0.12" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 @@ -191,6 +191,25 @@ struct enic { struct vnic_gen_stats gen_stats; }; +static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev) +{ + struct enic *enic = vdev->priv; + + return enic->netdev; +} + +/* wrappers function for kernel log + * Make sure variable vdev of struct vnic_dev is available in the block where + * these macros are used + */ +#define vdev_info(args...) dev_info(&vdev->pdev->dev, args) +#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args) +#define vdev_err(args...) dev_err(&vdev->pdev->dev, args) + +#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args) +#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args) +#define vdev_neterr(args...) 
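In the t4vf_sge_init() hunk above, the congestion-control field is selected per chip generation and then converted to Free List entries: as the comment notes, the SGE counts its Egress Congestion Threshold in units of 2 Free List pointers, so the driver doubles the field and adds one to stay strictly above the threshold. A standalone check of that arithmetic with the T6 field definitions from earlier in this patch (the register value is made up):

#include <stdio.h>
#include <stdint.h>

#define T6_EGRTHRESHOLDPACKING_S 16
#define T6_EGRTHRESHOLDPACKING_M 0xffU
#define T6_EGRTHRESHOLDPACKING_G(x) \
        (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)

int main(void)
{
        uint32_t sge_congestion_control = 0x00240000;   /* invented */
        unsigned int thres =
                T6_EGRTHRESHOLDPACKING_G(sge_congestion_control);

        /* Threshold is in units of 2 FL pointers; +1 keeps the FL
         * strictly above it, as in t4vf_sge_init(). */
        thres = thres * 2 + 1;
        printf("fl_starve_thres = %u\n", thres);        /* 0x24*2+1 = 73 */
        return 0;
}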
netdev_err(vnic_get_netdev(vdev), args) + static inline struct device *enic_get_dev(struct enic *enic) { return &(enic->pdev->dev); diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index d106186f4..3c677ed3c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -177,7 +177,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, int res, i; enic = netdev_priv(dev); - res = skb_flow_dissect_flow_keys(skb, &keys); + res = skb_flow_dissect_flow_keys(skb, &keys, 0); if (!res || keys.basic.n_proto != htons(ETH_P_IP) || (keys.basic.ip_proto != IPPROTO_TCP && keys.basic.ip_proto != IPPROTO_UDP)) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index f3f1601a7..f44a39c40 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; - ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) + ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; if (rxcoal->use_adaptive_rx_coalesce) ecmd->use_adaptive_rx_coalesce = 1; @@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev, return 0; } +static int enic_coalesce_valid(struct enic *enic, + struct ethtool_coalesce *ec) +{ + u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); + u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max, + ec->rx_coalesce_usecs_high); + u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max, + ec->rx_coalesce_usecs_low); + + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->tx_max_coalesced_frames_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -EINVAL; + + if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) && + ec->tx_coalesce_usecs) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > coalesce_usecs_max) || + (ec->rx_coalesce_usecs > coalesce_usecs_max) || + (ec->rx_coalesce_usecs_low > coalesce_usecs_max) || + (ec->rx_coalesce_usecs_high > coalesce_usecs_max)) + netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. 
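The vdev_* wrappers just defined rely on a variable named vdev being in scope wherever the macro expands, which is what lets vnic_cq.c and friends move from pr_err() to device-annotated logging below without threading a device pointer through each call site. A hedged userspace sketch of the idiom, with a hypothetical fake_dev type standing in for struct vnic_dev:

#include <stdio.h>

struct fake_dev { const char *name; };

/* Like the vdev_* macros: the body assumes a variable named `vdev`
 * exists wherever the macro is expanded. */
#define vdev_err(args...) \
        (printf("%s: ", vdev->name), printf(args))

static int hook_resource(struct fake_dev *vdev, int index, void *res)
{
        if (!res) {
                vdev_err("Failed to hook CQ[%d] resource\n", index);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct fake_dev dev = { .name = "vnic0" };

        hook_resource(&dev, 3, NULL);   /* prints "vnic0: Failed ..." */
        return 0;
}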
Setting max value.\n", + coalesce_usecs_max); + + if (ec->rx_coalesce_usecs_high && + (rx_coalesce_usecs_high < + rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) + return -EINVAL; + + return 0; +} + static int enic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) { @@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev, u32 rx_coalesce_usecs_high; u32 coalesce_usecs_max; unsigned int i, intr; + int ret; struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; + ret = enic_coalesce_valid(enic, ecmd); + if (ret) + return ret; coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, coalesce_usecs_max); @@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev, rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high, coalesce_usecs_max); - switch (vnic_dev_get_intr_mode(enic->vdev)) { - case VNIC_DEV_INTR_MODE_INTX: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - if (ecmd->use_adaptive_rx_coalesce || - ecmd->rx_coalesce_usecs_low || - ecmd->rx_coalesce_usecs_high) - return -EINVAL; - - intr = enic_legacy_io_intr(); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSI: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - if (ecmd->use_adaptive_rx_coalesce || - ecmd->rx_coalesce_usecs_low || - ecmd->rx_coalesce_usecs_high) - return -EINVAL; - - vnic_intr_coalescing_timer_set(&enic->intr[0], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSIX: - if (ecmd->rx_coalesce_usecs_high && - (rx_coalesce_usecs_high < - rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) - return -EINVAL; - + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { for (i = 0; i < enic->wq_count; i++) { intr = enic_msix_wq_intr(enic, i); vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - } - - rxcoal->use_adaptive_rx_coalesce = - !!ecmd->use_adaptive_rx_coalesce; - if (!rxcoal->use_adaptive_rx_coalesce) - enic_intr_coal_set_rx(enic, rx_coalesce_usecs); - - if (ecmd->rx_coalesce_usecs_high) { - rxcoal->range_end = rx_coalesce_usecs_high; - rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; - rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + - ENIC_AIC_LARGE_PKT_DIFF; + tx_coalesce_usecs); } - break; - default: - break; + enic->tx_coalesce_usecs = tx_coalesce_usecs; + } + rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce; + if (!rxcoal->use_adaptive_rx_coalesce) + enic_intr_coal_set_rx(enic, rx_coalesce_usecs); + if (ecmd->rx_coalesce_usecs_high) { + rxcoal->range_end = rx_coalesce_usecs_high; + rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; + rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + + ENIC_AIC_LARGE_PKT_DIFF; } - enic->tx_coalesce_usecs = tx_coalesce_usecs; enic->rx_coalesce_usecs = rx_coalesce_usecs; return 0; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 918a8e421..3352d027a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1149,6 +1149,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, return 0; } +static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + unsigned int intr = enic_msix_rq_intr(enic, rq->index); + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + u32 timer = cq->tobe_rx_coal_timeval; + + if 
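enic_coalesce_valid() above follows the usual ethtool driver convention: every struct ethtool_coalesce knob the hardware cannot honor is rejected with -EINVAL rather than silently dropped, MSI-X-only settings are gated on the interrupt mode, and values the hardware can take are clamped with a notice. A minimal sketch of that shape, with an invented capability set rather than the real vnic limits:

#include <errno.h>
#include <stdio.h>

struct coalesce {               /* stand-in for struct ethtool_coalesce */
        unsigned int rx_usecs;
        unsigned int rx_max_frames;     /* unsupported by this fake HW */
        unsigned int tx_usecs;
};

static int coalesce_valid(const struct coalesce *ec,
                          unsigned int usecs_max, int have_msix)
{
        if (ec->rx_max_frames)          /* unsupported knob must be zero */
                return -EINVAL;
        if (!have_msix && ec->tx_usecs) /* tx timer needs MSI-X here */
                return -EINVAL;
        if (ec->rx_usecs > usecs_max)   /* clamp-and-notify is allowed */
                fprintf(stderr, "clamping rx_usecs to %u\n", usecs_max);
        return 0;
}

int main(void)
{
        struct coalesce ec = { .rx_usecs = 500, .rx_max_frames = 4 };

        printf("valid: %d\n", coalesce_valid(&ec, 300, 1)); /* -EINVAL */
        return 0;
}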
(cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { + vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); + cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; + } +} + +static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; + int index; + u32 timer; + u32 range_start; + u32 traffic; + u64 delta; + ktime_t now = ktime_get(); + + delta = ktime_us_delta(now, cq->prev_ts); + if (delta < ENIC_AIC_TS_BREAK) + return; + cq->prev_ts = now; + + traffic = pkt_size_counter->large_pkt_bytes_cnt + + pkt_size_counter->small_pkt_bytes_cnt; + /* The table takes Mbps + * traffic *= 8 => bits + * traffic *= (10^6 / delta) => bps + * traffic /= 10^6 => Mbps + * + * Combining, traffic *= (8 / delta) + */ + + traffic <<= 3; + traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta; + + for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) + if (traffic < mod_table[index].rx_rate) + break; + range_start = (pkt_size_counter->small_pkt_bytes_cnt > + pkt_size_counter->large_pkt_bytes_cnt << 1) ? + rx_coal->small_pkt_range_start : + rx_coal->large_pkt_range_start; + timer = range_start + ((rx_coal->range_end - range_start) * + mod_table[index].range_percent / 100); + /* Damping */ + cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; + + pkt_size_counter->large_pkt_bytes_cnt = 0; + pkt_size_counter->small_pkt_bytes_cnt = 0; +} + static int enic_poll(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; @@ -1199,6 +1257,11 @@ static int enic_poll(struct napi_struct *napi, int budget) if (err) rq_work_done = rq_work_to_do; + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + /* Call the function which refreshes the intr coalescing timer + * value based on the traffic. + */ + enic_calc_int_moderation(enic, &enic->rq[0]); if (rq_work_done < rq_work_to_do) { @@ -1207,70 +1270,14 @@ static int enic_poll(struct napi_struct *napi, int budget) */ napi_complete(napi); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); } return rq_work_done; } -static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) -{ - unsigned int intr = enic_msix_rq_intr(enic, rq->index); - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - u32 timer = cq->tobe_rx_coal_timeval; - - if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { - vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); - cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; - } -} - -static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) -{ - struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; - int index; - u32 timer; - u32 range_start; - u32 traffic; - u64 delta; - ktime_t now = ktime_get(); - - delta = ktime_us_delta(now, cq->prev_ts); - if (delta < ENIC_AIC_TS_BREAK) - return; - cq->prev_ts = now; - - traffic = pkt_size_counter->large_pkt_bytes_cnt + - pkt_size_counter->small_pkt_bytes_cnt; - /* The table takes Mbps - * traffic *= 8 => bits - * traffic *= (10^6 / delta) => bps - * traffic /= 10^6 => Mbps - * - * Combining, traffic *= (8 / delta) - */ - - traffic <<= 3; - traffic = delta > UINT_MAX ? 
0 : traffic / (u32)delta; - - for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) - if (traffic < mod_table[index].rx_rate) - break; - range_start = (pkt_size_counter->small_pkt_bytes_cnt > - pkt_size_counter->large_pkt_bytes_cnt << 1) ? - rx_coal->small_pkt_range_start : - rx_coal->large_pkt_range_start; - timer = range_start + ((rx_coal->range_end - range_start) * - mod_table[index].range_percent / 100); - /* Damping */ - cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; - - pkt_size_counter->large_pkt_bytes_cnt = 0; - pkt_size_counter->small_pkt_bytes_cnt = 0; -} - #ifdef CONFIG_RFS_ACCEL static void enic_free_rx_cpu_rmap(struct enic *enic) { @@ -1407,10 +1414,8 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) if (err) work_done = work_to_do; if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - /* Call the function which refreshes - * the intr coalescing timer value based on - * the traffic. This is supported only in - * the case of MSI-x mode + /* Call the function which refreshes the intr coalescing timer + * value based on the traffic. */ enic_calc_int_moderation(enic, &enic->rq[rq]); @@ -1569,12 +1574,6 @@ static void enic_set_rx_coal_setting(struct enic *enic) int index = -1; struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - /* If intr mode is not MSIX, do not do adaptive coalescing */ - if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) { - netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing"); - return; - } - /* 1. Read the link speed from fw * 2. Pick the default range for the speed * 3. Update it in enic->rx_coalesce_setting @@ -2485,6 +2484,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_iounmap; } + err = vnic_devcmd_init(enic->vdev); + + if (err) + goto err_out_vnic_unregister; + #ifdef CONFIG_PCI_IOV /* Get number of subvnics */ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); @@ -2659,8 +2663,8 @@ err_out_disable_sriov_pp: pci_disable_sriov(pdev); enic->priv_flags &= ~ENIC_SRIOV_ENABLED; } -err_out_vnic_unregister: #endif +err_out_vnic_unregister: vnic_dev_unregister(enic->vdev); err_out_iounmap: enic_iounmap(enic); diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c index 0daa1c707..abeda2a9e 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_cq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c @@ -24,6 +24,7 @@ #include "vnic_dev.h" #include "vnic_cq.h" +#include "enic.h" void vnic_cq_free(struct vnic_cq *cq) { @@ -42,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); if (!cq->ctrl) { - pr_err("Failed to hook CQ[%d] resource\n", index); + vdev_err("Failed to hook CQ[%d] resource\n", index); return -EINVAL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 62f7b7baf..a3badefaf 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -27,46 +27,9 @@ #include "vnic_resource.h" #include "vnic_devcmd.h" #include "vnic_dev.h" +#include "vnic_wq.h" #include "vnic_stats.h" - -enum vnic_proxy_type { - PROXY_NONE, - PROXY_BY_BDF, - PROXY_BY_INDEX, -}; - -struct vnic_res { - void __iomem *vaddr; - dma_addr_t bus_addr; - unsigned int count; -}; - -struct vnic_intr_coal_timer_info { - u32 mul; - u32 div; - u32 max_usec; -}; - -struct vnic_dev { - void *priv; - struct 
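The rate computation in enic_calc_int_moderation() (added above and removed from its old location below) deserves one extra step of the derivation: bytes seen over a window of delta microseconds become Mbit/s by multiplying by 8 (bits) and by 10^6/delta (per second), and the two 10^6 factors cancel, leaving traffic * 8 / delta, i.e. traffic <<= 3 followed by the divide. A runnable check of that identity with invented numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t bytes = 1250000;       /* bytes seen in the window */
        uint64_t delta_us = 2000;       /* window length, microseconds */

        /* Long form: bytes -> bits -> bits/s -> Mbit/s */
        uint64_t mbps_long = bytes * 8 * 1000000 / delta_us / 1000000;
        /* Short form used by the driver: the 10^6 factors cancel */
        uint64_t mbps_short = (bytes << 3) / delta_us;

        printf("%llu == %llu Mbit/s\n",
               (unsigned long long)mbps_long,
               (unsigned long long)mbps_short);         /* both 5000 */
        return 0;
}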
pci_dev *pdev; - struct vnic_res res[RES_TYPE_MAX]; - enum vnic_dev_intr_mode intr_mode; - struct vnic_devcmd __iomem *devcmd; - struct vnic_devcmd_notify *notify; - struct vnic_devcmd_notify notify_copy; - dma_addr_t notify_pa; - u32 notify_sz; - dma_addr_t linkstatus_pa; - struct vnic_stats *stats; - dma_addr_t stats_pa; - struct vnic_devcmd_fw_info *fw_info; - dma_addr_t fw_info_pa; - enum vnic_proxy_type proxy; - u32 proxy_index; - u64 args[VNIC_DEVCMD_NARGS]; - struct vnic_intr_coal_timer_info intr_coal_timer_info; -}; +#include "enic.h" #define VNIC_MAX_RES_HDR_SIZE \ (sizeof(struct vnic_resource_header) + \ @@ -90,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, return -EINVAL; if (bar->len < VNIC_MAX_RES_HDR_SIZE) { - pr_err("vNIC BAR0 res hdr length error\n"); + vdev_err("vNIC BAR0 res hdr length error\n"); return -EINVAL; } rh = bar->vaddr; mrh = bar->vaddr; if (!rh) { - pr_err("vNIC BAR0 res hdr not mem-mapped\n"); + vdev_err("vNIC BAR0 res hdr not mem-mapped\n"); return -EINVAL; } @@ -106,11 +69,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, (ioread32(&rh->version) != VNIC_RES_VERSION)) { if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { - pr_err("vNIC BAR0 res magic/version error " - "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", - VNIC_RES_MAGIC, VNIC_RES_VERSION, - MGMTVNIC_MAGIC, MGMTVNIC_VERSION, - ioread32(&rh->magic), ioread32(&rh->version)); + vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + MGMTVNIC_MAGIC, MGMTVNIC_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); return -EINVAL; } } @@ -144,17 +106,15 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, /* each count is stride bytes long */ len = count * VNIC_RES_STRIDE; if (len + bar_offset > bar[bar_num].len) { - pr_err("vNIC BAR0 resource %d " - "out-of-bounds, offset 0x%x + " - "size 0x%x > bar len 0x%lx\n", - type, bar_offset, - len, - bar[bar_num].len); + vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", + type, bar_offset, len, + bar[bar_num].len); return -EINVAL; } break; case RES_TYPE_INTR_PBA_LEGACY: case RES_TYPE_DEVCMD: + case RES_TYPE_DEVCMD2: len = count; break; default: @@ -238,8 +198,8 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, &ring->base_addr_unaligned); if (!ring->descs_unaligned) { - pr_err("Failed to allocate ring (size=%d), aborting\n", - (int)ring->size); + vdev_err("Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); return -ENOMEM; } @@ -281,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -ENODEV; } if (status & STAT_BUSY) { - pr_err("Busy devcmd %d\n", _CMD_N(cmd)); + vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } @@ -315,8 +275,8 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -err; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - pr_err("Error %d devcmd %d\n", - err, _CMD_N(cmd)); + vdev_neterr("Error %d devcmd %d\n", + err, _CMD_N(cmd)); return -err; } @@ -330,10 +290,162 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, } } - pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); + vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; } +static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + struct 
devcmd2_result *result = dc2c->result + dc2c->next_result; + unsigned int i; + int delay, err; + u32 fetch_index, new_posted; + u32 posted = dc2c->posted; + + fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) + return -ENODEV; + + new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + + if (new_posted == fetch_index) { + vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n", + _CMD_N(cmd), fetch_index, posted); + return -EBUSY; + } + dc2c->cmd_ring[posted].cmd = cmd; + dc2c->cmd_ring[posted].flags = 0; + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + dc2c->cmd_ring[posted].args[i] = vdev->args[i]; + + /* Adding write memory barrier prevents compiler and/or CPU reordering, + * thus avoiding descriptor posting before descriptor is initialized. + * Otherwise, hardware can read stale descriptor fields. + */ + wmb(); + iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); + dc2c->posted = new_posted; + + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) + return 0; + + for (delay = 0; delay < wait; delay++) { + if (result->color == dc2c->color) { + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 0 : 1; + } + if (result->error) { + err = result->error; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + vdev_neterr("Error %d devcmd %d\n", + err, _CMD_N(cmd)); + return -err; + } + if (_CMD_DIR(cmd) & _CMD_DIR_READ) + for (i = 0; i < VNIC_DEVCMD2_NARGS; i++) + vdev->args[i] = result->results[i]; + + return 0; + } + udelay(100); + } + + vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd)); + + return -ETIMEDOUT; +} + +static int vnic_dev_init_devcmd1(struct vnic_dev *vdev) +{ + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + return -ENODEV; + vdev->devcmd_rtn = _vnic_dev_cmd; + + return 0; +} + +static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) +{ + int err; + unsigned int fetch_index; + + if (vdev->devcmd2) + return 0; + + vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL); + if (!vdev->devcmd2) + return -ENOMEM; + + vdev->devcmd2->color = 1; + vdev->devcmd2->result_size = DEVCMD2_RING_SIZE; + err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE, + DEVCMD2_DESC_SIZE); + if (err) + goto err_free_devcmd2; + + fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + vdev_err("Fatal error in devcmd2 init - hardware surprise removal"); + + return -ENODEV; + } + + enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, + 0); + vdev->devcmd2->posted = fetch_index; + vnic_wq_enable(&vdev->devcmd2->wq); + + err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) + goto err_free_wq; + + vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; + vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; + vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; + vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr | + VNIC_PADDR_TARGET; + vdev->args[1] = DEVCMD2_RING_SIZE; + + err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000); + if (err) + goto err_free_desc_ring; + + vdev->devcmd_rtn = _vnic_dev_cmd2; + + return 0; + +err_free_desc_ring: + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); +err_free_wq: + 
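Two details carry the weight in _vnic_dev_cmd2() above: the ring is treated as full when advancing the posted index would land on the hardware fetch index (one slot is deliberately sacrificed), and the write barrier makes the descriptor body globally visible before the posted-index doorbell that allows hardware to fetch it. A single-producer sketch of the same discipline; __sync_synchronize() stands in for the kernel's wmb():

#include <stdio.h>

#define RING_SIZE 32

struct desc { int cmd; };

static struct desc ring[RING_SIZE];
static volatile unsigned int posted;    /* advanced by software */
static volatile unsigned int fetch;     /* advanced by "hardware" */

static int post(int cmd)
{
        unsigned int next = (posted + 1) % RING_SIZE;

        if (next == fetch)              /* full: would catch the consumer */
                return -1;
        ring[posted].cmd = cmd;         /* fill the descriptor first ... */
        __sync_synchronize();           /* ... then publish (wmb() in-kernel) */
        posted = next;                  /* doorbell: consumer may now fetch */
        return 0;
}

int main(void)
{
        int n = 0;

        while (post(n) == 0)
                n++;
        printf("ring full after %d posts\n", n);        /* 31 of 32 slots */
        return 0;
}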
vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); +err_free_devcmd2: + kfree(vdev->devcmd2); + vdev->devcmd2 = NULL; + + return err; +} + +static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) +{ + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); + vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); + kfree(vdev->devcmd2); +} + static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) @@ -348,7 +460,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, vdev->args[2] = *a0; vdev->args[3] = *a1; - err = _vnic_dev_cmd(vdev, proxy_cmd, wait); + err = vdev->devcmd_rtn(vdev, proxy_cmd, wait); if (err) return err; @@ -357,7 +469,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, err = (int)vdev->args[1]; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); + vdev_neterr("Error %d proxy devcmd %d\n", err, + _CMD_N(cmd)); return err; } @@ -375,7 +488,7 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, vdev->args[0] = *a0; vdev->args[1] = *a1; - err = _vnic_dev_cmd(vdev, cmd, wait); + err = vdev->devcmd_rtn(vdev, cmd, wait); *a0 = vdev->args[0]; *a1 = vdev->args[1]; @@ -650,7 +763,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) - pr_err("Can't set packet filter\n"); + vdev_neterr("Can't set packet filter\n"); return err; } @@ -667,7 +780,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) - pr_err("Can't add addr [%pM], %d\n", addr, err); + vdev_neterr("Can't add addr [%pM], %d\n", addr, err); return err; } @@ -684,7 +797,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) - pr_err("Can't del addr [%pM], %d\n", addr, err); + vdev_neterr("Can't del addr [%pM], %d\n", addr, err); return err; } @@ -728,7 +841,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) dma_addr_t notify_pa; if (vdev->notify || vdev->notify_pa) { - pr_err("notify block %p still allocated", vdev->notify); + vdev_neterr("notify block %p still allocated", vdev->notify); return -EINVAL; } @@ -838,7 +951,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) memset(vdev->args, 0, sizeof(vdev->args)); if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT)) - err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait); + err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait); else err = ERR_ECMDUNKNOWN; @@ -847,7 +960,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) */ if ((err == ERR_ECMDUNKNOWN) || (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { - pr_warn("Using default conversion factor for interrupt coalesce timer\n"); + vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n"); vnic_dev_intr_coal_timer_info_default(vdev); return 0; } @@ -938,6 +1051,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev) pci_free_consistent(vdev->pdev, sizeof(struct vnic_devcmd_fw_info), vdev->fw_info, vdev->fw_info_pa); + if (vdev->devcmd2) + vnic_dev_deinit_devcmd2(vdev); + kfree(vdev); } } @@ -959,10 +1075,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, if (vnic_dev_discover_res(vdev, bar, num_bars)) goto err_out; - vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); - if (!vdev->devcmd) - 
goto err_out; - return vdev; err_out: @@ -977,6 +1089,29 @@ struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev) } EXPORT_SYMBOL(vnic_dev_get_pdev); +int vnic_devcmd_init(struct vnic_dev *vdev) +{ + void __iomem *res; + int err; + + res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (res) { + err = vnic_dev_init_devcmd2(vdev); + if (err) + vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1", + err); + else + return 0; + } else { + vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n"); + } + err = vnic_dev_init_devcmd1(vdev); + if (err) + vdev_err("DEVCMD1 initialization failed: %d", err); + + return err; +} + int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len) { u64 a0, a1 = len; diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 1fb214efc..b013b6a78 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -70,7 +70,48 @@ struct vnic_dev_ring { unsigned int desc_avail; }; -struct vnic_dev; +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, + PROXY_BY_INDEX, +}; + +struct vnic_res { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned int count; +}; + +struct vnic_intr_coal_timer_info { + u32 mul; + u32 div; + u32 max_usec; +}; + +struct vnic_dev { + void *priv; + struct pci_dev *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 notify_sz; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + enum vnic_proxy_type proxy; + u32 proxy_index; + u64 args[VNIC_DEVCMD_NARGS]; + struct vnic_intr_coal_timer_info intr_coal_timer_info; + struct devcmd2_controller *devcmd2; + int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait); +}; + struct vnic_stats; void *vnic_dev_priv(struct vnic_dev *vdev); @@ -135,5 +176,6 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter *data); +int vnic_devcmd_init(struct vnic_dev *vdev); #endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index 435d0cd96..2a812880b 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -365,6 +365,12 @@ enum vnic_devcmd_cmd { */ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + /* Initialization for the devcmd2 interface. + * in: (u64) a0 = host result buffer physical address + * in: (u16) a1 = number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57), + /* Add a filter. 
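vnic_devcmd_init() above hides the two firmware interfaces behind the vdev->devcmd_rtn function pointer: probe for the DEVCMD2 resource, fall back to DEVCMD1 when it is missing or fails to initialize, and let every caller issue commands through the same indirect call, as the _vnic_dev_cmd to vdev->devcmd_rtn conversions in this patch show. The dispatch idiom reduced to a sketch with invented names:

#include <stdio.h>

struct dev {
        int (*cmd_rtn)(struct dev *d, int cmd, int wait);
};

static int cmd_v1(struct dev *d, int cmd, int wait)
{
        printf("v1: cmd %d\n", cmd);
        return 0;
}

static int cmd_v2(struct dev *d, int cmd, int wait)
{
        printf("v2: cmd %d\n", cmd);
        return 0;
}

/* Prefer v2 when its resource exists, else fall back, as in
 * vnic_devcmd_init(). */
static void devcmd_init(struct dev *d, int have_v2)
{
        d->cmd_rtn = have_v2 ? cmd_v2 : cmd_v1;
}

int main(void)
{
        struct dev d;

        devcmd_init(&d, 0);
        return d.cmd_rtn(&d, 57, 1000); /* callers never care which */
}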
* in: (u64) a0= filter address * (u32) a1= size of filter @@ -629,4 +635,26 @@ struct vnic_devcmd { u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ }; +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS +struct vnic_devcmd2 { + u16 pad; + u16 flags; + u32 cmd; + u64 args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + u64 results[VNIC_DEVCMD2_NRESULTS]; + u32 pad; + u16 completed_index; + u8 error; + u8 color; +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + #endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c index 0ca107f7b..942759d9c 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_intr.c +++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c @@ -25,6 +25,7 @@ #include "vnic_dev.h" #include "vnic_intr.h" +#include "enic.h" void vnic_intr_free(struct vnic_intr *intr) { @@ -39,7 +40,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { - pr_err("Failed to hook INTR[%d].ctrl resource\n", index); + vdev_err("Failed to hook INTR[%d].ctrl resource\n", index); return -EINVAL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h index e0a73f1ca..4e45f88ac 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_resource.h +++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h @@ -48,6 +48,13 @@ enum vnic_res_type { RES_TYPE_RSVD7, RES_TYPE_DEVCMD, /* Device command region */ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ RES_TYPE_MAX, /* Count of resource types */ }; diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index c4b2183bf..cce2777df 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -26,6 +26,7 @@ #include "vnic_dev.h" #include "vnic_rq.h" +#include "enic.h" static int vnic_rq_alloc_bufs(struct vnic_rq *rq) { @@ -91,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); if (!rq->ctrl) { - pr_err("Failed to hook RQ[%d] resource\n", index); + vdev_err("Failed to hook RQ[%d] resource\n", index); return -EINVAL; } @@ -167,6 +168,7 @@ void vnic_rq_enable(struct vnic_rq *rq) int vnic_rq_disable(struct vnic_rq *rq) { unsigned int wait; + struct vnic_dev *vdev = rq->vdev; iowrite32(0, &rq->ctrl->enable); @@ -177,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq) udelay(10); } - pr_err("Failed to disable RQ[%d]\n", rq->index); + vdev_neterr("Failed to disable RQ[%d]\n", rq->index); return -ETIMEDOUT; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c index b5a1c937f..05ad16a7e 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c @@ -26,6 +26,7 @@ #include "vnic_dev.h" #include "vnic_wq.h" +#include "enic.h" static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { @@ -94,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev 
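The devcmd2 result ring declared below needs no per-entry valid flag: firmware writes each lap of the ring with an alternating color bit, and the driver treats an entry as fresh when its color matches the one it currently expects, flipping that expectation on wrap, exactly as _vnic_dev_cmd2() polls. A stand-alone sketch of that consumer logic:

#include <stdio.h>

#define RING 4

struct result { int value; unsigned char color; };

static struct result ring[RING];
static unsigned int next_result;
static int color = 1;           /* expected next, as in devcmd2 init */

/* Returns 1 and fills *value if a fresh entry is ready, else 0. */
static int poll_result(int *value)
{
        struct result *r = &ring[next_result];

        if (r->color != color)
                return 0;       /* stale entry from the previous lap */
        *value = r->value;
        if (++next_result == RING) {
                next_result = 0;
                color = !color; /* firmware flips color each lap */
        }
        return 1;
}

int main(void)
{
        int v;

        /* "firmware" completes slot 0 on lap one (color 1) */
        ring[0] = (struct result){ .value = 42, .color = 1 };
        printf("ready=%d v=%d\n", poll_result(&v), v);
        printf("ready=%d (slot 1 still stale)\n", poll_result(&v));
        return 0;
}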
*vdev, struct vnic_wq *wq, unsigned int index, wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); if (!wq->ctrl) { - pr_err("Failed to hook WQ[%d] resource\n", index); + vdev_err("Failed to hook WQ[%d] resource\n", index); return -EINVAL; } @@ -113,10 +114,27 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, return 0; } -static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, - unsigned int fetch_index, unsigned int posted_index, - unsigned int error_interrupt_enable, - unsigned int error_interrupt_offset) +int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = 0; + wq->vdev = vdev; + + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (!wq->ctrl) + return -EINVAL; + vnic_wq_disable(wq); + err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); + + return err; +} + +void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = wq->ring.desc_count; @@ -140,7 +158,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { - vnic_wq_init_start(wq, cq_index, 0, 0, + enic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, error_interrupt_offset); } @@ -158,6 +176,7 @@ void vnic_wq_enable(struct vnic_wq *wq) int vnic_wq_disable(struct vnic_wq *wq) { unsigned int wait; + struct vnic_dev *vdev = wq->vdev; iowrite32(0, &wq->ctrl->enable); @@ -168,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq) udelay(10); } - pr_err("Failed to disable WQ[%d]\n", wq->index); + vdev_neterr("Failed to disable WQ[%d]\n", wq->index); return -ETIMEDOUT; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h index 296154351..01209613d 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -88,6 +88,18 @@ struct vnic_wq { unsigned int pkts_outstanding; }; +struct devcmd2_controller { + struct vnic_wq_ctrl __iomem *wq_ctrl; + struct vnic_devcmd2 *cmd_ring; + struct devcmd2_result *result; + u16 next_result; + u16 result_size; + int color; + struct vnic_dev_ring results_ring; + struct vnic_wq wq; + u32 posted; +}; + static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) { /* how many does SW own? 
*/ @@ -174,5 +186,11 @@ void vnic_wq_enable(struct vnic_wq *wq); int vnic_wq_disable(struct vnic_wq *wq); void vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); +int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size); +void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); #endif /* _VNIC_WQ_H_ */ diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index c0a781360..cf94b72db 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1226,7 +1226,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id) if (int_status & ISR_PRS) dm9000_rx(dev); - /* Trnasmit Interrupt check */ + /* Transmit Interrupt check */ if (int_status & ISR_PTS) dm9000_tx_done(dev, db); diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index d1017509b..f7b424839 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -604,19 +604,7 @@ static struct pci_driver pci_driver = { .probe = ec_bhf_probe, .remove = ec_bhf_remove, }; - -static int __init ec_bhf_init(void) -{ - return pci_register_driver(&pci_driver); -} - -static void __exit ec_bhf_exit(void) -{ - pci_unregister_driver(&pci_driver); -} - -module_init(ec_bhf_init); -module_exit(ec_bhf_exit); +module_pci_driver(pci_driver); module_param(polling_frequency, long, S_IRUGO); MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8d12b41b3..d463563e1 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -37,7 +37,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "10.6.0.2" +#define DRV_VER "10.6.0.3" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -105,6 +105,8 @@ #define MAX_VFS 30 /* Max VFs supported by BE3 FW */ #define FW_VER_LEN 32 +#define CNTL_SERIAL_NUM_WORDS 8 /* Controller serial number words */ +#define CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte-sz of serial num word */ #define RSS_INDIR_TABLE_LEN 128 #define RSS_HASH_KEY_LEN 40 @@ -228,6 +230,7 @@ struct be_mcc_obj { struct be_tx_stats { u64 tx_bytes; u64 tx_pkts; + u64 tx_vxlan_offload_pkts; u64 tx_reqs; u64 tx_compl; ulong tx_jiffies; @@ -275,6 +278,7 @@ struct be_rx_page_info { struct be_rx_stats { u64 rx_bytes; u64 rx_pkts; + u64 rx_vxlan_offload_pkts; u32 rx_drops_no_skbs; /* skb allocation errors */ u32 rx_drops_no_frags; /* HW has no fetched frags */ u32 rx_post_fail; /* page post alloc failures */ @@ -578,6 +582,7 @@ struct be_adapter { u16 pvid; __be16 vxlan_port; int vxlan_port_count; + int vxlan_port_aliases; struct phy_info phy; u8 wol_cap; bool wol_en; @@ -587,9 +592,11 @@ struct be_adapter { int be_get_temp_freq; struct be_hwmon hwmon_info; u8 pf_number; + u8 pci_func_num; struct rss_info rss_info; /* Filters for packets that need to be sent to BMC */ u32 bmc_filt_mask; + u16 serial_num[CNTL_SERIAL_NUM_WORDS]; }; #define be_physfn(adapter) (!adapter->virtfn) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 9eac3227d..1795c935f 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ 
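The ec_bhf hunk above is pure boilerplate removal: module_pci_driver() expands to the same __init/__exit registration pair it replaces. For reference, a complete minimal PCI driver skeleton built around it; the vendor/device IDs are placeholders:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* placeholder vendor/device */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pcim_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
}

static struct pci_driver demo_driver = {
        .name           = "demo",
        .id_table       = demo_ids,
        .probe          = demo_probe,
        .remove         = demo_remove,
};
/* Replaces the hand-written module_init()/module_exit() pair. */
module_pci_driver(demo_driver);

MODULE_LICENSE("GPL");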
b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -88,19 +88,21 @@ static inline void *embedded_payload(struct be_mcc_wrb *wrb) return wrb->payload.embedded_payload; } -static void be_mcc_notify(struct be_adapter *adapter) +static int be_mcc_notify(struct be_adapter *adapter) { struct be_queue_info *mccq = &adapter->mcc_obj.q; u32 val = 0; if (be_check_error(adapter, BE_ERROR_ANY)) - return; + return -EIO; val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; wmb(); iowrite32(val, adapter->db + DB_MCCQ_OFFSET); + + return 0; } /* To check if valid bit is set, check the entire word as we don't know @@ -170,6 +172,12 @@ static void be_async_cmd_process(struct be_adapter *adapter, return; } + if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE && + subsystem == CMD_SUBSYSTEM_LOWLEVEL) { + complete(&adapter->et_cmd_compl); + return; + } + if ((opcode == OPCODE_COMMON_WRITE_FLASHROM || opcode == OPCODE_COMMON_WRITE_OBJECT) && subsystem == CMD_SUBSYSTEM_COMMON) { @@ -541,7 +549,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter) resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto out; status = be_mcc_wait_compl(adapter); if (status == -EIO) @@ -841,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter, return status; dest_wrb = be_cmd_copy(adapter, wrb); - if (!dest_wrb) - return -EBUSY; + if (!dest_wrb) { + status = -EBUSY; + goto unlock; + } if (use_mcc(adapter)) status = be_mcc_notify_wait(adapter); @@ -852,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter, if (!status) memcpy(wrb, dest_wrb, sizeof(*wrb)); +unlock: be_cmd_unlock(adapter); return status; } @@ -1547,7 +1560,10 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) else hdr->version = 2; - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; + adapter->stats_cmd_sent = true; err: @@ -1583,7 +1599,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); req->cmd_params.params.reset_stats = 0; - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; + adapter->stats_cmd_sent = true; err: @@ -1687,8 +1706,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), wrb, NULL); - be_mcc_notify(adapter); - + status = be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -1860,7 +1878,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, cpu_to_le32(set_eqd[i].delay_multiplier); } - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -1969,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) be_if_cap_flags(adapter)); } flags &= be_if_cap_flags(adapter); + if (!flags) + return -ENOTSUPP; return __be_cmd_rx_filter(adapter, flags, value); } @@ -2320,7 +2340,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + sizeof(struct lancer_cmd_req_write_object))); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, @@ -2491,7 +2514,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 
req->params.op_code = cpu_to_le32(flash_opcode); req->params.data_buf_size = cpu_to_le32(buf_size); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, @@ -2585,7 +2611,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; - goto err; + goto err_unlock; } req = embedded_payload(wrb); @@ -2599,8 +2625,19 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, req->loopback_type = loopback_type; req->loopback_state = enable; - status = be_mcc_notify_wait(adapter); -err: + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + + spin_unlock_bh(&adapter->mcc_lock); + + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, + msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) + status = -ETIMEDOUT; + + return status; + +err_unlock: spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2636,7 +2673,9 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, req->num_pkts = cpu_to_le32(num_pkts); req->loopback_type = cpu_to_le32(loopback_type); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; spin_unlock_bh(&adapter->mcc_lock); @@ -2818,10 +2857,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) struct be_mcc_wrb *wrb; struct be_cmd_req_cntl_attribs *req; struct be_cmd_resp_cntl_attribs *resp; - int status; + int status, i; int payload_len = max(sizeof(*req), sizeof(*resp)); struct mgmt_controller_attrib *attribs; struct be_dma_mem attribs_cmd; + u32 *serial_num; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -2852,6 +2892,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; + adapter->pci_func_num = attribs->pci_func_num; + serial_num = attribs->hba_attribs.controller_serial_number; + for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) + adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & + (BIT_MASK(16) - 1); } err: @@ -3670,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) status = -EINVAL; goto err; } - adapter->pf_number = desc->pf_num; be_copy_nic_desc(res, desc); } @@ -3682,7 +3726,10 @@ err: return status; } -/* Will use MBOX only if MCCQ has not been created */ +/* Will use MBOX only if MCCQ has not been created + * non-zero domain => a PF is querying this on behalf of a VF + * zero domain => a PF or a VF is querying this for itself + */ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 query, u8 domain) { @@ -3709,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, OPCODE_COMMON_GET_PROFILE_CONFIG, cmd.size, &wrb, &cmd); - req->hdr.domain = domain; if (!lancer_chip(adapter)) req->hdr.version = 1; req->type = ACTIVE_PROFILE_TYPE; + /* When a function is querying profile information relating to + * itself hdr.pf_number must be set to it's pci_func_num + 1 + */ + req->hdr.domain = domain; + if (domain == 0) + req->hdr.pf_num = adapter->pci_func_num + 1; /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the * descriptors with all bits set to "1" for the fields which can be @@ -3882,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter *adapter, vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS); } - - 
nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); } else { num_vf_qs = 1; } + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } + + nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); nic_vft->rq_count = cpu_to_le16(num_vf_qs); nic_vft->txq_count = cpu_to_le16(num_vf_qs); nic_vft->rssq_count = cpu_to_le16(num_vf_qs); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 00e3a6b6b..91155ea74 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -289,7 +289,9 @@ struct be_cmd_req_hdr { u32 timeout; /* dword 1 */ u32 request_length; /* dword 2 */ u8 version; /* dword 3 */ - u8 rsvd[3]; /* dword 3 */ + u8 rsvd1; /* dword 3 */ + u8 pf_num; /* dword 3 */ + u8 rsvd2; /* dword 3 */ }; #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ @@ -1500,6 +1502,8 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 { #define BE_PME_D3COLD_CAP 0x80 /********************** LoopBack test *********************/ +#define SET_LB_MODE_TIMEOUT 12000 + struct be_cmd_req_loopback_test { struct be_cmd_req_hdr hdr; u32 loopback_type; @@ -1640,15 +1644,21 @@ struct be_cmd_req_set_qos { struct mgmt_hba_attribs { u32 rsvd0[24]; u8 controller_model_number[32]; - u32 rsvd1[79]; - u8 rsvd2[3]; + u32 rsvd1[16]; + u32 controller_serial_number[8]; + u32 rsvd2[55]; + u8 rsvd3[3]; u8 phy_port; - u32 rsvd3[13]; + u32 rsvd4[13]; } __packed; struct mgmt_controller_attrib { struct mgmt_hba_attribs hba_attribs; - u32 rsvd0[10]; + u32 rsvd0[2]; + u16 rsvd1; + u8 pci_func_num; + u8 rsvd2; + u32 rsvd3[7]; } __packed; struct be_cmd_req_cntl_attribs { @@ -1763,6 +1773,7 @@ struct be_cmd_req_set_mac_list { /*********************** HSW Config ***********************/ #define PORT_FWD_TYPE_VEPA 0x3 #define PORT_FWD_TYPE_VEB 0x2 +#define PORT_FWD_TYPE_PASSTHRU 0x1 #define ENABLE_MAC_SPOOFCHK 0x2 #define DISABLE_MAC_SPOOFCHK 0x3 diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b2476dbfd..2c9ed1710 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -138,6 +138,7 @@ static const struct be_ethtool_stat et_stats[] = { static const struct be_ethtool_stat et_rx_stats[] = { {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ + {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)}, {DRVSTAT_RX_INFO(rx_compl)}, {DRVSTAT_RX_INFO(rx_compl_err)}, {DRVSTAT_RX_INFO(rx_mcast_pkts)}, @@ -190,6 +191,7 @@ static const struct be_ethtool_stat et_tx_stats[] = { {DRVSTAT_TX_INFO(tx_internal_parity_err)}, {DRVSTAT_TX_INFO(tx_bytes)}, {DRVSTAT_TX_INFO(tx_pkts)}, + {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)}, /* Number of skbs queued for trasmission by the driver */ {DRVSTAT_TX_INFO(tx_reqs)}, /* Number of times the TX queue was stopped due to lack @@ -847,10 +849,21 @@ err: static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, u64 *status) { - be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1); + int ret; + + ret = be_cmd_set_loopback(adapter, adapter->hba_port_num, + loopback_type, 1); + if (ret) + return ret; + *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, loopback_type, 1500, 2, 0xabc); - be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1); + + ret = 
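With SET_LB_MODE_TIMEOUT defined above, be_cmd_set_loopback() no longer waits synchronously on the MCC queue: it posts the command, drops the lock, and blocks in wait_for_completion_timeout() until the asynchronous completion added to be_async_cmd_process() earlier in this patch fires. The general issue-then-wait shape, as a hedged kernel-style sketch (issue_cmd and the completion stand in for be_mcc_notify() and adapter->et_cmd_compl):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define SET_LB_MODE_TIMEOUT 12000       /* ms, as in be_cmds.h */

static int issue_and_wait(struct completion *done,
                          int (*issue_cmd)(void *), void *arg)
{
        int status = issue_cmd(arg);    /* post, do not busy-wait */

        if (status)
                return status;
        if (!wait_for_completion_timeout(done,
                        msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
                return -ETIMEDOUT;      /* async path never ran complete() */
        return 0;
}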
be_cmd_set_loopback(adapter, adapter->hba_port_num, + BE_NO_LOOPBACK, 1); + if (ret) + return ret; + return *status; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6ca693b03..eb48a977f 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -681,11 +681,14 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status) static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb) { struct be_tx_stats *stats = tx_stats(txo); + u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1; u64_stats_update_begin(&stats->sync); stats->tx_reqs++; stats->tx_bytes += skb->len; - stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1); + stats->tx_pkts += tx_pkts; + if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) + stats->tx_vxlan_offload_pkts += tx_pkts; u64_stats_update_end(&stats->sync); } @@ -1120,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, struct be_wrb_params *wrb_params) { - /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or - * less may cause a transmit stall on that port. So the work-around is - * to pad short packets (<= 32 bytes) to a 36-byte length. + /* Lancer, SH and BE3 in SRIOV mode have a bug wherein + * packets that are 32b or less may cause a transmit stall + * on that port. The workaround is to pad such packets + * (len <= 32 bytes) to a minimum length of 36b. */ - if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { + if (skb->len <= 32) { if (skb_put_padto(skb, 36)) return NULL; } @@ -1258,7 +1262,7 @@ static bool be_send_pkt_to_bmc(struct be_adapter *adapter, if (is_udp_pkt((*skb))) { struct udphdr *udp = udp_hdr((*skb)); - switch (udp->dest) { + switch (ntohs(udp->dest)) { case DHCP_CLIENT_PORT: os2bmc = is_dhcp_client_filt_enabled(adapter); goto done; @@ -1961,6 +1965,8 @@ static void be_rx_stats_update(struct be_rx_obj *rxo, stats->rx_compl++; stats->rx_bytes += rxcp->pkt_size; stats->rx_pkts++; + if (rxcp->tunneled) + stats->rx_vxlan_offload_pkts++; if (rxcp->pkt_type == BE_MULTICAST_PACKET) stats->rx_mcast_pkts++; if (rxcp->err) @@ -3610,15 +3616,15 @@ err: static int be_setup_wol(struct be_adapter *adapter, bool enable) { + struct device *dev = &adapter->pdev->dev; struct be_dma_mem cmd; - int status = 0; u8 mac[ETH_ALEN]; + int status; eth_zero_addr(mac); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_KERNEL); + cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); if (!cmd.va) return -ENOMEM; @@ -3627,24 +3633,18 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); if (status) { - dev_err(&adapter->pdev->dev, - "Could not enable Wake-on-lan\n"); - dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, - cmd.dma); - return status; + dev_err(dev, "Could not enable Wake-on-lan\n"); + goto err; } - status = be_cmd_enable_magic_wol(adapter, - adapter->netdev->dev_addr, - &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, 1); - pci_enable_wake(adapter->pdev, PCI_D3cold, 1); } else { - status = be_cmd_enable_magic_wol(adapter, mac, &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, 0); - pci_enable_wake(adapter->pdev, PCI_D3cold, 0); + ether_addr_copy(mac, adapter->netdev->dev_addr); } - dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); + status = 
be_cmd_enable_magic_wol(adapter, mac, &cmd); + pci_enable_wake(adapter->pdev, PCI_D3hot, enable); + pci_enable_wake(adapter->pdev, PCI_D3cold, enable); +err: + dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -4206,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter) int status, level; u16 profile_id; - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; - status = be_cmd_query_fw_cfg(adapter); if (status) return status; @@ -4408,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter) if (!lancer_chip(adapter)) be_cmd_req_native_mode(adapter); + /* Need to invoke this cmd first to get the PCI Function Number */ + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + if (!BE2_chip(adapter) && be_physfn(adapter)) be_alloc_sriov_res(adapter); @@ -4977,7 +4978,7 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter, { if (!fhdr) { dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); - return -1; + return false; } /* First letter of the build version is used to identify @@ -5000,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter, return false; } - return (fhdr->asic_type_rev >= adapter->asic_rev); + /* In BE3 FW images the "asic_type_rev" field doesn't track the + * asic_rev of the chips it is compatible with. + * When asic_type_rev is 0 the image is compatible only with + * pre-BE3-R chips (asic_rev < 0x10) + */ + if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) + return adapter->asic_rev < 0x10; + else + return (fhdr->asic_type_rev >= adapter->asic_rev); } static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) @@ -5132,9 +5141,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, int status = 0; u8 hsw_mode; - if (!sriov_enabled(adapter)) - return 0; - /* BE and Lancer chips support VEB mode only */ if (BEx_chip(adapter) || lancer_chip(adapter)) { hsw_mode = PORT_FWD_TYPE_VEB; @@ -5144,6 +5150,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, NULL); if (status) return 0; + + if (hsw_mode == PORT_FWD_TYPE_PASSTHRU) + return 0; } return ndo_dflt_bridge_getlink(skb, pid, seq, dev, @@ -5177,6 +5186,11 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) return; + if (adapter->vxlan_port == port && adapter->vxlan_port_count) { + adapter->vxlan_port_aliases++; + return; + } + if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { dev_info(dev, "Only one UDP port supported for VxLAN offloads\n"); @@ -5227,6 +5241,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, if (adapter->vxlan_port != port) goto done; + if (adapter->vxlan_port_aliases) { + adapter->vxlan_port_aliases--; + return; + } + be_disable_vxlan_offloads(adapter); dev_info(&adapter->pdev->dev, @@ -5278,6 +5297,27 @@ static netdev_features_t be_features_check(struct sk_buff *skb, } #endif +static int be_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1; + struct be_adapter *adapter = netdev_priv(dev); + u8 *id; + + if (MAX_PHYS_ITEM_ID_LEN < id_len) + return -ENOSPC; + + ppid->id[0] = adapter->hba_port_num + 1; + id = &ppid->id[1]; + for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0; + i--, id += CNTL_SERIAL_NUM_WORD_SZ) + memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ); + + ppid->id_len = id_len; + + 
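/*
 * Sketch of the ID layout be_get_phys_port_id() above emits, assuming
 * CNTL_SERIAL_NUM_WORDS words of CNTL_SERIAL_NUM_WORD_SZ bytes each
 * (both driver-defined): byte 0 is the 1-based HBA port number and the
 * remaining bytes are the controller serial number, copied highest
 * word first, so all ports of one controller differ only in byte 0.
 * The demo_ helper below is hypothetical.
 */
static void demo_dump_phys_port_id(const struct netdev_phys_item_id *ppid)
{
	/* dump the opaque ID exactly as the stack will see it */
	print_hex_dump(KERN_DEBUG, "phys_port_id: ", DUMP_PREFIX_NONE,
		       16, 1, ppid->id, ppid->id_len, false);
}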
return 0; +} + static const struct net_device_ops be_netdev_ops = { .ndo_open = be_open, .ndo_stop = be_close, @@ -5308,6 +5348,7 @@ static const struct net_device_ops be_netdev_ops = { .ndo_del_vxlan_port = be_del_vxlan_port, .ndo_features_check = be_features_check, #endif + .ndo_get_phys_port_id = be_get_phys_port_id, }; static void be_netdev_init(struct net_device *netdev) @@ -5866,7 +5907,6 @@ static int be_pci_resume(struct pci_dev *pdev) if (status) return status; - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); status = be_resume(adapter); @@ -5946,7 +5986,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; pci_set_master(pdev); - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* Check if card is ok and fw is ready */ diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 442410cd2..a2c96fd88 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1132,10 +1132,6 @@ static int ethoc_probe(struct platform_device *pdev) memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); priv->phy_id = pdata->phy_id; } else { - priv->phy_id = -1; - -#ifdef CONFIG_OF - { const uint8_t *mac; mac = of_get_property(pdev->dev.of_node, @@ -1143,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev) NULL); if (mac) memcpy(netdev->dev_addr, mac, IFHWADDRLEN); - } -#endif + priv->phy_id = -1; } /* Check that the given MAC address is valid. If it isn't, read the diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 24a85b292..63c2bcf80 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -150,6 +150,9 @@ static void nps_enet_tx_handler(struct net_device *ndev) if (!priv->tx_packet_sent || tx_ctrl.ct) return; + /* Ack Tx ctrl register */ + nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0); + /* Check Tx transmit error */ if (unlikely(tx_ctrl.et)) { ndev->stats.tx_errors++; @@ -158,11 +161,7 @@ static void nps_enet_tx_handler(struct net_device *ndev) ndev->stats.tx_bytes += tx_ctrl.nt; } - if (priv->tx_skb) { - dev_kfree_skb(priv->tx_skb); - priv->tx_skb = NULL; - } - + dev_kfree_skb(priv->tx_skb); priv->tx_packet_sent = false; if (netif_queue_stopped(ndev)) @@ -180,15 +179,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_buf_int_enable buf_int_enable; u32 work_done; - buf_int_enable.rx_rdy = NPS_ENET_ENABLE; - buf_int_enable.tx_done = NPS_ENET_ENABLE; nps_enet_tx_handler(ndev); work_done = nps_enet_rx_handler(ndev); if (work_done < budget) { + struct nps_enet_buf_int_enable buf_int_enable; + napi_complete(napi); + buf_int_enable.rx_rdy = NPS_ENET_ENABLE; + buf_int_enable.tx_done = NPS_ENET_ENABLE; nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, buf_int_enable.value); } @@ -211,12 +211,13 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance) { struct net_device *ndev = dev_instance; struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_buf_int_cause buf_int_cause; + struct nps_enet_rx_ctl rx_ctrl; + struct nps_enet_tx_ctl tx_ctrl; - buf_int_cause.value = - nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE); + rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); + tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - if (buf_int_cause.tx_done || buf_int_cause.rx_rdy) + if ((!tx_ctrl.ct && priv->tx_packet_sent) || 
rx_ctrl.cr) if (likely(napi_schedule_prep(&priv->napi))) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); __napi_schedule(&priv->napi); @@ -307,11 +308,8 @@ static void nps_enet_hw_enable_control(struct net_device *ndev) /* Discard Packets bigger than max frame length */ max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN; - if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) { + if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) ge_mac_cfg_3->max_len = max_frame_length; - nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3, - ge_mac_cfg_3->value); - } /* Enable interrupts */ buf_int_enable.rx_rdy = NPS_ENET_ENABLE; @@ -339,11 +337,14 @@ static void nps_enet_hw_enable_control(struct net_device *ndev) ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE; ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE; ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR; + ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE; /* Enable Rx and Tx */ ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE; ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE; + nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3, + ge_mac_cfg_3->value); nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, ge_mac_cfg_0.value); } @@ -527,10 +528,10 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb, /* This driver handles one frame at a time */ netif_stop_queue(ndev); - nps_enet_send_frame(ndev, skb); - priv->tx_skb = skb; + nps_enet_send_frame(ndev, skb); + return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h index fc45c9daa..6703674d6 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.h +++ b/drivers/net/ethernet/ezchip/nps_enet.h @@ -36,7 +36,6 @@ #define NPS_ENET_REG_RX_CTL 0x810 #define NPS_ENET_REG_RX_BUF 0x818 #define NPS_ENET_REG_BUF_INT_ENABLE 0x8C0 -#define NPS_ENET_REG_BUF_INT_CAUSE 0x8C4 #define NPS_ENET_REG_GE_MAC_CFG_0 0x1000 #define NPS_ENET_REG_GE_MAC_CFG_1 0x1004 #define NPS_ENET_REG_GE_MAC_CFG_2 0x1008 @@ -108,25 +107,6 @@ struct nps_enet_buf_int_enable { }; }; -/* Interrupt cause for data buffer events register */ -struct nps_enet_buf_int_cause { - union { - /* tx_done: Interrupt in the case when current frame was - * read from TX buffer. - * rx_rdy: Interrupt in the case when new frame is ready - * in RX buffer. 
- */ - struct { - u32 - __reserved:30, - tx_done:1, - rx_rdy:1; - }; - - u32 value; - }; -}; - /* Gbps Eth MAC Configuration 0 register */ struct nps_enet_ge_mac_cfg_0 { union { diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index de63266de..dd4ca39d5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -364,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) return 0; } -static int +static struct bufdesc * fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, struct net_device *ndev) @@ -439,10 +439,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bdp->cbd_sc = status; } - txq->cur_tx = bdp; - - return 0; - + return bdp; dma_mapping_error: bdp = txq->cur_tx; for (i = 0; i < frag; i++) { @@ -450,7 +447,7 @@ dma_mapping_error: dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); } - return NETDEV_TX_OK; + return ERR_PTR(-ENOMEM); } static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, @@ -467,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, unsigned int estatus = 0; unsigned int index; int entries_free; - int ret; entries_free = fec_enet_get_free_txdesc_num(fep, txq); if (entries_free < MAX_SKB_FRAGS + 1) { @@ -485,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, /* Fill in a Tx ring entry */ bdp = txq->cur_tx; + last_bdp = bdp; status = bdp->cbd_sc; status &= ~BD_ENET_TX_STATS; @@ -513,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, } if (nr_frags) { - ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); - if (ret) - return ret; + last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); + if (IS_ERR(last_bdp)) + return NETDEV_TX_OK; } else { status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) { @@ -544,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, ebdp->cbd_esc = estatus; } - last_bdp = txq->cur_tx; index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); /* Save skb pointer */ txq->tx_skbuff[index] = skb; @@ -563,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, skb_tx_timestamp(skb); + /* Make sure the update to bdp and tx_skbuff are performed before + * cur_tx. 
+ */ + wmb(); txq->cur_tx = bdp; /* Trigger transmission start */ @@ -1218,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* get next bdp of dirty_tx */ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); - while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { - - /* current queue is empty */ - if (bdp == txq->cur_tx) + while (bdp != READ_ONCE(txq->cur_tx)) { + /* Order the load of cur_tx and cbd_sc */ + rmb(); + status = READ_ONCE(bdp->cbd_sc); + if (status & BD_ENET_TX_READY) break; index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); @@ -1275,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); + /* Make sure the update to bdp and tx_skbuff are performed + * before dirty_tx + */ + wmb(); txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ @@ -1775,7 +1780,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) int ret = 0; ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; fep->mii_timeout = 0; @@ -1811,11 +1816,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, struct fec_enet_private *fep = bus->priv; struct device *dev = &fep->pdev->dev; unsigned long time_left; - int ret = 0; + int ret; ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; + else + ret = 0; fep->mii_timeout = 0; reinit_completion(&fep->mdio_done); @@ -2866,7 +2873,7 @@ fec_enet_open(struct net_device *ndev) int ret; ret = pm_runtime_get_sync(&fep->pdev->dev); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; pinctrl_pm_select_default_state(&fep->pdev->dev); @@ -3024,6 +3031,14 @@ fec_set_mac_address(struct net_device *ndev, void *p) memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); } + /* Add netif status check here to avoid system hang in below case: + * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; + * After ethx down, fec all clocks are gated off and then register + * access causes system hang. + */ + if (!netif_running(ndev)) + return 0; + writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), fep->hwp + FEC_ADDR_LOW); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index f457a23d0..1543cf0e8 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -506,12 +506,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) break; default: - /* - * register RXMTRL must be set in order to do V1 packets, - * therefore it is not possible to time stamp both V1 Sync and - * Delay_Req messages and hardware does not support - * timestamping all packets => return error - */ fep->hwts_rx_en = 1; config.rx_filter = HWTSTAMP_FILTER_ALL; break; diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 3c40f6b99..55c36230e 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -198,17 +198,28 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus) #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) /* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MDIO registers (struct gfar) * This is mildly evil, but so is our hardware for doing this. 
* Also, we have to cast back to struct gfar because of * definition weirdness done in gianfar.h. */ -static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) +static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p) { struct gfar __iomem *enet_regs = p; return &enet_regs->tbipa; } +/* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar) + */ +static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p) +{ + return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs)); +} + /* * Return the TBIPAR address for an eTSEC2 node */ @@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) /* - * Return the TBIPAR address for a QE MDIO node + * Return the TBIPAR address for a QE MDIO node, starting from the address + * of the mapped MII registers (struct fsl_pq_mii) */ static uint32_t __iomem *get_ucc_tbipa(void __iomem *p) { - struct fsl_pq_mdio __iomem *mdio = p; + struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii); return &mdio->utbipar; } @@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "fsl,gianfar-tbi", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { .compatible = "fsl,gianfar-mdio", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { @@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "gianfar", .data = &(struct fsl_pq_mdio_data) { .mii_offset = offsetof(struct fsl_pq_mdio, mii), - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mdio, }, }, { @@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) tbipa = data->get_tbipa(priv->map); + /* + * Add consistency check to make sure TBI is contained + * within the mapped range (not because we would get a + * segfault, rather to catch bugs in computing TBI + * address). Print error message but continue anyway. 
+ */ + if ((void *)tbipa > priv->map + resource_size(&res) - 4) + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", + ((void *)tbipa - priv->map) + 4); + iowrite32be(be32_to_cpup(prop), tbipa); } } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 10b3bbbba..ce38d266f 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -109,15 +109,15 @@ #define TX_TIMEOUT (1*HZ) -const char gfar_driver_version[] = "1.3"; +const char gfar_driver_version[] = "2.0"; static int gfar_enet_open(struct net_device *dev); static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); -static struct sk_buff *gfar_new_skb(struct net_device *dev, - dma_addr_t *bufaddr); +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); @@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi); +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb); static void gfar_halt_nodisable(struct gfar_private *priv); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, @@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, bdp->lstatus = cpu_to_be32(lstatus); } -static int gfar_init_bds(struct net_device *ndev) +static void gfar_init_bds(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); struct gfar __iomem *regs = priv->gfargrp[0].regs; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *txbdp; - struct rxbd8 *rxbdp; u32 __iomem *rfbptr; int i, j; - dma_addr_t bufaddr; for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; @@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev) rfbptr = ®s->rfbptr0; for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->cur_rx = rx_queue->rx_bd_base; - rx_queue->skb_currx = 0; - rxbdp = rx_queue->rx_bd_base; - for (j = 0; j < rx_queue->rx_ring_size; j++) { - struct sk_buff *skb = rx_queue->rx_skbuff[j]; + rx_queue->next_to_clean = 0; + rx_queue->next_to_use = 0; + rx_queue->next_to_alloc = 0; - if (skb) { - bufaddr = be32_to_cpu(rxbdp->bufPtr); - } else { - skb = gfar_new_skb(ndev, &bufaddr); - if (!skb) { - netdev_err(ndev, "Can't allocate RX buffers\n"); - return -ENOMEM; - } - rx_queue->rx_skbuff[j] = skb; - } - - gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); - rxbdp++; - } + /* make sure next_to_clean != next_to_use after this + * by leaving at least 1 unused descriptor + */ + gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); rx_queue->rfbptr = rfbptr; rfbptr += 2; } - - return 0; } static int gfar_alloc_skb_resources(struct net_device *ndev) { void *vaddr; dma_addr_t addr; - int i, j, k; + int i, j; struct gfar_private *priv = 
netdev_priv(ndev); struct device *dev = priv->dev; struct gfar_priv_tx_q *tx_queue = NULL; @@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) rx_queue = priv->rx_queue[i]; rx_queue->rx_bd_base = vaddr; rx_queue->rx_bd_dma_base = addr; - rx_queue->dev = ndev; + rx_queue->ndev = ndev; + rx_queue->dev = dev; addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; } @@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) if (!tx_queue->tx_skbuff) goto cleanup; - for (k = 0; k < tx_queue->tx_ring_size; k++) - tx_queue->tx_skbuff[k] = NULL; + for (j = 0; j < tx_queue->tx_ring_size; j++) + tx_queue->tx_skbuff[j] = NULL; } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->rx_skbuff = - kmalloc_array(rx_queue->rx_ring_size, - sizeof(*rx_queue->rx_skbuff), - GFP_KERNEL); - if (!rx_queue->rx_skbuff) + rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_buff), + GFP_KERNEL); + if (!rx_queue->rx_buff) goto cleanup; - - for (j = 0; j < rx_queue->rx_ring_size; j++) - rx_queue->rx_skbuff[j] = NULL; } - if (gfar_init_bds(ndev)) - goto cleanup; + gfar_init_bds(ndev); return 0; @@ -354,28 +333,16 @@ static void gfar_init_rqprm(struct gfar_private *priv) } } -static void gfar_rx_buff_size_config(struct gfar_private *priv) +static void gfar_rx_offload_en(struct gfar_private *priv) { - int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; - /* set this when rx hw offload (TOE) functions are being used */ priv->uses_rxfcb = 0; if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) priv->uses_rxfcb = 1; - if (priv->hwts_rx_en) + if (priv->hwts_rx_en || priv->rx_filer_enable) priv->uses_rxfcb = 1; - - if (priv->uses_rxfcb) - frame_size += GMAC_FCB_LEN; - - frame_size += priv->padding; - - frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + - INCREMENTAL_BUFFER_SIZE; - - priv->rx_buffer_size = frame_size; } static void gfar_mac_rx_config(struct gfar_private *priv) @@ -384,7 +351,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv) u32 rctrl = 0; if (priv->rx_filer_enable) { - rctrl |= RCTRL_FILREN; + rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; /* Program the RIR0 reg with the required distribution */ if (priv->poll_mode == GFAR_SQ_POLLING) gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); @@ -593,9 +560,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv) if (!priv->rx_queue[i]) return -ENOMEM; - priv->rx_queue[i]->rx_skbuff = NULL; priv->rx_queue[i]->qindex = i; - priv->rx_queue[i]->dev = priv->ndev; + priv->rx_queue[i]->ndev = priv->ndev; } return 0; } @@ -1187,12 +1153,11 @@ void gfar_mac_reset(struct gfar_private *priv) udelay(3); - /* Compute rx_buff_size based on config flags */ - gfar_rx_buff_size_config(priv); + gfar_rx_offload_en(priv); /* Initialize the max receive frame/buffer lengths */ - gfar_write(®s->maxfrm, priv->rx_buffer_size); - gfar_write(®s->mrblr, priv->rx_buffer_size); + gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE); + gfar_write(®s->mrblr, GFAR_RXB_SIZE); /* Initialize the Minimum Frame Length Register */ gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); @@ -1200,12 +1165,11 @@ void gfar_mac_reset(struct gfar_private *priv) /* Initialize MACCFG2. 
*/ tempval = MACCFG2_INIT_SETTINGS; - /* If the mtu is larger than the max size for standard - * ethernet frames (ie, a jumbo frame), then set maccfg2 - * to allow huge frames, and to check the length + /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 + * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, + * and by checking RxBD[LG] and discarding larger than MAXFRM. */ - if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || - gfar_has_errata(priv, GFAR_ERRATA_74)) + if (gfar_has_errata(priv, GFAR_ERRATA_74)) tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; gfar_write(®s->maccfg2, tempval); @@ -1415,8 +1379,6 @@ static int gfar_probe(struct platform_device *ofdev) priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) dev->needed_headroom = GMAC_FCB_LEN; - priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; - /* Initializing some of the rx/tx queue level parameters */ for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; @@ -1599,10 +1561,7 @@ static int gfar_restore(struct device *dev) return 0; } - if (gfar_init_bds(ndev)) { - free_skb_resources(priv); - return -ENOMEM; - } + gfar_init_bds(ndev); gfar_mac_reset(priv); @@ -1751,8 +1710,10 @@ static void gfar_configure_serdes(struct net_device *dev) * everything for us? Resetting it takes the link down and requires * several seconds for it to come back. */ - if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) + if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { + put_device(&tbiphy->dev); return; + } /* Single clk mode, mii mode off(for serdes communication) */ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); @@ -1764,6 +1725,8 @@ static void gfar_configure_serdes(struct net_device *dev) phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); + + put_device(&tbiphy->dev); } static int __gfar_is_rx_idle(struct gfar_private *priv) @@ -1893,26 +1856,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) { - struct rxbd8 *rxbdp; - struct gfar_private *priv = netdev_priv(rx_queue->dev); int i; - rxbdp = rx_queue->rx_bd_base; + struct rxbd8 *rxbdp = rx_queue->rx_bd_base; + + if (rx_queue->skb) + dev_kfree_skb(rx_queue->skb); for (i = 0; i < rx_queue->rx_ring_size; i++) { - if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr), - priv->rx_buffer_size, - DMA_FROM_DEVICE); - dev_kfree_skb_any(rx_queue->rx_skbuff[i]); - rx_queue->rx_skbuff[i] = NULL; - } + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; + rxbdp->lstatus = 0; rxbdp->bufPtr = 0; rxbdp++; + + if (!rxb->page) + continue; + + dma_unmap_single(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rxb->page); + + rxb->page = NULL; } - kfree(rx_queue->rx_skbuff); - rx_queue->rx_skbuff = NULL; + + kfree(rx_queue->rx_buff); + rx_queue->rx_buff = NULL; } /* If there are any tx skbs or rx skbs still around, free them. 
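/*
 * The gfar_configure_serdes() hunk above adds put_device() on every
 * return path after the TBI PHY lookup.  A minimal sketch of the
 * pairing, assuming the lookup (of_phy_find_device() in this driver)
 * returns the phy_device with its device reference count raised; the
 * demo_ name is hypothetical.
 */
static void demo_touch_tbi_phy(struct device_node *tbi_node)
{
	struct phy_device *tbiphy = of_phy_find_device(tbi_node);

	if (!tbiphy)
		return;

	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->dev);	/* early return must drop the ref */
		return;
	}

	/* ... program the TBI PHY here ... */

	put_device(&tbiphy->dev);		/* balance the lookup */
}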
@@ -1937,7 +1906,7 @@ static void free_skb_resources(struct gfar_private *priv) for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if (rx_queue->rx_skbuff) + if (rx_queue->rx_buff) free_skb_rx_queue(rx_queue); } @@ -2005,8 +1974,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) /* Install our interrupt handlers for Error, * Transmit, and Receive */ - err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, - IRQF_NO_SUSPEND, + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, gfar_irq(grp, ER)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -2014,6 +1982,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) goto err_irq_fail; } + enable_irq_wake(gfar_irq(grp, ER)->irq); + err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, gfar_irq(grp, TX)->name, grp); if (err < 0) { @@ -2029,14 +1999,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) goto rx_irq_fail; } } else { - err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, - IRQF_NO_SUSPEND, + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, gfar_irq(grp, TX)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", gfar_irq(grp, TX)->irq); goto err_irq_fail; } + enable_irq_wake(gfar_irq(grp, TX)->irq); } return 0; @@ -2500,7 +2470,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) struct gfar_private *priv = netdev_priv(dev); int frame_size = new_mtu + ETH_HLEN; - if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) { netif_err(priv, drv, dev, "Invalid MTU setting\n"); return -EINVAL; } @@ -2554,15 +2524,6 @@ static void gfar_timeout(struct net_device *dev) schedule_work(&priv->reset_task); } -static void gfar_align_skb(struct sk_buff *skb) -{ - /* We need the data buffer to be aligned properly. 
We will reserve - * as many bytes as needed to align the data properly - */ - skb_reserve(skb, RXBUF_ALIGNMENT - - (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); -} - /* Interrupt Handler for Transmit complete */ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { @@ -2620,7 +2581,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { struct skb_shared_hwtstamps shhwtstamps; - u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); + u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & + ~0x7UL); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(*ns); @@ -2669,49 +2631,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) netdev_tx_completed_queue(txq, howmany, bytes_sent); } -static struct sk_buff *gfar_alloc_skb(struct net_device *dev) +static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; + struct page *page; + dma_addr_t addr; - skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); - if (!skb) - return NULL; + page = dev_alloc_page(); + if (unlikely(!page)) + return false; - gfar_align_skb(skb); + addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rxq->dev, addr))) { + __free_page(page); - return skb; + return false; + } + + rxb->dma = addr; + rxb->page = page; + rxb->page_offset = 0; + + return true; } -static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) +static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; - dma_addr_t addr; + struct gfar_private *priv = netdev_priv(rx_queue->ndev); + struct gfar_extra_stats *estats = &priv->extra_stats; - skb = gfar_alloc_skb(dev); - if (!skb) - return NULL; + netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); + atomic64_inc(&estats->rx_alloc_err); +} - addr = dma_map_single(priv->dev, skb->data, - priv->rx_buffer_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, addr))) { - dev_kfree_skb_any(skb); - return NULL; +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt) +{ + struct rxbd8 *bdp; + struct gfar_rx_buff *rxb; + int i; + + i = rx_queue->next_to_use; + bdp = &rx_queue->rx_bd_base[i]; + rxb = &rx_queue->rx_buff[i]; + + while (alloc_cnt--) { + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!gfar_new_page(rx_queue, rxb))) { + gfar_rx_alloc_err(rx_queue); + break; + } + } + + /* Setup the new RxBD */ + gfar_init_rxbdp(rx_queue, bdp, + rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); + + /* Update to the next pointer */ + bdp++; + rxb++; + + if (unlikely(++i == rx_queue->rx_ring_size)) { + i = 0; + bdp = rx_queue->rx_bd_base; + rxb = rx_queue->rx_buff; + } } - *bufaddr = addr; - return skb; + rx_queue->next_to_use = i; + rx_queue->next_to_alloc = i; } -static inline void count_errors(unsigned short status, struct net_device *dev) +static void count_errors(u32 lstatus, struct net_device *ndev) { - struct gfar_private *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; + struct gfar_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; struct gfar_extra_stats *estats = &priv->extra_stats; /* If the packet was truncated, none of the other errors matter */ - if (status & RXBD_TRUNCATED) { + if (lstatus & 
BD_LFLAG(RXBD_TRUNCATED)) { stats->rx_length_errors++; atomic64_inc(&estats->rx_trunc); @@ -2719,25 +2717,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev) return; } /* Count the errors, if there were any */ - if (status & (RXBD_LARGE | RXBD_SHORT)) { + if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { stats->rx_length_errors++; - if (status & RXBD_LARGE) + if (lstatus & BD_LFLAG(RXBD_LARGE)) atomic64_inc(&estats->rx_large); else atomic64_inc(&estats->rx_short); } - if (status & RXBD_NONOCTET) { + if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { stats->rx_frame_errors++; atomic64_inc(&estats->rx_nonoctet); } - if (status & RXBD_CRCERR) { + if (lstatus & BD_LFLAG(RXBD_CRCERR)) { atomic64_inc(&estats->rx_crcerr); stats->rx_crc_errors++; } - if (status & RXBD_OVERRUN) { + if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { atomic64_inc(&estats->rx_overrun); - stats->rx_crc_errors++; + stats->rx_over_errors++; } } @@ -2788,6 +2786,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id) return IRQ_HANDLED; } +static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, + struct sk_buff *skb, bool first) +{ + unsigned int size = lstatus & BD_LENGTH_MASK; + struct page *page = rxb->page; + + /* Remove the FCS from the packet length */ + if (likely(lstatus & BD_LFLAG(RXBD_LAST))) + size -= ETH_FCS_LEN; + + if (likely(first)) + skb_put(skb, size); + else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); + + /* try reuse page */ + if (unlikely(page_count(page) != 1)) + return false; + + /* change offset to the other half */ + rxb->page_offset ^= GFAR_RXB_TRUESIZE; + + atomic_inc(&page->_count); + + return true; +} + +static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, + struct gfar_rx_buff *old_rxb) +{ + struct gfar_rx_buff *new_rxb; + u16 nta = rxq->next_to_alloc; + + new_rxb = &rxq->rx_buff[nta]; + + /* find next buf that can reuse a page */ + nta++; + rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; + + /* copy page reference */ + *new_rxb = *old_rxb; + + /* sync for use by the device */ + dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, + old_rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); +} + +static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, + u32 lstatus, struct sk_buff *skb) +{ + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; + struct page *page = rxb->page; + bool first = false; + + if (likely(!skb)) { + void *buff_addr = page_address(page) + rxb->page_offset; + + skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); + if (unlikely(!skb)) { + gfar_rx_alloc_err(rx_queue); + return NULL; + } + skb_reserve(skb, RXBUF_ALIGNMENT); + first = true; + } + + dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); + + if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { + /* reuse the free half of the page */ + gfar_reuse_rx_page(rx_queue, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear rxb content */ + rxb->page = NULL; + + return skb; +} + static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) { /* If valid headers were found, and valid sums @@ -2802,10 +2887,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) } /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. 
*/ -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi) +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) { - struct gfar_private *priv = netdev_priv(dev); + struct gfar_private *priv = netdev_priv(ndev); struct rxfcb *fcb = NULL; /* fcb is at the beginning if exists */ @@ -2814,10 +2898,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, /* Remove the FCB from the skb * Remove the padded bytes, if there are any */ - if (amount_pull) { - skb_record_rx_queue(skb, fcb->rq); - skb_pull(skb, amount_pull); - } + if (priv->uses_rxfcb) + skb_pull(skb, GMAC_FCB_LEN); /* Get receive timestamp from the skb */ if (priv->hwts_rx_en) { @@ -2831,24 +2913,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, if (priv->padding) skb_pull(skb, priv->padding); - if (dev->features & NETIF_F_RXCSUM) + if (ndev->features & NETIF_F_RXCSUM) gfar_rx_checksum(skb, fcb); /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, ndev); /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. */ - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && be16_to_cpu(fcb->flags) & RXFCB_VLN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(fcb->vlctl)); - - /* Send the packet up the stack */ - napi_gro_receive(napi, skb); - } /* gfar_clean_rx_ring() -- Processes each frame in the rx ring @@ -2857,91 +2935,89 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, */ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) { - struct net_device *dev = rx_queue->dev; - struct rxbd8 *bdp, *base; - struct sk_buff *skb; - int pkt_len; - int amount_pull; - int howmany = 0; - struct gfar_private *priv = netdev_priv(dev); + struct net_device *ndev = rx_queue->ndev; + struct gfar_private *priv = netdev_priv(ndev); + struct rxbd8 *bdp; + int i, howmany = 0; + struct sk_buff *skb = rx_queue->skb; + int cleaned_cnt = gfar_rxbd_unused(rx_queue); + unsigned int total_bytes = 0, total_pkts = 0; /* Get the first full descriptor */ - bdp = rx_queue->cur_rx; - base = rx_queue->rx_bd_base; + i = rx_queue->next_to_clean; - amount_pull = priv->uses_rxfcb ? 
GMAC_FCB_LEN : 0; + while (rx_work_limit--) { + u32 lstatus; - while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) { - struct sk_buff *newskb; - dma_addr_t bufaddr; + if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + cleaned_cnt = 0; + } + + bdp = &rx_queue->rx_bd_base[i]; + lstatus = be32_to_cpu(bdp->lstatus); + if (lstatus & BD_LFLAG(RXBD_EMPTY)) + break; + /* order rx buffer descriptor reads */ rmb(); - /* Add another skb for the future */ - newskb = gfar_new_skb(dev, &bufaddr); + /* fetch next to clean buffer from the ring */ + skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); + if (unlikely(!skb)) + break; - skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; + cleaned_cnt++; + howmany++; - dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), - priv->rx_buffer_size, DMA_FROM_DEVICE); - - if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) && - be16_to_cpu(bdp->length) > priv->rx_buffer_size)) - bdp->status = cpu_to_be16(RXBD_LARGE); - - /* We drop the frame if we failed to allocate a new buffer */ - if (unlikely(!newskb || - !(be16_to_cpu(bdp->status) & RXBD_LAST) || - be16_to_cpu(bdp->status) & RXBD_ERR)) { - count_errors(be16_to_cpu(bdp->status), dev); - - if (unlikely(!newskb)) { - newskb = skb; - bufaddr = be32_to_cpu(bdp->bufPtr); - } else if (skb) - dev_kfree_skb(skb); - } else { - /* Increment the number of packets */ - rx_queue->stats.rx_packets++; - howmany++; - - if (likely(skb)) { - pkt_len = be16_to_cpu(bdp->length) - - ETH_FCS_LEN; - /* Remove the FCS from the packet length */ - skb_put(skb, pkt_len); - rx_queue->stats.rx_bytes += pkt_len; - skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(dev, skb, amount_pull, - &rx_queue->grp->napi_rx); + if (unlikely(++i == rx_queue->rx_ring_size)) + i = 0; - } else { - netif_warn(priv, rx_err, dev, "Missing skb!\n"); - rx_queue->stats.rx_dropped++; - atomic64_inc(&priv->extra_stats.rx_skbmissing); - } + rx_queue->next_to_clean = i; + + /* fetch next buffer if not the last in frame */ + if (!(lstatus & BD_LFLAG(RXBD_LAST))) + continue; + + if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { + count_errors(lstatus, ndev); + /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + continue; } - rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; + /* Increment the number of packets */ + total_pkts++; + total_bytes += skb->len; - /* Setup the new bdp */ - gfar_init_rxbdp(rx_queue, bdp, bufaddr); + skb_record_rx_queue(skb, rx_queue->qindex); - /* Update Last Free RxBD pointer for LFC */ - if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) - gfar_write(rx_queue->rfbptr, (u32)bdp); + gfar_process_frame(ndev, skb); - /* Update to the next pointer */ - bdp = next_bd(bdp, base, rx_queue->rx_ring_size); + /* Send the packet up the stack */ + napi_gro_receive(&rx_queue->grp->napi_rx, skb); - /* update to point at the next skb */ - rx_queue->skb_currx = (rx_queue->skb_currx + 1) & - RX_RING_MOD_MASK(rx_queue->rx_ring_size); + skb = NULL; } - /* Update the current rxbd pointer to be the next one */ - rx_queue->cur_rx = bdp; + /* Store incomplete frames for completion */ + rx_queue->skb = skb; + + rx_queue->stats.rx_packets += total_pkts; + rx_queue->stats.rx_bytes += total_bytes; + + if (cleaned_cnt) + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(priv->tx_actual_en)) { + u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + + gfar_write(rx_queue->rfbptr, bdp_dma); + } return howmany; } 
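/*
 * Sketch of the half-page recycling that gfar_add_rx_frag() and
 * gfar_reuse_rx_page() above implement: each page is split into two
 * GFAR_RXB_TRUESIZE (2 KiB) halves; once one half is handed to the
 * stack, the buffer flips to the other half and the page is recycled
 * for as long as the driver holds the only remaining reference.  The
 * demo_ name is hypothetical; the reference is taken via page->_count
 * exactly as the 4.3-era code above does.
 */
static bool demo_try_recycle_half_page(struct gfar_rx_buff *rxb)
{
	/* the stack still references the page => cannot reuse it */
	if (page_count(rxb->page) != 1)
		return false;

	rxb->page_offset ^= GFAR_RXB_TRUESIZE;	/* flip to the other half */
	atomic_inc(&rxb->page->_count);		/* take a ref for that half */
	return true;
}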
@@ -3386,11 +3462,9 @@ static irqreturn_t gfar_error(int irq, void *grp_id) netif_dbg(priv, tx_err, dev, "Transmit Error\n"); } if (events & IEVENT_BSY) { - dev->stats.rx_errors++; + dev->stats.rx_over_errors++; atomic64_inc(&priv->extra_stats.rx_bsy); - gfar_receive(irq, grp_id); - netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", gfar_read(®s->rstat)); } @@ -3459,7 +3533,6 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) struct phy_device *phydev = priv->phydev; struct gfar_priv_rx_q *rx_queue = NULL; int i; - struct rxbd8 *bdp; if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) return; @@ -3516,15 +3589,11 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) /* Turn last free buffer recording on */ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { for (i = 0; i < priv->num_rx_queues; i++) { + u32 bdp_dma; + rx_queue = priv->rx_queue[i]; - bdp = rx_queue->cur_rx; - /* skip to previous bd */ - bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, - rx_queue->rx_bd_base, - rx_queue->rx_ring_size); - - if (rx_queue->rfbptr) - gfar_write(rx_queue->rfbptr, (u32)bdp); + bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + gfar_write(rx_queue->rfbptr, bdp_dma); } priv->tx_actual_en = 1; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 5545e4103..8c1994856 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -71,11 +71,6 @@ struct ethtool_rx_list { /* Number of bytes to align the rx bufs to */ #define RXBUF_ALIGNMENT 64 -/* The number of bytes which composes a unit for the purpose of - * allocating data buffers. ie-for any given MTU, the data buffer - * will be the next highest multiple of 512 bytes. */ -#define INCREMENTAL_BUFFER_SIZE 512 - #define PHY_INIT_TIMEOUT 100000 #define DRV_NAME "gfar-enet" @@ -92,6 +87,8 @@ extern const char gfar_driver_version[]; #define DEFAULT_TX_RING_SIZE 256 #define DEFAULT_RX_RING_SIZE 256 +#define GFAR_RX_BUFF_ALLOC 16 + #define GFAR_RX_MAX_RING_SIZE 256 #define GFAR_TX_MAX_RING_SIZE 256 @@ -103,11 +100,14 @@ extern const char gfar_driver_version[]; #define DEFAULT_RX_LFC_THR 16 #define DEFAULT_LFC_PTVVAL 4 -#define DEFAULT_RX_BUFFER_SIZE 1536 +#define GFAR_RXB_SIZE 1536 +#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define GFAR_RXB_TRUESIZE 2048 + #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) -#define JUMBO_BUFFER_SIZE 9728 -#define JUMBO_FRAME_SIZE 9600 +#define GFAR_JUMBO_FRAME_SIZE 9600 #define DEFAULT_FIFO_TX_THR 0x100 #define DEFAULT_FIFO_TX_STARVE 0x40 @@ -640,6 +640,7 @@ struct rmon_mib }; struct gfar_extra_stats { + atomic64_t rx_alloc_err; atomic64_t rx_large; atomic64_t rx_short; atomic64_t rx_nonoctet; @@ -651,7 +652,6 @@ struct gfar_extra_stats { atomic64_t eberr; atomic64_t tx_babt; atomic64_t tx_underrun; - atomic64_t rx_skbmissing; atomic64_t tx_timeout; }; @@ -1012,34 +1012,42 @@ struct rx_q_stats { unsigned long rx_dropped; }; +struct gfar_rx_buff { + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + /** * struct gfar_priv_rx_q - per rx queue structure - * @rx_skbuff: skb pointers - * @skb_currx: currently use skb pointer + * @rx_buff: Array of buffer info metadata structs * @rx_bd_base: First rx buffer descriptor - * @cur_rx: Next free rx ring entry + * @next_to_use: index of the next buffer to be alloc'd + * @next_to_clean: index of the next buffer to be cleaned * 
@qindex: index of this queue - * @dev: back pointer to the dev structure + * @ndev: back pointer to net_device * @rx_ring_size: Rx ring size * @rxcoalescing: enable/disable rx-coalescing * @rxic: receive interrupt coalescing value */ struct gfar_priv_rx_q { - struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES); - dma_addr_t rx_bd_dma_base; + struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES); struct rxbd8 *rx_bd_base; - struct rxbd8 *cur_rx; - struct net_device *dev; - struct gfar_priv_grp *grp; + struct net_device *ndev; + struct device *dev; + u16 rx_ring_size; + u16 qindex; + struct gfar_priv_grp *grp; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + struct sk_buff *skb; struct rx_q_stats stats; - u16 skb_currx; - u16 qindex; - unsigned int rx_ring_size; - /* RX Coalescing values */ + u32 __iomem *rfbptr; unsigned char rxcoalescing; unsigned long rxic; - u32 __iomem *rfbptr; + dma_addr_t rx_bd_dma_base; }; enum gfar_irqinfo_id { @@ -1109,7 +1117,6 @@ struct gfar_private { struct device *dev; struct net_device *ndev; enum gfar_errata errata; - unsigned int rx_buffer_size; u16 uses_rxfcb; u16 padding; @@ -1292,6 +1299,28 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp) bdp->lstatus = cpu_to_be32(lstatus); } +static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq) +{ + if (rxq->next_to_clean > rxq->next_to_use) + return rxq->next_to_clean - rxq->next_to_use - 1; + + return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1; +} + +static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq) +{ + struct rxbd8 *bdp; + u32 bdp_dma; + int i; + + i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1; + bdp = &rxq->rx_bd_base[i]; + bdp_dma = lower_32_bits(rxq->rx_bd_dma_base); + bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base; + + return bdp_dma; +} + irqreturn_t gfar_receive(int irq, void *dev_id); int startup_gfar(struct net_device *dev); void stop_gfar(struct net_device *dev); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 5b90fcf96..a33e4a829 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); static const char stat_gstrings[][ETH_GSTRING_LEN] = { + /* extra stats */ + "rx-allocation-errors", "rx-large-frame-errors", "rx-short-frame-errors", "rx-non-octet-errors", @@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = { "ethernet-bus-error", "tx-babbling-errors", "tx-underrun-errors", - "rx-skb-missing-errors", "tx-timeout-errors", + /* rmon stats */ "tx-rx-64-frames", "tx-rx-65-127-frames", "tx-rx-128-255-frames", @@ -674,14 +676,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) u32 fcr = 0x0, fpr = FPR_FILER_MASK; if (ethflow & RXH_L2DA) { - fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | + fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH | RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); priv->cur_filer_idx = priv->cur_filer_idx - 1; - fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | + fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH | RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; diff --git
a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 8e3cd77aa..664d0c261 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = { { .compatible = "fsl,etsec-ptp" }, {}, }; +MODULE_DEVICE_TABLE(of, match_table); static struct platform_driver gianfar_ptp_driver = { .driver = { diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 4dd40e057..650f7888e 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) value = phy_read(tbiphy, ENET_TBI_MII_CR); value &= ~0x1000; /* Turn off autonegotiation */ phy_write(tbiphy, ENET_TBI_MII_CR, value); + + put_device(&tbiphy->dev); } init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); @@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev) * everything for us? Resetting it takes the link down and requires * several seconds for it to come back. */ - if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) + if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) { + put_device(&tbiphy->dev); return; + } /* Single clk mode, mii mode off(for serdes communication) */ phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); @@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev) phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); + + put_device(&tbiphy->dev); } /* Configure the PHY for dev. diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index d49bee38c..253f8ed05 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev) struct net_device *ndev; struct hip04_priv *priv; struct resource *res; - unsigned int irq; + int irq; int ret; ndev = alloc_etherdev(sizeof(struct hip04_priv)); @@ -965,7 +965,6 @@ static struct platform_driver hip04_mac_driver = { .remove = hip04_remove, .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, .of_match_table = hip04_mac_match, }, }; diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c index b3bac25db..fca0a5be1 100644 --- a/drivers/net/ethernet/hisilicon/hip04_mdio.c +++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c @@ -174,7 +174,6 @@ static struct platform_driver hip04_mdio_driver = { .remove = hip04_mdio_remove, .driver = { .name = "hip04-mdio", - .owner = THIS_MODULE, .of_match_table = hip04_mdio_match, }, }; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 29bbb628d..7af870a3c 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -79,6 +79,11 @@ static unsigned int rx_flush __read_mostly = 0; module_param(rx_flush, uint, 0644); MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use"); +static bool old_large_send __read_mostly; +module_param(old_large_send, bool, S_IRUGO); +MODULE_PARM_DESC(old_large_send, + "Use old large send method on firmware that supports the new method"); + struct ibmveth_stat { char name[ETH_GSTRING_LEN]; int offset; @@ -101,7 +106,8 @@ struct ibmveth_stat ibmveth_stats[] = { { "fw_enabled_ipv4_csum", 
IBMVETH_STAT_OFF(fw_ipv4_csum_support) }, { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) }, { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) }, - { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) } + { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }, + { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) } }; /* simple methods of getting data from the current rxq entry */ @@ -848,25 +854,91 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) return rc1 ? rc1 : rc2; } +static int ibmveth_set_tso(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + unsigned long set_attr, clr_attr, ret_attr; + long ret1, ret2; + int rc1 = 0, rc2 = 0; + int restart = 0; + + if (netif_running(dev)) { + restart = 1; + adapter->pool_config = 1; + ibmveth_close(dev); + adapter->pool_config = 0; + } + + set_attr = 0; + clr_attr = 0; + + if (data) + set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED; + else + clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED; + + ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && + !old_large_send) { + ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, + set_attr, &ret_attr); + + if (ret2 != H_SUCCESS) { + netdev_err(dev, "unable to change tso settings. %d rc=%ld\n", + data, ret2); + + h_illan_attributes(adapter->vdev->unit_address, + set_attr, clr_attr, &ret_attr); + + if (data == 1) + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + rc1 = -EIO; + + } else { + adapter->fw_large_send_support = data; + adapter->large_send = data; + } + } else { + /* Older firmware version of large send offload does not + * support tcp6/ipv6 + */ + if (data == 1) { + dev->features &= ~NETIF_F_TSO6; + netdev_info(dev, "TSO feature requires all partitions to have updated driver"); + } + adapter->large_send = data; + } + + if (restart) + rc2 = ibmveth_open(dev); + + return rc1 ? rc1 : rc2; +} + static int ibmveth_set_features(struct net_device *dev, netdev_features_t features) { struct ibmveth_adapter *adapter = netdev_priv(dev); int rx_csum = !!(features & NETIF_F_RXCSUM); - int rc; - netdev_features_t changed = features ^ dev->features; - - if (features & NETIF_F_TSO & changed) - netdev_info(dev, "TSO feature requires all partitions to have updated driver"); + int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6)); + int rc1 = 0, rc2 = 0; - if (rx_csum == adapter->rx_csum) - return 0; + if (rx_csum != adapter->rx_csum) { + rc1 = ibmveth_set_csum_offload(dev, rx_csum); + if (rc1 && !adapter->rx_csum) + dev->features = + features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + } - rc = ibmveth_set_csum_offload(dev, rx_csum); - if (rc && !adapter->rx_csum) - dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + if (large_send != adapter->large_send) { + rc2 = ibmveth_set_tso(dev, large_send); + if (rc2 && !adapter->large_send) + dev->features = + features & ~(NETIF_F_TSO | NETIF_F_TSO6); + } - return rc; + return rc1 ? 
rc1 : rc2; } static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -917,7 +989,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) static int ibmveth_send(struct ibmveth_adapter *adapter, - union ibmveth_buf_desc *descs) + union ibmveth_buf_desc *descs, unsigned long mss) { unsigned long correlator; unsigned int retry_count; @@ -934,7 +1006,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter, descs[0].desc, descs[1].desc, descs[2].desc, descs[3].desc, descs[4].desc, descs[5].desc, - correlator, &correlator); + correlator, &correlator, mss, + adapter->fw_large_send_support); } while ((ret == H_BUSY) && (retry_count--)); if (ret != H_SUCCESS && ret != H_DROPPED) { @@ -955,6 +1028,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, int last, i; int force_bounce = 0; dma_addr_t dma_addr; + unsigned long mss = 0; /* * veth handles a maximum of 6 segments including the header, so @@ -980,6 +1054,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, desc_flags = IBMVETH_BUF_VALID; + if (skb_is_gso(skb) && adapter->fw_large_send_support) + desc_flags |= IBMVETH_BUF_LRG_SND; + if (skb->ip_summed == CHECKSUM_PARTIAL) { unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; @@ -1007,7 +1084,7 @@ retry_bounce: descs[0].fields.flags_len = desc_flags | skb->len; descs[0].fields.address = adapter->bounce_buffer_dma; - if (ibmveth_send(adapter, descs)) { + if (ibmveth_send(adapter, descs, 0)) { adapter->tx_send_failed++; netdev->stats.tx_dropped++; } else { @@ -1041,16 +1118,23 @@ retry_bounce: descs[i+1].fields.address = dma_addr; } - if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) { - /* Put -1 in the IP checksum to tell phyp it - * is a largesend packet and put the mss in the TCP checksum. - */ - ip_hdr(skb)->check = 0xffff; - tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size); - adapter->tx_large_packets++; + if (skb_is_gso(skb)) { + if (adapter->fw_large_send_support) { + mss = (unsigned long)skb_shinfo(skb)->gso_size; + adapter->tx_large_packets++; + } else if (!skb_is_gso_v6(skb)) { + /* Put -1 in the IP checksum to tell phyp it + * is a largesend packet. Put the mss in + * the TCP checksum. 
+ */ + ip_hdr(skb)->check = 0xffff; + tcp_hdr(skb)->check = + cpu_to_be16(skb_shinfo(skb)->gso_size); + adapter->tx_large_packets++; + } } - if (ibmveth_send(adapter, descs)) { + if (ibmveth_send(adapter, descs, mss)) { adapter->tx_send_failed++; netdev->stats.tx_dropped++; } else { @@ -1401,6 +1485,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; unsigned int *mcastFilterSize_p; + long ret; + unsigned long ret_attr; dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", dev->unit_address); @@ -1449,10 +1535,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) SET_NETDEV_DEV(netdev, &dev->dev); netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features |= netdev->hw_features; - /* TSO is disabled by default */ - netdev->hw_features |= NETIF_F_TSO; + ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + /* If running older firmware, TSO should not be enabled by default */ + if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && + !old_large_send) { + netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + netdev->features |= netdev->hw_features; + } else { + netdev->hw_features |= NETIF_F_TSO; + } memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 41dedb1fb..4eade67fe 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -40,6 +40,8 @@ #define IbmVethMcastRemoveFilter 0x2UL #define IbmVethMcastClearFilterTable 0x3UL +#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL +#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL #define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL #define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL #define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL @@ -59,13 +61,20 @@ static inline long h_send_logical_lan(unsigned long unit_address, unsigned long desc1, unsigned long desc2, unsigned long desc3, unsigned long desc4, unsigned long desc5, unsigned long desc6, - unsigned long corellator_in, unsigned long *corellator_out) + unsigned long corellator_in, unsigned long *corellator_out, + unsigned long mss, unsigned long large_send_support) { long rc; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; - rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1, - desc2, desc3, desc4, desc5, desc6, corellator_in); + if (large_send_support) + rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, + desc1, desc2, desc3, desc4, desc5, desc6, + corellator_in, mss); + else + rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, + desc1, desc2, desc3, desc4, desc5, desc6, + corellator_in); *corellator_out = retbuf[0]; @@ -147,11 +156,13 @@ struct ibmveth_adapter { struct ibmveth_rx_q rx_queue; int pool_config; int rx_csum; + int large_send; void *bounce_buffer; dma_addr_t bounce_buffer_dma; u64 fw_ipv6_csum_support; u64 fw_ipv4_csum_support; + u64 fw_large_send_support; /* adapter specific stats */ u64 replenish_task_cycles; u64 replenish_no_mem; @@ -182,6 +193,7 @@ struct ibmveth_buf_desc_fields { #endif #define IBMVETH_BUF_VALID 0x80000000 #define IBMVETH_BUF_TOGGLE 0x40000000 +#define IBMVETH_BUF_LRG_SND 0x04000000 #define IBMVETH_BUF_NO_CSUM 0x02000000 #define IBMVETH_BUF_CSUM_GOOD 0x01000000 #define IBMVETH_BUF_LEN_MASK 0x00FFFFFF diff --git a/drivers/net/ethernet/intel/e100.c 
b/drivers/net/ethernet/intel/e100.c index dd54e8f63..8c8363dac 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1769,8 +1769,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb, dma_addr = pci_map_single(nic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); /* If we can't map the skb, have the upper layer try later */ - if (pci_dma_mapping_error(nic->pdev, dma_addr)) + if (pci_dma_mapping_error(nic->pdev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; return -ENOMEM; + } /* * Use the last 4 bytes of the SKB payload packet as the CRC, used for @@ -2966,6 +2969,11 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->params.cbs.max * sizeof(struct cb), sizeof(u32), 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } netif_info(nic, probe, nic->netdev, "addr 0x%llx, irq %d, MAC addr %pM\n", (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), @@ -2973,6 +2981,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +err_out_pool: + unregister_netdev(netdev); err_out_free: e100_free(nic); err_out_iounmap: diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 26459853c..34c551e32 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -106,14 +106,14 @@ #define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 /* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ -#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 +#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 #define K1_ENTRY_LATENCY 0 #define K1_MIN_TIME 1 #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ - +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index adfe1de78..faf4b3f3d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -48,7 +48,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION +#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -1737,12 +1737,6 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; adapter->flags2 &= ~FLAG2_IS_DISCARDING; - - writel(0, rx_ring->head); - if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_rdt_wa(rx_ring, 0); - else - writel(0, rx_ring->tail); } static void e1000e_downshift_workaround(struct work_struct *work) @@ -2447,12 +2441,6 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - - writel(0, tx_ring->head); - if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_tdt_wa(tx_ring, 0); - else - writel(0, tx_ring->tail); } /** @@ -2954,6 +2942,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); + writel(0, tx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + 
e1000e_update_tdt_wa(tx_ring, 0); + else + writel(0, tx_ring->tail); + /* Set the Tx Interrupt Delay register */ ew32(TIDV, adapter->tx_int_delay); /* Tx irq moderation */ @@ -3275,6 +3269,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); + writel(0, rx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, 0); + else + writel(0, rx_ring->tail); + /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); if (adapter->netdev->features & NETIF_F_RXCSUM) @@ -4599,6 +4599,7 @@ static int e1000_open(struct net_device *netdev) return 0; err_req_irq: + pm_qos_remove_request(&adapter->pm_qos_req); e1000e_release_hw_control(adapter); e1000_power_down_phy(adapter); e1000e_free_rx_resources(adapter->rx_ring); @@ -6327,6 +6328,33 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) return retval; } + /* Ensure that the appropriate bits are set in LPI_CTRL + * for EEE in Sx + */ + if ((hw->phy.type >= e1000_phy_i217) && + adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) { + u16 lpi_ctrl = 0; + + retval = hw->phy.ops.acquire(hw); + if (!retval) { + retval = e1e_rphy_locked(hw, I82579_LPI_CTRL, + &lpi_ctrl); + if (!retval) { + if (adapter->eee_advert & + hw->dev_spec.ich8lan.eee_lp_ability & + I82579_EEE_100_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + if (adapter->eee_advert & + hw->dev_spec.ich8lan.eee_lp_ability & + I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + retval = e1e_wphy_locked(hw, I82579_LPI_CTRL, + lpi_ctrl); + } + } + hw->phy.ops.release(hw); + } /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
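The EEE-in-Sx hunk above is a classic locked read-modify-write: take the PHY semaphore via hw->phy.ops.acquire(), read I82579_LPI_CTRL with e1e_rphy_locked(), OR in the 100M/1000M enable bits for exactly the speeds that both adapter->eee_advert and the partner's eee_lp_ability allow, write the register back, then release the semaphore on every path. Below is a minimal standalone sketch of just the bit logic, with the PHY register stubbed out so the file compiles on its own; all identifiers and bit positions here are illustrative stand-ins, not e1000e symbols.

#include <stdint.h>
#include <stdio.h>

#define EEE_100_SUPPORTED	(1u << 1)
#define EEE_1000_SUPPORTED	(1u << 2)
#define LPI_CTRL_100_ENABLE	(1u << 13)
#define LPI_CTRL_1000_ENABLE	(1u << 14)

static uint16_t fake_lpi_ctrl;	/* stands in for the LPI_CTRL PHY register */

static uint16_t rphy(void)	 { return fake_lpi_ctrl; }
static void	wphy(uint16_t v) { fake_lpi_ctrl = v; }

/* Enable EEE only for the speeds both link partners advertise,
 * mirroring the masking done in the hunk above. */
static void eee_in_sx(uint16_t advert, uint16_t lp_ability)
{
	uint16_t lpi_ctrl = rphy();

	if (advert & lp_ability & EEE_100_SUPPORTED)
		lpi_ctrl |= LPI_CTRL_100_ENABLE;
	if (advert & lp_ability & EEE_1000_SUPPORTED)
		lpi_ctrl |= LPI_CTRL_1000_ENABLE;

	wphy(lpi_ctrl);
}

int main(void)
{
	/* we advertise 100M and 1000M EEE; the partner only does 100M */
	eee_in_sx(EEE_100_SUPPORTED | EEE_1000_SUPPORTED, EEE_100_SUPPORTED);
	printf("LPI_CTRL = 0x%04x\n", fake_lpi_ctrl);	/* -> 0x2000 */
	return 0;
}

In the real driver the read-modify-write must happen under the acquired semaphore (the *_locked accessors), and ops.release() is called unconditionally so an early read failure cannot leak the lock.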
@@ -6476,7 +6504,7 @@ static int __e1000_resume(struct pci_dev *pdev) if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) - e1000e_disable_aspm_locked(pdev, aspm_disable_flag); + e1000e_disable_aspm(pdev, aspm_disable_flag); pci_set_master(pdev); @@ -6754,7 +6782,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) - e1000e_disable_aspm(pdev, aspm_disable_flag); + e1000e_disable_aspm_locked(pdev, aspm_disable_flag); err = pci_enable_device_mem(pdev); if (err) { diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index b24e5fee1..1d5e0b770 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -38,8 +38,8 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ -#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ -#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ +#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ +#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ #define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ @@ -125,7 +125,6 @@ (0x054E4 + ((_i - 16) * 8))) #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) -#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index ec76c3fa3..e7462793d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -79,10 +79,13 @@ #define I40E_MIN_MSIX 2 #define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ #define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ -#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ +/* max 16 qps */ +#define i40e_default_queues_per_vmdq(pf) \ + (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ -#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */ +#define i40e_pf_get_max_q_per_tc(pf) \ + (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 
128 : 64) #define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #ifdef I40E_FCOE @@ -98,7 +101,7 @@ #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) /* Ethtool Private Flags */ -#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) +#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -289,35 +292,42 @@ struct i40e_pf { struct work_struct service_task; u64 flags; -#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1) -#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2) -#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3) -#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4) -#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5) -#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6) -#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7) -#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8) -#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9) +#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) +#define I40E_FLAG_MSI_ENABLED BIT_ULL(2) +#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) +#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4) +#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5) +#define I40E_FLAG_RSS_ENABLED BIT_ULL(6) +#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) +#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) +#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9) +#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) #ifdef I40E_FCOE -#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11) +#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #endif /* I40E_FCOE */ -#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12) -#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13) -#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14) -#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15) -#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17) -#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18) -#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19) -#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20) -#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21) -#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22) -#define I40E_FLAG_PTP (u64)(1 << 25) -#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26) +#define I40E_FLAG_IN_NETPOLL BIT_ULL(12) +#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) +#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) +#define I40E_FLAG_FILTER_SYNC BIT_ULL(15) +#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17) +#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18) +#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19) +#define I40E_FLAG_DCB_ENABLED BIT_ULL(20) +#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21) +#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) +#define I40E_FLAG_PTP BIT_ULL(25) +#define I40E_FLAG_MFP_ENABLED BIT_ULL(26) #ifdef CONFIG_I40E_VXLAN -#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) +#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27) #endif -#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) -#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) +#define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) +#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) +#define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31) +#define I40E_FLAG_HW_ATR_EVICT_CAPABLE BIT_ULL(32) +#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33) +#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34) +#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35) +#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) /* tracks features that get auto disabled by errors */ @@ -362,6 +372,7 @@ struct i40e_pf { #ifdef CONFIG_DEBUG_FS struct dentry *i40e_dbg_pf; #endif /* CONFIG_DEBUG_FS */ + bool cur_promisc; u16 instance; /* A unique number per i40e_pf instance in the system */ @@ -432,6 +443,8 @@ struct i40e_veb { bool 
stat_offsets_loaded; struct i40e_eth_stats stats; struct i40e_eth_stats stats_offsets; + struct i40e_veb_tc_stats tc_stats; + struct i40e_veb_tc_stats tc_stats_offsets; }; /* struct that defines a VSI, associated with a dev */ @@ -443,8 +456,8 @@ struct i40e_vsi { u32 current_netdev_flags; unsigned long state; -#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0) -#define I40E_VSI_FLAG_VEB_OWNER (1<<1) +#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0) +#define I40E_VSI_FLAG_VEB_OWNER BIT(1) unsigned long flags; struct list_head mac_filter_list; @@ -550,6 +563,7 @@ struct i40e_q_vector { cpumask_t affinity_mask; struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; + bool arm_wb_state; } ____cacheline_internodealigned_in_smp; /* lan device */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 3e0d20037..c0e943aec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { @@ -1007,6 +1014,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 929e3d72a..95d23bfbc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MINOR 0x0004 struct i40e_aq_desc { __le16 flags; @@ -132,12 +132,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -262,7 +257,10 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -274,8 +272,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -509,7 +505,8 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x1F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -532,7 +529,9 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; @@ -826,8 +825,12 @@ struct i40e_aqc_vsi_properties_data { I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ u8 up_enable_bits; @@ -1068,6 +1071,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -2064,6 +2068,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); #define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) #define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) +#define 
I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA +#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) +#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 +#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved1; u8 oper_num_tc; @@ -2177,6 +2187,46 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); + /* tunnel key structure 0x0B10 */ struct i40e_aqc_tunnel_key_structure { diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 0bae22da0..114dc6450 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -54,6 +54,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_20G_KR2: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_SFP_X722: + case I40E_DEV_ID_1G_BASE_T_X722: + case I40E_DEV_ID_10G_BASE_T_X722: + hw->mac.type = I40E_MAC_X722; + break; + case I40E_DEV_ID_X722_VF: + case I40E_DEV_ID_X722_VF_HV: + hw->mac.type = I40E_MAC_X722_VF; + break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; @@ -71,6 +80,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) return status; } +/** + * i40e_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case 
I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * i40e_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) +{ + switch (stat_err) { + case 0: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return 
"I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + return hw->err_str; +} + /** * i40e_debug_aq * @hw: debug mask related to admin queue @@ -177,6 +392,169 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, return status; } +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut)); + 
cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut)); + + status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * i40e_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * i40e_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key)); + cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key)); + + status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * i40e_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); +} + /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. 
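The new i40e_aq_get_set_rss_lut()/i40e_aq_get_set_rss_key() helpers above share one shape: a single static worker parameterized by a bool set, which picks the set or get opcode and fills the descriptor once, wrapped by thin public get/set entry points. Here is a compilable toy sketch of that shape with the admin-queue transport reduced to a stub; every name is hypothetical, and the real helpers of course go through i40e_asq_send_command() rather than a memory copy.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rss_key { uint8_t standard[0x28]; uint8_t extended[0xc]; };

static struct rss_key hw_key;	/* stands in for the device-side key */

static int rss_key_get_set(uint16_t vsi_id, struct rss_key *key, bool set)
{
	(void)vsi_id;		/* the real code encodes this in the descriptor */
	if (set)
		hw_key = *key;	/* "indirect command" write */
	else
		*key = hw_key;	/* "indirect command" read */
	return 0;
}

static int rss_key_get(uint16_t vsi_id, struct rss_key *key)
{ return rss_key_get_set(vsi_id, key, false); }

static int rss_key_set(uint16_t vsi_id, struct rss_key *key)
{ return rss_key_get_set(vsi_id, key, true); }

int main(void)
{
	struct rss_key k = { .standard = { 0xaa }, .extended = { 0x55 } };
	struct rss_key out;

	rss_key_set(1, &k);
	rss_key_get(1, &out);
	printf("round-trip ok: %d\n", !memcmp(&k, &out, sizeof(k)));
	return 0;
}

The point of the worker/wrapper split is that the descriptor setup (BUF/RD flags, VSI id encoding, DMA address split into addr_high/addr_low) is written once, and only the opcode differs between the get and set paths.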
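A recurring change in the i40e hunks (the I40E_FLAG_* block in i40e.h above, and the hena manipulation in the ethtool hunks below) is replacing open-coded (u64)(1 << n) with BIT_ULL(n). This is more than style: the new flags reach bit position 40, and shifting a plain 32-bit int by 32 or more is undefined behavior in C; the (u64) cast happens after the shift, too late to help. A minimal standalone illustration, with BIT_ULL re-declared (equivalently to the kernel's definition) so it builds outside the tree:

#include <stdio.h>

/* Equivalent to the kernel definition: the shift happens in a
 * 64-bit type because 1ULL is widened before shifting. */
#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	/* Mirrors I40E_FLAG_VEB_MODE_ENABLED = BIT_ULL(40): well defined.
	 * The old spelling, (u64)(1 << 40), shifts a 32-bit int by 40
	 * bits first -- that shift is undefined behavior. */
	unsigned long long flags = BIT_ULL(40) | BIT_ULL(31);

	printf("flags = 0x%llx\n", flags);	/* -> 0x10080000000 */
	return 0;
}

The same reasoning applies to the hena RSS-hash bits converted below: I40E_FILTER_PCTYPE_* values above 31 only work in a 64-bit register image if the shift itself is done in a 64-bit type.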
@@ -563,6 +941,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) switch (hw->mac.type) { case I40E_MAC_XL710: + case I40E_MAC_X722: break; default: return I40E_ERR_DEVICE_NOT_SUPPORTED; @@ -1187,9 +1566,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) blink = false; if (blink) - gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else - gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); break; @@ -2391,7 +2770,7 @@ i40e_aq_erase_nvm_exit: #define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 #define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 -#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1 +#define I40E_DEV_FUNC_CAP_FLEX10 0xF1 #define I40E_DEV_FUNC_CAP_CEM 0xF2 #define I40E_DEV_FUNC_CAP_IWARP 0x51 #define I40E_DEV_FUNC_CAP_LED 0x61 @@ -2416,6 +2795,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; + u8 major_rev; u32 i = 0; u16 id; @@ -2433,6 +2813,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, number = le32_to_cpu(cap->number); logical_id = le32_to_cpu(cap->logical_id); phys_id = le32_to_cpu(cap->phys_id); + major_rev = cap->major_rev; switch (id) { case I40E_DEV_FUNC_CAP_SWITCH_MODE: @@ -2507,9 +2888,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, case I40E_DEV_FUNC_CAP_MSIX_VF: p->num_msix_vectors_vf = number; break; - case I40E_DEV_FUNC_CAP_MFP_MODE_1: - if (number == 1) - p->mfp_mode_1 = true; + case I40E_DEV_FUNC_CAP_FLEX10: + if (major_rev == 1) { + if (number == 1) { + p->flex10_enable = true; + p->flex10_capable = true; + } + } else { + /* Capability revision >= 2 */ + if (number & 1) + p->flex10_enable = true; + if (number & 2) + p->flex10_capable = true; + } + p->flex10_mode = logical_id; + p->flex10_status = phys_id; break; case I40E_DEV_FUNC_CAP_CEM: if (number == 1) @@ -2557,7 +2950,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, /* Software override ensuring FCoE is disabled if npar or mfp * mode because it is not supported in these modes. 
*/ - if (p->npar_enable || p->mfp_mode_1) + if (p->npar_enable || p->flex10_enable) p->fcoe = false; /* count the enabled ports (aka the "not disabled" ports) */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 2547aa21b..90de46aef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -588,6 +588,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) if (!ret) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + hw->local_dcbx_config.tlv_status = + le16_to_cpu(cee_v1_cfg.tlv_status); i40e_cee_to_dcb_v1_config(&cee_v1_cfg, &hw->local_dcbx_config); } @@ -597,6 +599,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) if (!ret) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + hw->local_dcbx_config.tlv_status = + le32_to_cpu(cee_cfg.tlv_status); i40e_cee_to_dcb_config(&cee_cfg, &hw->local_dcbx_config); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index e137e3fac..50fc894a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -58,9 +58,9 @@ #define I40E_IEEE_ETS_MAXTC_SHIFT 0 #define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) #define I40E_IEEE_ETS_CBS_SHIFT 6 -#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT) #define I40E_IEEE_ETS_WILLING_SHIFT 7 -#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT) #define I40E_IEEE_ETS_PRIO_0_SHIFT 0 #define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) #define I40E_IEEE_ETS_PRIO_1_SHIFT 4 @@ -79,9 +79,9 @@ #define I40E_IEEE_PFC_CAP_SHIFT 0 #define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) #define I40E_IEEE_PFC_MBC_SHIFT 6 -#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT) #define I40E_IEEE_PFC_WILLING_SHIFT 7 -#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT) +#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT) /* Defines for IEEE APP TLV */ #define I40E_IEEE_APP_SEL_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index bd5079d5c..1c51f736a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) /* Set up all the App TLVs if DCBx is negotiated */ for (i = 0; i < dcbxcfg->numapps; i++) { prio = dcbxcfg->app[i].priority; - tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]); + tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]); /* Add APP only if the TC is enabled for this VSI */ if (tc_map & vsi->tc_config.enabled_tc) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index da0faf478..d7c15d17f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) pf->auto_disable_flags |= flag; } dev_info(&pf->pdev->dev, "requesting a PF reset\n"); - i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); } #define 
I40E_MAX_DEBUG_OUT_BUFFER (4096*4) @@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } } else if (strncmp(cmd_buf, "pfr", 3) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "corer", 5) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "globr", 5) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "empr", 4) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index 56438bd57..f141e78d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); if (!ret_code && ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == - (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) { - ret_code = i40e_validate_nvm_checksum(hw, NULL); - } else { - ret_code = I40E_ERR_DIAG_TEST_FAILED; - } - - return ret_code; + BIT(I40E_SR_CONTROL_WORD_1_SHIFT))) + return i40e_validate_nvm_checksum(hw, NULL); + else + return I40E_ERR_DIAG_TEST_FAILED; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9a68c65b1..13a5d4cf4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -114,7 +114,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("tx_errors", stats.eth.tx_errors), I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), - I40E_PF_STAT("crc_errors", stats.crc_errors), + I40E_PF_STAT("rx_crc_errors", stats.crc_errors), I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), @@ -148,7 +148,9 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), + I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status), I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), + I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status), /* LPI stats */ I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), @@ -195,7 +197,14 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = { FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \ / sizeof(u64)) +#define I40E_VEB_TC_STATS_LEN ( \ + (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \ + FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \ + FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \ + FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \ + / sizeof(u64)) #define I40E_VEB_STATS_LEN 
ARRAY_SIZE(i40e_gstrings_veb_stats) +#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN) #define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ I40E_PFC_STATS_LEN + \ I40E_VSI_STATS_LEN((n))) @@ -679,15 +688,17 @@ static int i40e_set_settings(struct net_device *netdev, /* make the aq call */ status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { - netdev_info(netdev, "Set phy config failed with error %d.\n", - status); + netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); return -EAGAIN; } status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) - netdev_info(netdev, "Updating link info failed with error %d\n", - status); + netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); } else { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -707,8 +718,9 @@ static int i40e_nway_reset(struct net_device *netdev) ret = i40e_aq_set_link_restart_an(hw, link_up, NULL); if (ret) { - netdev_info(netdev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + netdev_info(netdev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } @@ -820,18 +832,21 @@ static int i40e_set_pauseparam(struct net_device *netdev, status = i40e_set_fc(hw, &aq_failures, link_up); if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n", - status, hw->aq.asq_last_status); + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n", - status, hw->aq.asq_last_status); + netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", - status, hw->aq.asq_last_status); + netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } @@ -1009,7 +1024,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev) & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; /* register returns value in power of 2, 64Kbyte chunks. */ - val = (64 * 1024) * (1 << val); + val = (64 * 1024) * BIT(val); return val; } @@ -1249,7 +1264,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) int len = I40E_PF_STATS_LEN(netdev); if (pf->lan_veb != I40E_NO_VEB) - len += I40E_VEB_STATS_LEN; + len += I40E_VEB_STATS_TOTAL; return len; } else { return I40E_VSI_STATS_LEN(netdev); @@ -1329,6 +1344,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } + for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) { + data[i++] = veb->tc_stats.tc_tx_packets[j]; + data[i++] = veb->tc_stats.tc_tx_bytes[j]; + data[i++] = veb->tc_stats.tc_rx_packets[j]; + data[i++] = veb->tc_stats.tc_rx_bytes[j]; + } } for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { p = (char *)pf + i40e_gstrings_stats[j].stat_offset; @@ -1400,6 +1421,20 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, i40e_gstrings_veb_stats[i].stat_string); p += ETH_GSTRING_LEN; } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_tx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_tx_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_rx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "veb.tc_%u_rx_bytes", i); + p += ETH_GSTRING_LEN; + } } for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { snprintf(p, ETH_GSTRING_LEN, "port.%s", @@ -1462,20 +1497,11 @@ static int i40e_get_ts_info(struct net_device *dev, else info->phc_index = -1; - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); - - info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } @@ -1560,6 +1586,21 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf) return false; } +static inline bool i40e_active_vmdqs(struct i40e_pf *pf) +{ + struct i40e_vsi **vsi = pf->vsi; + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!vsi[i]) + continue; + if (vsi[i]->type == I40E_VSI_VMDQ2) + return true; + } + + return false; +} + static void i40e_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { @@ -1573,9 +1614,9 @@ static void i40e_diag_test(struct net_device *netdev, set_bit(__I40E_TESTING, &pf->state); - if (i40e_active_vfs(pf)) { + if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { dev_warn(&pf->pdev->dev, - "Please take active VFS offline and restart the adapter before running NIC diagnostics\n"); + "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); data[I40E_ETH_TEST_REG] = 1; data[I40E_ETH_TEST_EEPROM] = 1; data[I40E_ETH_TEST_INTR] = 1; @@ -1591,11 +1632,13 @@ static void i40e_diag_test(struct net_device *netdev, /* indicate we're in test mode */ dev_close(netdev); else - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + /* This reset does not affect link - if it is + * changed to a type of reset that does affect + * link then the following link test would have + * to be moved to before the reset + */ + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); - /* Link test performed before hardware reset - * so autoneg doesn't interfere with test result - */ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1613,7 +1656,7 @@ static void i40e_diag_test(struct net_device 
*netdev, eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__I40E_TESTING, &pf->state); - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); if (if_running) dev_open(netdev); @@ -1646,7 +1689,7 @@ static void i40e_get_wol(struct net_device *netdev, /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) { + if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) { wol->supported = 0; wol->wolopts = 0; } else { @@ -1679,7 +1722,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if (((1 << hw->port) & wol_nvm_bits)) + if (BIT(hw->port) & wol_nvm_bits) return -EOPNOTSUPP; /* only magic packet is supported */ @@ -2025,10 +2068,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: return -EINVAL; @@ -2037,10 +2080,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: return -EINVAL; @@ -2049,12 +2092,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; default: return -EINVAL; @@ -2063,12 +2106,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; @@ -2081,7 +2124,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case 
AH_V6_FLOW: @@ -2090,15 +2133,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case IPV6_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; default: return -EINVAL; @@ -2509,7 +2552,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * @indir: indirection table * @key: hash key * - * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. **/ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index c8b621e0e..5ea75dd53 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) /* enable FCoE hash filter */ val = rd32(hw, I40E_PFQF_HENA(1)); - val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32); - val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32); + val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32); + val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32); val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; wr32(hw, I40E_PFQF_HENA(1), val); @@ -308,10 +308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) pf->num_fcoe_qps = I40E_DEFAULT_FCOE; /* Reserve 4K DDP contexts and 20K filter size for FCoE */ - pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) * - I40E_DMA_CNTX_BASE_SIZE; + pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) * + I40E_DMA_CNTX_BASE_SIZE; pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num + - (1 << I40E_HASH_FILTER_SIZE_16K) * + BIT(I40E_HASH_FILTER_SIZE_16K) * I40E_HASH_FILTER_BASE_SIZE; /* FCoE object: max 16K filter buckets and 4K DMA contexts */ @@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app.protocolid == ETH_P_FCOE) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= (1 << tc); + enabled_tc |= BIT(tc); break; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h index 0d49e2d15..a93174dde 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h @@ -59,9 +59,9 @@ (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1) #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \ - (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) + BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \ - (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) + BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \ I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 9b987ccc9..5ebe12d56 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c 
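The FCoE hash-filter enable above pokes I40E_PFQF_HENA(1) with BIT(pctype - 32) because the 64-bit hash-enable bitmap is exposed as two 32-bit registers; PCTYPEs of 32 and up land in the second word. The split in miniature (the register array is an illustrative stand-in):

#include <stdio.h>
#include <stdint.h>

#define BIT(nr) (1UL << (nr))

static uint32_t hena_reg[2];    /* stand-ins for PFQF_HENA(0)/(1) */

static void hena_set(unsigned int pctype)
{
        /* bits 0..31 live in word 0, bits 32..63 in word 1 */
        hena_reg[pctype / 32] |= BIT(pctype % 32);
}

int main(void)
{
        uint64_t hena;

        hena_set(49);   /* an upper-word PCTYPE, as the FCoE ones are */
        hena = (uint64_t)hena_reg[0] | ((uint64_t)hena_reg[1] << 32);
        printf("hena = 0x%016llx\n", (unsigned long long)hena);
        return 0;
}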
@@ -116,6 +116,7 @@ exit:
  * @hw: pointer to our HW structure
  * @hmc_info: pointer to the HMC configuration information structure
  * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
  *
  * This function:
  *	1. Initializes the pd entry
@@ -129,12 +130,14 @@ exit:
  **/
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 				    struct i40e_hmc_info *hmc_info,
-				    u32 pd_index)
+				    u32 pd_index,
+				    struct i40e_dma_mem *rsrc_pg)
 {
 	i40e_status ret_code = 0;
 	struct i40e_hmc_pd_table *pd_table;
 	struct i40e_hmc_pd_entry *pd_entry;
 	struct i40e_dma_mem mem;
+	struct i40e_dma_mem *page = &mem;
 	u32 sd_idx, rel_pd_idx;
 	u64 *pd_addr;
 	u64 page_desc;
@@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
 	pd_entry = &pd_table->pd_entry[rel_pd_idx];
 	if (!pd_entry->valid) {
-		/* allocate a 4K backing page */
-		ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
-						 I40E_HMC_PAGED_BP_SIZE,
-						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
-		if (ret_code)
-			goto exit;
+		if (rsrc_pg) {
+			pd_entry->rsrc_pg = true;
+			page = rsrc_pg;
+		} else {
+			/* allocate a 4K backing page */
+			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+						I40E_HMC_PAGED_BP_SIZE,
+						I40E_HMC_PD_BP_BUF_ALIGNMENT);
+			if (ret_code)
+				goto exit;
+			pd_entry->rsrc_pg = false;
+		}
 
-		pd_entry->bp.addr = mem;
+		pd_entry->bp.addr = *page;
 		pd_entry->bp.sd_pd_index = pd_index;
 		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
 		/* Set page address and valid bit */
-		page_desc = mem.pa | 0x1;
+		page_desc = page->pa | 0x1;
 
 		pd_addr = (u64 *)pd_table->pd_page_addr.va;
 		pd_addr += rel_pd_idx;
@@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
 	/* free memory here */
-	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+	if (!pd_entry->rsrc_pg)
+		ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
 	if (ret_code)
 		goto exit;
 	if (!pd_table->ref_cnt)
@@ -287,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
 				  u32 idx, bool is_pf)
 {
 	struct i40e_hmc_sd_entry *sd_entry;
-	i40e_status ret_code = 0;
+
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
 
 	/* get the entry and decrease its ref counter */
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
-	if (is_pf) {
-		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
-	} else {
-		ret_code = I40E_NOT_SUPPORTED;
-		goto exit;
-	}
-	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
-	if (ret_code)
-		goto exit;
-exit:
-	return ret_code;
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+	return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
 }
 
 /**
@@ -341,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
 				    struct i40e_hmc_info *hmc_info,
 				    u32 idx, bool is_pf)
 {
-	i40e_status ret_code = 0;
 	struct i40e_hmc_sd_entry *sd_entry;
 
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
+
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
-	if (is_pf) {
-		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
-	} else {
-		ret_code = I40E_NOT_SUPPORTED;
-		goto exit;
-	}
-	/* free memory here */
-	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
-	if (ret_code)
-		goto exit;
-exit:
-	return ret_code;
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+	return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 732a02660..d90669211 100644
---
a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -62,6 +62,7 @@ struct i40e_hmc_bp { struct i40e_hmc_pd_entry { struct i40e_hmc_bp bp; u32 sd_index; + bool rsrc_pg; bool valid; }; @@ -126,8 +127,8 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -146,7 +147,7 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index); + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx); diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 0079ad7bc..fa371a2a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, obj->cnt = txq_num; obj->base = 0; size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (txq_num > obj->max_cnt) { @@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (rxq_num > obj->max_cnt) { @@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_cntx_num > obj->max_cnt) { @@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_filt_num > obj->max_cnt) { @@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, /* update the pd table entry */ ret_code = i40e_add_pd_table_entry(hw, info->hmc_info, - i); + i, NULL); if (ret_code) { pd_error = true; break; @@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits, /* prepare the bits and mask */ shift_width 
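The new rsrc_pg flag added to struct i40e_hmc_pd_entry above records whether the backing page was handed in by the caller or allocated by i40e_add_pd_table_entry() itself, so that i40e_remove_pd_bp() frees only what it owns. A reduced userspace sketch of that ownership pattern, with illustrative names:

#include <stdlib.h>
#include <stdbool.h>

struct pd_entry {
        void *page;
        bool  rsrc_pg;          /* true: caller owns the page */
};

static int pd_entry_init(struct pd_entry *e, void *prealloc, size_t len)
{
        if (prealloc) {
                e->page = prealloc;
                e->rsrc_pg = true;      /* don't free on teardown */
        } else {
                e->page = malloc(len);  /* stands in for i40e_allocate_dma_mem() */
                if (!e->page)
                        return -1;
                e->rsrc_pg = false;
        }
        return 0;
}

static void pd_entry_free(struct pd_entry *e)
{
        if (!e->rsrc_pg)        /* mirrors the check in i40e_remove_pd_bp() */
                free(e->page);
        e->page = NULL;
}

int main(void)
{
        struct pd_entry e;

        if (pd_entry_init(&e, NULL, 4096) == 0)
                pd_entry_free(&e);
        return 0;
}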
= ce_info->lsb % 8; - mask = ((u8)1 << ce_info->width) - 1; + mask = BIT(ce_info->width) - 1; src_byte = *from; src_byte &= mask; @@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits, /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = ((u16)1 << ce_info->width) - 1; + mask = BIT(ce_info->width) - 1; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines @@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits, * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) - mask = ((u32)1 << ce_info->width) - 1; + mask = BIT(ce_info->width) - 1; else mask = ~(u32)0; @@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits, * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) - mask = ((u64)1 << ce_info->width) - 1; + mask = BIT_ULL(ce_info->width) - 1; else mask = ~(u64)0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 48a52b35b..3dd26cdd0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 4 +#define DRV_VERSION_BUILD 9 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -76,6 +76,9 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, /* required last entry */ {0, } }; @@ -520,7 +523,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, if (likely(new_data >= *offset)) *stat = new_data - *offset; else - *stat = (new_data + ((u64)1 << 48)) - *offset; + *stat = (new_data + BIT_ULL(48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } @@ -543,7 +546,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, if (likely(new_data >= *offset)) *stat = (u32)(new_data - *offset); else - *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); + *stat = (u32)((new_data + BIT_ULL(32)) - *offset); } /** @@ -621,11 +624,15 @@ static void i40e_update_veb_stats(struct i40e_veb *veb) struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ - int idx = 0; + struct i40e_veb_tc_stats *veb_oes; + struct i40e_veb_tc_stats *veb_es; + int i, idx = 0; idx = veb->stats_idx; es = &veb->stats; oes = &veb->stats_offsets; + veb_es = &veb->tc_stats; + veb_oes = &veb->tc_stats_offsets; /* Gather up the stats that the hw collects */ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), @@ -661,6 +668,28 @@ static void i40e_update_veb_stats(struct i40e_veb *veb) i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx), veb->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx), + I40E_GLVEBTC_RPCL(i, idx), + veb->stat_offsets_loaded, + &veb_oes->tc_rx_packets[i], + &veb_es->tc_rx_packets[i]); + i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx), + I40E_GLVEBTC_RBCL(i, idx), + veb->stat_offsets_loaded, + &veb_oes->tc_rx_bytes[i], + &veb_es->tc_rx_bytes[i]); + 
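i40e_stat_update48(), used for the VEB counters being gathered here, reads hardware counters that are only 48 bits wide; when the raw value has wrapped below the saved offset, adding BIT_ULL(48) before subtracting recovers the true delta modulo 2^48. The arithmetic in isolation:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(nr) (1ULL << (nr))

/* delta of a 48-bit hardware counter that may have wrapped */
static uint64_t delta48(uint64_t new_data, uint64_t offset)
{
        uint64_t stat;

        if (new_data >= offset)
                stat = new_data - offset;
        else
                stat = (new_data + BIT_ULL(48)) - offset;       /* wrapped */
        return stat & 0xFFFFFFFFFFFFULL;
}

int main(void)
{
        /* counter was 5 below the 48-bit ceiling, then advanced by 10 */
        uint64_t offset = 0xFFFFFFFFFFFFULL - 4;

        printf("%llu\n", (unsigned long long)delta48(5, offset));   /* 10 */
        return 0;
}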
i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx), + I40E_GLVEBTC_TPCL(i, idx), + veb->stat_offsets_loaded, + &veb_oes->tc_tx_packets[i], + &veb_es->tc_tx_packets[i]); + i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx), + I40E_GLVEBTC_TBCL(i, idx), + veb->stat_offsets_loaded, + &veb_oes->tc_tx_bytes[i], + &veb_es->tc_tx_bytes[i]); + } veb->stat_offsets_loaded = true; } @@ -1123,6 +1152,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); + if (pf->flags & I40E_FLAG_FD_SB_ENABLED && + !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + nsd->fd_sb_status = true; + else + nsd->fd_sb_status = false; + + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && + !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + nsd->fd_atr_status = true; + else + nsd->fd_atr_status = false; + pf->stat_offsets_loaded = true; } @@ -1240,6 +1281,8 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, struct i40e_mac_filter *f; list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (vsi->info.pvid) + f->vlan = le16_to_cpu(vsi->info.pvid); if (!i40e_find_filter(vsi, macaddr, f->vlan, is_vf, is_netdev)) { if (!i40e_add_filter(vsi, macaddr, f->vlan, @@ -1264,7 +1307,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; - i40e_status aq_ret; + i40e_status ret; /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) @@ -1275,8 +1318,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) element.vlan_tag = 0; element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - if (aq_ret) + ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + if (ret) return -ENOENT; return 0; @@ -1514,7 +1557,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) /* TC is enabled */ + if (enabled_tc & BIT_ULL(i)) /* TC is enabled */ numtc++; } if (!numtc) { @@ -1533,14 +1576,18 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, * vectors available and so we need to lower the used * q count. 
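The numtc loop above, like i40e_pf_get_num_tc() further down, is a population count over the enabled_tc bitmap; kernel code can express the same thing directly with hweight8(). A userspace equivalent via the compiler builtin:

#include <stdio.h>

int main(void)
{
        unsigned char enabled_tc = 0x0B;        /* TCs 0, 1 and 3 enabled */

        /* same result as looping over BIT(i) and counting hits */
        int numtc = __builtin_popcount(enabled_tc);     /* GCC/Clang builtin */

        printf("numtc = %d\n", numtc);                  /* prints 3 */
        return 0;
}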
*/ - qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + else + qcount = vsi->alloc_queue_pairs; num_tc_qps = qcount / numtc; - num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); + num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ - if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */ + if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { + /* TC is enabled */ int pow, num_qps; switch (vsi->type) { @@ -1566,7 +1613,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; - while (num_qps && ((1 << pow) < qcount)) { + while (num_qps && (BIT_ULL(pow) < qcount)) { pow++; num_qps >>= 1; } @@ -1596,7 +1643,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; - else + else if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_queue_pairs = pf->num_lan_msix; } @@ -1716,10 +1763,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; - i40e_status aq_ret = 0; + i40e_status ret = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; + int aq_err = 0; u16 cmd_flags; /* empty array typed pointers, kcalloc later */ @@ -1771,31 +1819,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* flush a full buffer */ if (num_del == filter_list_len) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, del_list, num_del, - NULL); + ret = i40e_aq_remove_macvlan(&pf->hw, + vsi->seid, del_list, num_del, + NULL); + aq_err = pf->hw.aq.asq_last_status; num_del = 0; memset(del_list, 0, sizeof(*del_list)); - if (aq_ret && - pf->hw.aq.asq_last_status != - I40E_AQ_RC_ENOENT) + if (ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", - aq_ret, - pf->hw.aq.asq_last_status); + "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } } if (num_del) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, + ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, del_list, num_del, NULL); + aq_err = pf->hw.aq.asq_last_status; num_del = 0; - if (aq_ret && - pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT) + if (ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "ignoring delete macvlan error, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } kfree(del_list); @@ -1833,29 +1881,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* flush a full buffer */ if (num_add == filter_list_len) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, - NULL); + ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, + NULL); + aq_err = pf->hw.aq.asq_last_status; num_add = 0; - if (aq_ret) + if (ret) break; memset(add_list, 0, sizeof(*add_list)); } } if (num_add) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, NULL); + ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + 
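The del_list/add_list handling in i40e_sync_vsi_filters() follows a classic batch-and-flush shape: accumulate entries into a fixed-size firmware buffer, flush whenever it fills, then flush the remainder once at the end. The skeleton reduced to userspace, where flush() stands in for the i40e_aq_remove_macvlan()/i40e_aq_add_macvlan() calls:

#include <stdio.h>

#define LIST_LEN 4      /* stand-in for the AQ buffer capacity */

static void flush(const int *buf, int n)
{
        printf("flushing %d entries starting with %d\n", n, buf[0]);
}

int main(void)
{
        int buf[LIST_LEN];
        int num = 0;

        for (int filter = 0; filter < 10; filter++) {
                buf[num++] = filter;
                if (num == LIST_LEN) {          /* flush a full buffer */
                        flush(buf, num);
                        num = 0;
                }
        }
        if (num)                                /* flush the remainder */
                flush(buf, num);
        return 0;
}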
add_list, num_add, NULL); + aq_err = pf->hw.aq.asq_last_status; num_add = 0; } kfree(add_list); add_list = NULL; - if (add_happened && aq_ret && - pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) { + if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) { dev_info(&pf->pdev->dev, - "add filter failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "add filter failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { @@ -1871,34 +1921,60 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); - aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, - vsi->seid, - cur_multipromisc, - NULL); - if (aq_ret) + ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_multipromisc, + NULL); + if (ret) dev_info(&pf->pdev->dev, - "set multi promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set multi promisc failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)); - aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (aq_ret) - dev_info(&pf->pdev->dev, - "set uni promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); - aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (aq_ret) + if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { + /* set defport ON for Main VSI instead of true promisc + * this way we will get all unicast/multicast and VLAN + * promisc behavior but will not get VF or VMDq traffic + * replicated on the Main VSI. 
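The recurring log change throughout this patch swaps raw numeric codes for i40e_stat_str() and i40e_aq_str(), which decode the driver status and the admin-queue error into readable names. A toy version of that decode-table idiom (the codes below are illustrative, not the driver's real values):

#include <stdio.h>

static const char *stat_str(int err)
{
        switch (err) {
        case 0:         return "OK";
        case -5:        return "I40E_ERR_TIMEOUT";      /* made-up mapping */
        default:        return "I40E_ERR_UNKNOWN";
        }
}

int main(void)
{
        int ret = -5;

        /* "err %s" reads far better in logs than "err -5" */
        printf("set phy mask fail, err %s\n", stat_str(ret));
        return 0;
}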
+ */ + if (pf->cur_promisc != cur_promisc) { + pf->cur_promisc = cur_promisc; + i40e_do_reset_safe(pf, + BIT(__I40E_PF_RESET_REQUESTED)); + } + } else { + ret = i40e_aq_set_vsi_unicast_promiscuous( + &vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (ret) + dev_info(&pf->pdev->dev, + "set unicast promisc failed, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + ret = i40e_aq_set_vsi_multicast_promiscuous( + &vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (ret) + dev_info(&pf->pdev->dev, + "set multicast promisc failed, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + } + ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (ret) dev_info(&pf->pdev->dev, - "set brdcast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set brdcast promisc failed, err %s, aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } clear_bit(__I40E_CONFIG_BUSY, &vsi->state); @@ -1994,8 +2070,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vlan stripping failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); } } @@ -2023,8 +2101,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vlan stripping failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); } } @@ -2294,7 +2374,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) { struct i40e_vsi_context ctxt; - i40e_status aq_ret; + i40e_status ret; vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.pvid = cpu_to_le16(vid); @@ -2304,11 +2384,13 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) ctxt.seid = vsi->seid; ctxt.info = vsi->info; - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "add pvid failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); return -ENOENT; } @@ -2590,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; - rx_ctx.showiv = 1; + /* this controls whether VLAN is stripped from inner headers */ + rx_ctx.showiv = 0; #ifdef I40E_FCOE rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); #endif @@ -2696,9 +2779,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) #endif /* I40E_FCOE */ /* round up for the chip's needs */ vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, - (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); + BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, - (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) @@ -2728,7 +2811,7 @@ static void 
i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { - if (!(vsi->tc_config.enabled_tc & (1 << n))) + if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) continue; qoffset = vsi->tc_config.tc_info[n].qoffset; @@ -2877,6 +2960,9 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + if (pf->flags & I40E_FLAG_IWARP_ENABLED) + val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + if (pf->flags & I40E_FLAG_PTP) val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; @@ -3167,6 +3253,13 @@ static irqreturn_t i40e_intr(int irq, void *data) (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; + if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && + (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n"); + } + /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { @@ -3373,7 +3466,7 @@ static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) * @v_idx: vector index * @qp_idx: queue pair index **/ -static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) +static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; @@ -3427,7 +3520,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) q_vector->tx.ring = NULL; while (num_ringpairs--) { - map_vector_to_qp(vsi, v_start, qp_idx); + i40e_map_vector_to_qp(vsi, v_start, qp_idx); qp_idx++; qp_remaining--; } @@ -3929,6 +4022,7 @@ static void i40e_vsi_close(struct i40e_vsi *vsi) i40e_vsi_free_irq(vsi); i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); + vsi->current_netdev_flags = 0; } /** @@ -4073,7 +4167,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= (1 << tc); + enabled_tc |= BIT_ULL(tc); break; } } @@ -4122,7 +4216,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) u8 i; for (i = 0; i < num_tc; i++) - enabled_tc |= 1 << i; + enabled_tc |= BIT(i); return enabled_tc; } @@ -4157,7 +4251,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) /* At least have TC0 */ enabled_tc = (enabled_tc ? 
enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) num_tc++; } return num_tc; @@ -4179,11 +4273,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) /* Find the first enabled TC */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) break; } - return 1 << i; + return BIT(i); } /** @@ -4221,26 +4315,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - i40e_status aq_ret; + i40e_status ret; u32 tc_bw_max; int i; /* Get the VSI level BW configuration */ - aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); - if (aq_ret) { + ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi bw config, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } /* Get the VSI level BW configuration per TC */ - aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, - NULL); - if (aq_ret) { + ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, + NULL); + if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi ets bw config, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi ets bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -4279,16 +4375,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; - i40e_status aq_ret; + i40e_status ret; int i; bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; - aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, - NULL); - if (aq_ret) { + ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, + NULL); + if (ret) { dev_info(&vsi->back->pdev->dev, "AQ command Config VSI BW allocation per TC failed = %d\n", vsi->back->hw.aq.asq_last_status); @@ -4337,7 +4433,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) * will set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. 
*/ - if (vsi->tc_config.enabled_tc & (1 << i)) + if (vsi->tc_config.enabled_tc & BIT_ULL(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, @@ -4399,7 +4495,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) bw_share[i] = 1; } @@ -4423,8 +4519,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "update vsi failed, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "Update vsi tc config failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); goto out; } /* update the local VSI info with updated queue map */ @@ -4435,8 +4533,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&vsi->back->pdev->dev, - "Failed updating vsi bw info, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "Failed updating vsi bw info, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); goto out; } @@ -4469,7 +4569,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) bw_data.tc_bw_share_credits[i] = 1; } @@ -4477,8 +4577,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "veb bw config failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "VEB bw config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } @@ -4486,8 +4587,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, - "Failed getting veb bw config, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "Failed getting veb bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: @@ -4574,8 +4676,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) ret = i40e_aq_resume_port_tx(hw, NULL); if (ret) { dev_info(&pf->pdev->dev, - "AQ command Resume Port Tx failed = %d\n", - pf->hw.aq.asq_last_status); + "Resume Port Tx failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_service_event_schedule(pf); @@ -4627,8 +4730,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) } } else { dev_info(&pf->pdev->dev, - "AQ Querying DCB configuration failed: aq_err %d\n", - pf->hw.aq.asq_last_status); + "Query for DCB configuration failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: @@ -4859,7 +4963,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) /* Generate TC map for number of tc requested */ for (i = 0; i < tc; i++) - enabled_tc |= (1 << i); + enabled_tc |= BIT_ULL(i); /* Requesting same TC configuration as already enabled */ if (enabled_tc == vsi->tc_config.enabled_tc) @@ -4998,7 +5102,7 @@ err_setup_rx: err_setup_tx: 
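i40e_setup_tc() above builds the requested map by OR-ing BIT_ULL(i) for each i below tc; for a contiguous map starting at TC0 that is equivalent to BIT_ULL(tc) - 1. A quick check:

#include <stdio.h>

#define BIT_ULL(nr) (1ULL << (nr))

int main(void)
{
        unsigned int tc = 3;
        unsigned long long enabled_tc = 0;

        for (unsigned int i = 0; i < tc; i++)   /* as in i40e_setup_tc() */
                enabled_tc |= BIT_ULL(i);

        printf("loop: 0x%llx, closed form: 0x%llx\n",
               enabled_tc, BIT_ULL(tc) - 1);    /* both print 0x7 */
        return 0;
}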
i40e_vsi_free_tx_resources(vsi); if (vsi == pf->vsi[pf->lan_vsi]) - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); return err; } @@ -5066,7 +5170,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) i40e_vc_notify_reset(pf); /* do the biggest reset indicated */ - if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { + if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { /* Request a Global Reset * @@ -5081,7 +5185,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) val |= I40E_GLGEN_RTRIG_GLOBR_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); - } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { /* Request a Core Reset * @@ -5093,7 +5197,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset * @@ -5106,7 +5210,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf); - } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; /* Find the VSI(s) that requested a re-init */ @@ -5123,7 +5227,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) /* no further action needed, so return now */ return; - } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; /* Find the VSI(s) that needs to be brought down */ @@ -5253,7 +5357,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); + dev_info(&pf->pdev->dev, + "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto exit; } @@ -5761,23 +5868,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf) rtnl_lock(); if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_REINIT_REQUESTED); + reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, &pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_DOWN_REQUESTED); + reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, &pf->state); } @@ -5983,27 +6090,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; - int aq_ret; + int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = 
pf->hw.pf_id; ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s couldn't get PF vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vsi switch failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } @@ -6017,27 +6126,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; - int aq_ret; + int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s couldn't get PF vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vsi switch failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } @@ -6097,7 +6208,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) ret = i40e_add_vsi(ctl_vsi); if (ret) { dev_info(&pf->pdev->dev, - "rebuild of owner VSI failed: %d\n", ret); + "rebuild of veb_idx %d owner VSI failed: %d\n", + veb->idx, ret); goto end_reconstitute; } i40e_vsi_reset_stats(ctl_vsi); @@ -6176,8 +6288,10 @@ static int i40e_get_capabilities(struct i40e_pf *pf) buf_len = data_size; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { dev_info(&pf->pdev->dev, - "capability discovery failed: aq=%d\n", - pf->hw.aq.asq_last_status); + "capability discovery failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); return -ENODEV; } } while (err); @@ -6363,7 +6477,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); + dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto clear_recovery; } @@ -6373,11 +6489,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, 
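i40e_enable_pf_switch_lb()/i40e_disable_pf_switch_lb() toggle the ALLOW_LB bit in the little-endian switch_id field by converting the constant with cpu_to_le16() once, rather than byte-swapping the field for every access. A sketch of the idiom; the helper is a stand-in that assumes a little-endian host, and the flag value is illustrative:

#include <stdio.h>
#include <stdint.h>

typedef uint16_t __le16;
/* stand-in for the kernel helper; identity on a little-endian host */
static inline __le16 cpu_to_le16(uint16_t v) { return v; }

#define ALLOW_LB 0x0004         /* illustrative flag value */

int main(void)
{
        __le16 switch_id = 0;

        switch_id |= cpu_to_le16(ALLOW_LB);     /* allow loopback */
        printf("set:   0x%04x\n", switch_id);

        switch_id &= ~cpu_to_le16(ALLOW_LB);    /* forbid it again */
        printf("clear: 0x%04x\n", switch_id);
        return 0;
}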
bool reinit) i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf); - if (ret) { - dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", - ret); + if (ret) goto end_core_reset; - } ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, @@ -6418,12 +6531,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); if (ret) - dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); + dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* make sure our flow control settings are restored */ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); if (ret) - dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); + dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only @@ -6484,8 +6601,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@ -6647,8 +6766,8 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->pending_vxlan_bitmap & (1 << i)) { - pf->pending_vxlan_bitmap &= ~(1 << i); + if (pf->pending_vxlan_bitmap & BIT_ULL(i)) { + pf->pending_vxlan_bitmap &= ~BIT_ULL(i); port = pf->vxlan_ports[i]; if (port) ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), @@ -6659,10 +6778,12 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) if (ret) { dev_info(&pf->pdev->dev, - "%s vxlan port %d, index %d failed, err %d, aq_err %d\n", + "%s vxlan port %d, index %d failed, err %s aq_err %s\n", port ? 
"add" : "delete", - ntohs(port), i, ret, - pf->hw.aq.asq_last_status); + ntohs(port), i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); pf->vxlan_ports[i] = 0; } } @@ -7013,6 +7134,10 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) tx_ring->count = vsi->num_desc; tx_ring->size = 0; tx_ring->dcb_tc = 0; + if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; + if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) + tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; vsi->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -7411,62 +7536,139 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) } /** - * i40e_config_rss - Prepare for RSS if used + * i40e_config_rss_aq - Prepare for RSS using AQ commands + * @vsi: vsi structure + * @seed: RSS hash seed + **/ +static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +{ + struct i40e_aqc_get_set_rss_key_data rss_key; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + bool pf_lut = false; + u8 *rss_lut; + int ret, i; + + memset(&rss_key, 0, sizeof(rss_key)); + memcpy(&rss_key, seed, sizeof(rss_key)); + + rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); + if (!rss_lut) + return -ENOMEM; + + /* Populate the LUT with max no. of queues in round robin fashion */ + for (i = 0; i < vsi->rss_table_size; i++) + rss_lut[i] = i % vsi->rss_size; + + ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS key, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + return ret; + } + + if (vsi->type == I40E_VSI_MAIN) + pf_lut = true; + + ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut, + vsi->rss_table_size); + if (ret) + dev_info(&pf->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + + return ret; +} + +/** + * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used + * @vsi: VSI structure + **/ +static int i40e_vsi_config_rss(struct i40e_vsi *vsi) +{ + u8 seed[I40E_HKEY_ARRAY_SIZE]; + struct i40e_pf *pf = vsi->back; + + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_config_rss_aq(vsi, seed); + + return 0; +} + +/** + * i40e_config_rss_reg - Prepare for RSS if used * @pf: board private structure + * @seed: RSS hash seed **/ -static int i40e_config_rss(struct i40e_pf *pf) +static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) { - u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; + u32 *seed_dw = (u32 *)seed; + u32 current_queue = 0; u32 lut = 0; int i, j; - u64 hena; - u32 reg_val; - netdev_rss_key_fill(rss_key, sizeof(rss_key)); + /* Fill out hash function seed */ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]); + wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + lut = 0; + for (j = 0; j < 4; j++) { + if (current_queue == vsi->rss_size) + current_queue = 0; + lut |= ((current_queue) << (8 * j)); + current_queue++; + } + wr32(&pf->hw, I40E_PFQF_HLUT(i), lut); + } + i40e_flush(hw); + + return 0; +} + +/** + * i40e_config_rss - Prepare for RSS if used + * @pf: board private structure + **/ +static int i40e_config_rss(struct i40e_pf *pf) +{ + struct 
i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + u8 seed[I40E_HKEY_ARRAY_SIZE]; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + u64 hena; + + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); - hena |= I40E_DEFAULT_RSS_HENA; + hena |= i40e_pf_get_default_rss_hena(pf); + wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); - /* Check capability and Set table size and register per hw expectation*/ + /* Determine the RSS table size based on the hardware capabilities */ reg_val = rd32(hw, I40E_PFQF_CTL_0); - if (pf->rss_table_size == 512) - reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; - else - reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; + reg_val = (pf->rss_table_size == 512) ? + (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : + (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); wr32(hw, I40E_PFQF_CTL_0, reg_val); - /* Populate the LUT with max no. of queues in round robin fashion */ - for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) { - - /* The assumption is that lan qp count will be the highest - * qp count for any PF VSI that needs RSS. - * If multiple VSIs need RSS support, all the qp counts - * for those VSIs should be a power of 2 for RSS to work. - * If LAN VSI is the only consumer for RSS then this requirement - * is not necessary. - */ - if (j == vsi->rss_size) - j = 0; - /* lut = 4-byte sliding window of 4 lut entries */ - lut = (lut << 8) | (j & - ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); - /* On i = 3, we have 4 entries in lut; write to the register */ - if ((i & 3) == 3) - wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); - } - i40e_flush(hw); - - return 0; + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed); + else + return i40e_config_rss_reg(pf, seed); } /** @@ -7533,7 +7735,7 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) i40e_status status; /* Set the valid bit for this PF */ - bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; @@ -7567,8 +7769,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, - "Cannot acquire NVM for read access, err %d: aq_err %d\n", - ret, last_aq_status); + "Cannot acquire NVM for read access, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -7583,8 +7786,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", - ret, last_aq_status); + dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -7596,8 +7800,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, - "Cannot acquire NVM for write access, err %d: aq_err %d\n", - ret, last_aq_status); + "Cannot acquire NVM for write access, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + 
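i40e_config_rss_reg() above packs four 8-bit queue indices into each 32-bit HLUT register while cycling round-robin through vsi->rss_size queues; the AQ variant fills rss_lut[i] = i % rss_size the same way. The packing arithmetic on its own:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int rss_size = 6;      /* queues to spread load across */
        unsigned int current_queue = 0;

        for (int i = 0; i < 4; i++) {           /* first few HLUT registers */
                uint32_t lut = 0;

                for (int j = 0; j < 4; j++) {   /* 4 entries per register */
                        if (current_queue == rss_size)
                                current_queue = 0;
                        lut |= current_queue++ << (8 * j);
                }
                printf("HLUT[%d] = 0x%08x\n", i, lut);
        }
        return 0;
}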
i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Write it back out unchanged to initiate update NVM, @@ -7615,8 +7820,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) i40e_release_nvm(&pf->hw); if (ret) dev_info(&pf->pdev->dev, - "BW settings NOT SAVED, err %d aq_err %d\n", - ret, last_aq_status); + "BW settings NOT SAVED, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); bw_commit_out: return ret; @@ -7662,7 +7868,7 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues */ - pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; + pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); pf->rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, @@ -7673,7 +7879,7 @@ static int i40e_sw_init(struct i40e_pf *pf) } /* MFP mode enabled */ - if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { + if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_npar_bw_setting(pf)) @@ -7703,9 +7909,9 @@ static int i40e_sw_init(struct i40e_pf *pf) } if (pf->hw.func_caps.vmdq) { - pf->flags |= I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; - pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; + pf->flags |= I40E_FLAG_VMDQ_ENABLED; + pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } #ifdef I40E_FCOE @@ -7723,6 +7929,14 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ + if (pf->hw.mac.type == I40E_MAC_X722) { + pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | + I40E_FLAG_128_QP_RSS_CAPABLE | + I40E_FLAG_HW_ATR_EVICT_CAPABLE | + I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | + I40E_FLAG_WB_ON_ITR_CAPABLE | + I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE; + } pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; @@ -7812,7 +8026,7 @@ static int i40e_set_features(struct net_device *netdev, need_reset = i40e_set_ntuple(pf, features); if (need_reset) - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); return 0; } @@ -7875,10 +8089,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* New port: add it and mark its index in the bitmap */ pf->vxlan_ports[next_idx] = port; - pf->pending_vxlan_bitmap |= (1 << next_idx); + pf->pending_vxlan_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port)); } /** @@ -7906,7 +8118,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev, * and make it pending */ pf->vxlan_ports[idx] = 0; - pf->pending_vxlan_bitmap |= (1 << idx); + pf->pending_vxlan_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", @@ -7981,7 +8193,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -#ifdef HAVE_BRIDGE_ATTRIBS /** * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured @@ -7995,7 +8206,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * bridge mode enabled. 
**/ static int i40e_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, + u16 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8066,14 +8278,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, * Return the mode in which the hardware bridge is operating in * i.e VEB or VEPA. **/ -#ifdef HAVE_BRIDGE_FILTER static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) -#else -static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, int nlflags) -#endif /* HAVE_BRIDGE_FILTER */ { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8097,7 +8304,25 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, nlflags, 0, 0, filter_mask, NULL); } -#endif /* HAVE_BRIDGE_ATTRIBS */ + +#define I40E_MAX_TUNNEL_HDR_LEN 80 +/** + * i40e_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @netdev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t i40e_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation && + (skb_inner_mac_header(skb) - skb_transport_header(skb) > + I40E_MAX_TUNNEL_HDR_LEN)) + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + + return features; +} static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, @@ -8133,10 +8358,9 @@ static const struct net_device_ops i40e_netdev_ops = { #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, -#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_features_check = i40e_features_check, .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, -#endif /* HAVE_BRIDGE_ATTRIBS */ }; /** @@ -8166,6 +8390,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | @@ -8173,6 +8398,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_SCTP_CSUM | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | @@ -8304,8 +8530,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); return -ENOENT; } vsi->info = ctxt.info; @@ -8327,8 +8555,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "update vsi failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } @@ -8345,9 +8575,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { dev_info(&pf->pdev->dev, - "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", - enabled_tc, ret, - pf->hw.aq.asq_last_status); + "failed to configure TCs 
for main VSI tc_map 0x%08x, err %s aq_err %s\n", + enabled_tc, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; } } @@ -8438,8 +8670,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "add vsi failed, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "add vsi failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } @@ -8484,8 +8718,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get vsi bw info, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't get vsi bw info, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* VSI is already added so not tearing that up */ ret = 0; } @@ -8615,6 +8850,11 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) goto vector_setup_out; } + /* In Legacy mode, we do not have to get any other vector since we + * piggyback on the misc/ICR0 for queue interrupts. + */ + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + return ret; if (vsi->num_q_vectors) vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, vsi->num_q_vectors, vsi->idx); @@ -8658,7 +8898,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, - "failed to get tracking for %d queues for VSI %d err=%d\n", + "failed to get tracking for %d queues for VSI %d err %d\n", vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } @@ -8857,6 +9097,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, break; } + if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && + (vsi->type == I40E_VSI_VMDQ2)) { + ret = i40e_vsi_config_rss(vsi); + } return vsi; err_rings: @@ -8896,8 +9140,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw config failed, aq_err=%d\n", - hw->aq.asq_last_status); + "query veb bw config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } @@ -8905,8 +9150,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &ets_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw ets config failed, aq_err=%d\n", - hw->aq.asq_last_status); + "query veb bw ets config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } @@ -9090,36 +9336,40 @@ void i40e_veb_release(struct i40e_veb *veb) **/ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { - bool is_default = false; + struct i40e_pf *pf = veb->pf; + bool is_default = veb->pf->cur_promisc; bool is_cloud = false; int ret; /* get a VEB from the hardware */ - ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, + ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, is_default, is_cloud, &veb->seid, NULL); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't add VEB, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "couldn't add VEB, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } /* get statistics counter */ - ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, 
NULL, NULL, + &veb->stats_idx, NULL, NULL, NULL); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't get VEB statistics idx, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "couldn't get VEB statistics idx, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } ret = i40e_veb_get_bw_info(veb); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't get VEB bw info, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); - i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); + dev_info(&pf->pdev->dev, + "couldn't get VEB bw info, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + i40e_aq_delete_element(&pf->hw, veb->seid, NULL); return -ENOENT; } @@ -9325,8 +9575,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) &next_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, - "get switch config failed %d aq_err=%x\n", - ret, pf->hw.aq.asq_last_status); + "get switch config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); kfree(aq_buf); return -ENOENT; } @@ -9367,8 +9619,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) ret = i40e_fetch_switch_configuration(pf, false); if (ret) { dev_info(&pf->pdev->dev, - "couldn't fetch switch config, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't fetch switch config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } i40e_pf_reset_stats(pf); @@ -9743,7 +9996,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_shared_code(hw); if (err) { - dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", + err); goto err_pf_reset; } @@ -9910,15 +10164,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); if (err) - dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); + dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); }
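The hunks above show the conversion this patch applies across the whole driver: raw numeric error codes in log messages are replaced by the decode helpers i40e_stat_str() and i40e_aq_str(), whose prototypes are added to i40e_prototype.h further down. A minimal before/after sketch of the pattern; the "op failed" message text is illustrative, not taken from the driver:

/* Before: numeric codes that need the datasheet to interpret. */
dev_info(&pf->pdev->dev, "op failed, err %d aq_err %d\n",
	 ret, pf->hw.aq.asq_last_status);

/* After: i40e_stat_str() decodes the driver status code and
 * i40e_aq_str() decodes the last AdminQ error into readable text.
 */
dev_info(&pf->pdev->dev, "op failed, err %s aq_err %s\n",
	 i40e_stat_str(&pf->hw, ret),
	 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));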
/* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * vanishes @@ -10006,8 +10264,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) - dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", - err); + dev_info(&pf->pdev->dev, + "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; /* print a string summarizing features */ @@ -10247,6 +10507,19 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + del_timer_sync(&pf->service_timer); + cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); + + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + + wr32(hw, I40E_PFPM_APM, + (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); + wr32(hw, I40E_PFPM_WUFC, + (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + i40e_clear_interrupt_scheme(pf); if (system_state == SYSTEM_POWER_OFF) { @@ -10267,9 +10540,6 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); - del_timer_sync(&pf->service_timer); - cancel_work_sync(&pf->service_task); - i40e_fdir_teardown(pf); rtnl_lock(); i40e_prep_for_reset(pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 554e49d02..9b83abc0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> I40E_GLNVM_GENS_SR_SIZE_SHIFT); /* Switching to words (sr_size contains power of 2KB) */ - nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; + nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, I40E_GLNVM_FLA); @@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { /* Write the address and start reading */ - sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | - (1 << I40E_GLNVM_SRCTL_START_SHIFT); + sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | + BIT(I40E_GLNVM_SRCTL_START_SHIFT); wr32(hw, I40E_GLNVM_SRCTL, sr_reg); /* Poll I40E_GLNVM_SRCTL until the done bit is set */ @@ -211,6 +211,74 @@ read_nvm_exit: return ret_code; }
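The (1 << n) to BIT(n) rewrites in the i40e_nvm.c hunks above, and the BIT_ULL(n) conversions earlier in i40e_main.c, use the generic helpers from <linux/bitops.h>, where BIT(n) expands to (1UL << (n)) and BIT_ULL(n) to (1ULL << (n)). A minimal sketch of why the two differ; the helper below is hypothetical, for illustration only:

/* A plain (1 << n) is int-width, so n >= 31 overflows; BIT_ULL(n)
 * keeps the shift in 64-bit arithmetic. That is why the 64-bit flag
 * words above (__I40E_PF_RESET_REQUESTED, pending_vxlan_bitmap) are
 * converted to BIT_ULL(), while 32-bit register fields use BIT().
 */
static inline u64 i40e_flag_set(u64 flags, unsigned int bit)
{
	return flags | BIT_ULL(bit);
}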
+/** + * i40e_read_nvm_aq - Read Shadow RAM. + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to read + * @data: buffer to store the words read from the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Reads a 16 bit words buffer from the Shadow RAM using the admin command. + **/ +static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) +{ + i40e_status ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + memset(&cmd_details, 0, sizeof(cmd_details)); + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model. + */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can read only up to 4KB (one sector), in one AQ read */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read fail error: tried to read %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single read cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_read_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, &cmd_details); + + return ret_code; +} + +/** + * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the AdminQ. + **/ +static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + i40e_status ret_code = I40E_ERR_TIMEOUT; + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); + *data = le16_to_cpu(*(__le16 *)data); + + return ret_code; +} + /** * i40e_read_nvm_word - Reads Shadow RAM * @hw: pointer to the HW structure @@ -222,6 +290,8 @@ read_nvm_exit: i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { + if (hw->mac.type == I40E_MAC_X722) + return i40e_read_nvm_word_aq(hw, offset, data); return i40e_read_nvm_word_srctl(hw, offset, data); } @@ -256,6 +326,63 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, return ret_code; } +/** + * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + i40e_status ret_code; + u16 read_size = *words; + bool last_cmd = false; + u16 words_read = 0; + u16 i = 0; + + do { + /* Calculate number of words we should read in this step. + * The FVL AQ does not allow reading more than one page at + * a time or crossing page boundaries. + */ + if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS) + read_size = min(*words, + (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS - + (offset % I40E_SR_SECTOR_SIZE_IN_WORDS))); + else + read_size = min((*words - words_read), + I40E_SR_SECTOR_SIZE_IN_WORDS); + + /* Check if this is the last command, and if so set the flag */ + if ((words_read + read_size) >= *words) + last_cmd = true; + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, + data + words_read, last_cmd); + if (ret_code) + goto read_nvm_buffer_aq_exit; + + /* Increment counter for words already read and move offset to + * new read location + */ + words_read += read_size; + offset += read_size; + } while (words_read < *words); + + for (i = 0; i < *words; i++) + data[i] = le16_to_cpu(((__le16 *)data)[i]); + +read_nvm_buffer_aq_exit: + *words = words_read; + return ret_code; +} +
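To make the sector chunking in i40e_read_nvm_buffer_aq() concrete, a worked example with assumed numbers, taking the sector size to be 2048 words (the 4KB noted in i40e_read_nvm_aq(); the exact value of I40E_SR_SECTOR_SIZE_IN_WORDS lives in the driver headers):

/* Reading *words = 3000 at offset = 5000, sector size 2048 words:
 *
 *   pass 1: offset % 2048 = 904, so read_size = 2048 - 904 = 1144;
 *           offset advances to 6144, which is sector-aligned.
 *   pass 2: read_size = min(3000 - 1144, 2048) = 1856;
 *           1144 + 1856 == 3000, so last_cmd becomes true.
 *
 * No single AQ read crosses a sector boundary, satisfying the
 * single-sector check in i40e_read_nvm_aq().
 */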
/** * i40e_read_nvm_buffer - Reads Shadow RAM buffer * @hw: pointer to the HW structure @@ -270,6 +397,8 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { + if (hw->mac.type == I40E_MAC_X722) + return i40e_read_nvm_buffer_aq(hw, offset, words, data); return i40e_read_nvm_buffer_srctl(hw, offset, words, data); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 7b34f1e66..dcb72a8ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -58,6 +58,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); + +i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index a92b7725d..8c40d6ea1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -43,9 +43,8 @@ #define I40E_PTP_10GB_INCVAL 0x0333333333ULL #define I40E_PTP_1GB_INCVAL 0x2000000000ULL -#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \ - I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) -#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) /** @@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); - if (!(prttsyn_stat & (1 << index))) + if (!(prttsyn_stat & BIT(index))) return; lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 522d6df51..dc0402fe3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ 
b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -873,6 +873,13 @@ #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 #define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */ +#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0 +#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT) +#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1 +#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT) +#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2 +#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT) #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) @@ -3366,4 +3373,1933 @@ #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_VFQF_HREGION_REGION_7_SHIFT 29 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#endif + +#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ +#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 +#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) +#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ +#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 +#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 +#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) +#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 +#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 +#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT) +#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ +#define I40E_MNGSB_FDS_START_BC_SHIFT 0 +#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) +#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 +#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) + +#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) +#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) + +#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, 
I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) +#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) + +#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT) +#define I40E_GL_FWSTS_FWROWD_SHIFT 8 +#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) +#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_CEQPART_MAX_INDEX 15 +#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) +#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ 
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) +#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) +#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */ +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) +#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, 
I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) +#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) +#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) +#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */ +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) +#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) +#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) +#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, 
I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) +#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) +#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) +#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) +#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */ +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) +#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) +#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) +#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFCNT_MAX_INDEX 
15 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) +#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) +#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) +#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) +#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_VFFSIAVCNT(_i) 
(0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 +#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 +#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) +#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 +#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) +#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) +#define 
I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 +#define 
I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 +#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */ +#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) +#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) +#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) +#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) +#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 +#define 
I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) +#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) +#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ +#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 +#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) +#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK 
I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QBASE_MAX_INDEX 127 +#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 +#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) +#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 +#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) +#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) +#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ +#define I40E_GLNVM_AL_REQ_POR_SHIFT 0 +#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) +#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 +#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) +#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 +#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) +#define I40E_GLNVM_AL_REQ_PE_SHIFT 4 +#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) +#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ +#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 +#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT) +#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12 +#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT) +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) + +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT) +#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1 +#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT) +#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT) +#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT) +#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8 +#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT) +#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9 +#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT) +#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10 +#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT) +#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */ +#define 
I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0 +#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1 +#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2 +#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT) +#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3 +#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4 +#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5 +#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6 +#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7 +#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT) +#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8 +#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9 +#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10 +#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT) +#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */ +#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0 +#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1 +#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2 +#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3 +#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4 +#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5 +#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6 +#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7 +#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8 +#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9 +#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10 +#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT) 
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
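/*
 * Editor's note, not part of the upstream patch: the (_i)-parameterised
 * GLPES macros above compute a per-PF register address (stride 4 or 8
 * bytes, with *_MAX_INDEX bounding _i), and wide statistics are split
 * across a 32-bit ...LO register and a 16-bit ...HI register at adjacent
 * addresses. A minimal sketch of assembling one 48-bit counter, assuming
 * the driver's rd32() register-read accessor and struct i40e_hw;
 * i40e_read_stat48() is a hypothetical helper for illustration only:
 */
static inline u64 i40e_read_stat48(struct i40e_hw *hw, u32 lo_reg, u32 hi_reg)
{
	u64 lo = rd32(hw, lo_reg);		/* lower 32 bits of the counter */
	u64 hi = rd32(hw, hi_reg) & 0xFFFF;	/* upper 16 bits of the counter */

	return (hi << 32) | lo;
}

/*
 * Usage sketch for PF index i (0...15 here):
 *
 *	u64 rx_octets = i40e_read_stat48(hw, I40E_GLPES_PFIP4RXOCTSLO(i),
 *					 I40E_GLPES_PFIP4RXOCTSHI(i));
 */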
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 
*/ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) 
* 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: 
PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 +#define 
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
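Note on the GLPES counters defined above: each statistic is split into a LO/HI register pair, the LO half holding the low 32 bits (mask 0xFFFFFFFF) and the HI half the upper bits of a 48-bit counter (mask 0xFFFF). A minimal sketch of reading one such counter follows; the helper name is hypothetical and not part of this patch, and it assumes the driver's usual rd32() accessor and the I40E_MASK() convention used throughout this header:

static u64 i40e_read_pf_ip6_tx_octets(struct i40e_hw *hw, int pf_id)
{
	/* low 32 bits of the 48-bit counter */
	u64 lo = rd32(hw, I40E_GLPES_PFIP6TXOCTSLO(pf_id));
	/* upper 16 bits, isolated with the field mask (field shift is 0) */
	u64 hi = rd32(hw, I40E_GLPES_PFIP6TXOCTSHI(pf_id)) &
		 I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK;

	return (hi << 32) | lo;
}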
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9a4f2bc70..738aca68f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
-	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
@@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 			dev_info(&pdev->dev, "FD filter programming failed due to incorrect filter parameters\n");
 		}
-	} else if (error ==
-		   (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
 				 rx_desc->wb.qword0.hi_dword.fd_id);
@@ -854,15 +853,40 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
  **/
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
-	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-		  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
-		  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
-		  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
-		  /* allow 00 to be written to the index */
-
-	wr32(&vsi->back->hw,
-	     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
-	     val);
+	u16 flags = q_vector->tx.ring[0].flags;
+
+	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		u32 val;
+
+		if (q_vector->arm_wb_state)
+			return;
+
+		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
+
+		wr32(&vsi->back->hw,
+		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+					 vsi->base_vector - 1),
+		     val);
+		q_vector->arm_wb_state = true;
+	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+			  /* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw,
+		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+					 vsi->base_vector - 1), val);
+	} else {
+		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+			  /* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+	}
 }
@@ -892,7 +916,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	 * 20-1249MB/s bulk (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +929,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@@ -923,41 +952,13 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 		break;
 	}
 
-	if (new_itr != rc->itr) {
-		/* do an exponential smoothing */
-		new_itr = (10 * new_itr * rc->itr) /
-			  ((9 * new_itr) + rc->itr);
-		rc->itr = new_itr & I40E_MAX_ITR;
-	}
+	if (new_itr != rc->itr)
+		rc->itr = new_itr;
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
 }
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-	struct i40e_hw *hw = &q_vector->vsi->back->hw;
-	u32 reg_addr;
-	u16 old_itr;
-
-	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
-	old_itr = q_vector->rx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->rx);
-	if (old_itr != q_vector->rx.itr)
-		wr32(hw, reg_addr, q_vector->rx.itr);
-
-	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
-	old_itr = q_vector->tx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->tx);
-	if (old_itr != q_vector->tx.itr)
-		wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
 /**
  * i40e_clean_programming_status - clean the programming status descriptor
  * @rx_ring: the rx ring that has this descriptor
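The hunk above drops the exponential smoothing that previously damped ITR changes; rc->itr now adopts the newly computed value directly. For reference, a standalone arithmetic illustration of what the removed formula did (the values are hypothetical):

#include <stdio.h>

int main(void)
{
	/* old behaviour: blend the new ITR with the current one
	 * instead of adopting it outright
	 */
	unsigned int itr = 100, new_itr = 10;
	unsigned int smoothed = (10 * new_itr * itr) / ((9 * new_itr) + itr);

	printf("smoothed = %u\n", smoothed);	/* 52: a gradual step toward 10 */
	return 0;
}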
@@ -1386,7 +1387,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 		return;
 
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
 	/* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1402,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 		ipv6 = true;
 
 	if (ipv4 &&
-	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
 	 * to arrival speed, in this case the stack can compute
 	 * the csum.
 	 */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1428,7 +1429,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	 * so the total length of IPv4 header is IHL*4 bytes
 	 * The UDP_0 bit *may* bet set if the *inner* header is UDP
 	 */
-	if (ipv4_tunnel) {
+	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+	    (ipv4_tunnel)) {
 		skb->transport_header = skb->mac_header +
 					sizeof(struct ethhdr) +
 					(ip_hdr(skb)->ihl * 4);
@@ -1543,7 +1545,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1584,8 +1586,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1637,7 +1639,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			struct i40e_rx_buffer *next_buffer;
 
 			next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1649,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
@@ -1669,7 +1671,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
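The conversions above swap open-coded (1 << shift) tests for the kernel's BIT() macro from <linux/bitops.h>, a plain unsigned-long shift, so behaviour is unchanged for these 32-bit descriptor status/error words. A small userspace illustration (the macro definitions mirror the kernel's; the values are hypothetical):

#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	unsigned int rx_status = 0x1;	/* pretend the DD bit (bit 0) is set */

	if (rx_status & BIT(0))
		printf("descriptor done\n");

	/* bit positions >= 32 need BIT_ULL(); 1 << 40 would overflow an int */
	unsigned long long wide = BIT_ULL(40);
	printf("wide = 0x%llx\n", wide);
	return 0;
}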
@@ -1730,7 +1732,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1753,7 +1755,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,13 +1773,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			rx_ring->rx_stats.non_eop_descs++;
 			continue;
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			/* TODO: shouldn't we increment a counter indicating the
 			 * drop?
@@ -1802,7 +1804,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1826,6 +1828,68 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 	return total_rx_packets;
 }
 
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u16 old_itr;
+	int vector;
+	u32 val;
+
+	vector = (q_vector->v_idx + vsi->base_vector);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+		old_itr = q_vector->rx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->rx);
+		if (old_itr != q_vector->rx.itr) {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			      (I40E_RX_ITR <<
+			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+			      (q_vector->rx.itr <<
+			       I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			      (I40E_ITR_NONE <<
+			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+	} else {
+		i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+	}
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+		old_itr = q_vector->tx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->tx);
+		if (old_itr != q_vector->tx.itr) {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			      (I40E_TX_ITR <<
+			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+			      (q_vector->tx.itr <<
+			       I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			      (I40E_ITR_NONE <<
+			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+				  vsi->base_vector - 1), val);
+	} else {
+		i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+	}
+}
+
 /**
  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
@@ -1880,35 +1944,29 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 		return budget;
 	}
 
+	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+		q_vector->arm_wb_state = false;
+
 	/* Work is done so exit the polling mode and re-enable the interrupt */
 	napi_complete(napi);
-	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		i40e_update_dynamic_itr(q_vector);
-
-	if (!test_bit(__I40E_DOWN, &vsi->state)) {
-		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-			i40e_irq_dynamic_enable(vsi,
-					q_vector->v_idx + vsi->base_vector);
-		} else {
-			struct i40e_hw *hw = &vsi->back->hw;
-			/* We re-enable the queue 0 cause, but
-			 * don't worry about dynamic_enable
-			 * because we left it on for the other
-			 * possible interrupts during napi
-			 */
-			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-			wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-			qval = rd32(hw, I40E_QINT_TQCTL(0));
-			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-			wr32(hw, I40E_QINT_TQCTL(0), qval);
-
-			i40e_irq_dynamic_enable_icr0(vsi->back);
-		}
+	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+		i40e_update_enable_itr(vsi, q_vector);
+	} else { /* Legacy mode */
+		struct i40e_hw *hw = &vsi->back->hw;
+		/* We re-enable the queue 0 cause, but
+		 * don't worry about dynamic_enable
+		 * because we left it on for the other
+		 * possible interrupts during napi
+		 */
+		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+		wr32(hw, I40E_QINT_RQCTL(0), qval);
+		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+		wr32(hw, I40E_QINT_TQCTL(0), qval);
+		i40e_irq_dynamic_enable_icr0(vsi->back);
 	}
-
 	return 0;
 }
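The napi_poll rework above keeps to the standard NAPI contract: if the budget was exhausted, stay in polling mode; otherwise complete NAPI and re-enable the device interrupt. A generic skeleton of that contract for reference, with hypothetical helper names:

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_clean_rx(budget);	/* hypothetical helper */

	/* budget exhausted: ask the core to poll us again */
	if (work_done == budget)
		return budget;

	/* work finished: leave polling mode, let the NIC interrupt again */
	napi_complete(napi);
	example_enable_irq();				/* hypothetical helper */
	return 0;
}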
+ */ + if (th->fin || th->rst) + return; + } tx_ring->atr_count++; @@ -2037,6 +2102,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; + fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); fdir_desc->rsvd = cpu_to_le32(0); fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); @@ -2244,11 +2312,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + struct udphdr *oudph; + struct iphdr *oiph; u32 l4_tunnel = 0; if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: + oudph = udp_hdr(skb); + oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; @@ -2285,6 +2357,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags &= ~I40E_TX_FLAGS_IPV4; *tx_flags |= I40E_TX_FLAGS_IPV6; } + if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && + (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && + (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { + oudph->check = ~csum_tcpudp_magic(oiph->saddr, + oiph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, 0); + *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + } } else { network_hdr_len = skb_network_header_len(skb); this_ip_hdr = ip_hdr(skb); @@ -2616,6 +2697,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index))) writel(i, tx_ring->tail); + else + prefetchw(tx_desc + 1); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 0dc48dc9c..f1385a198 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -66,17 +66,29 @@ enum i40e_dyn_idx_t { /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ - ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ + BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + +#define 
i40e_pf_get_default_rss_hena(pf) \ + (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -129,17 +141,17 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM (u32)(1) -#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) -#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) -#define I40E_TX_FLAGS_TSO (u32)(1 << 3) -#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) -#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) -#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) -#define I40E_TX_FLAGS_FSO (u32)(1 << 7) -#define I40E_TX_FLAGS_TSYN (u32)(1 << 8) -#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) +#define I40E_TX_FLAGS_CSUM BIT(0) +#define I40E_TX_FLAGS_HW_VLAN BIT(1) +#define I40E_TX_FLAGS_SW_VLAN BIT(2) +#define I40E_TX_FLAGS_TSO BIT(3) +#define I40E_TX_FLAGS_IPV4 BIT(4) +#define I40E_TX_FLAGS_IPV6 BIT(5) +#define I40E_TX_FLAGS_FCCRC BIT(6) +#define I40E_TX_FLAGS_FSO BIT(7) +#define I40E_TX_FLAGS_TSYN BIT(8) +#define I40E_TX_FLAGS_FD_SB BIT(9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -253,6 +265,10 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u16 flags; +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) + /* stats structs */ struct i40e_queue_stats stats; struct u64_stats_sync syncp; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 9a5a75b1e..4842239ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -47,6 +47,11 @@ #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ @@ -120,6 +125,8 @@ enum i40e_mac_type { I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; @@ -213,7 +220,17 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool mfp_mode_1; + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -423,6 +440,7 @@ struct i40e_dcbx_config { #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; struct i40e_dcb_ets_config etsrec; struct i40e_dcb_pfc_config pfc; @@ -487,11 +505,13 @@ struct i40e_hw { /* debug mask */ u32 debug_mask; + char err_str[16]; }; static inline bool i40e_is_vf(struct i40e_hw *hw) { - return hw->mac.type == I40E_MAC_VF; + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); } struct i40e_driver_version { @@ -588,19 +608,23 @@ enum i40e_rx_desc_status_bits { 
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + /* Note: Bit 8 is reserved in X710 and XL710 */ + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; #define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ << I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT @@ -608,8 +632,8 @@ enum i40e_rx_desc_status_bits { I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ - I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, @@ -743,8 +767,7 @@ enum i40e_rx_ptype_payload_layer { I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ - I40E_RXD_QW1_LENGTH_SPH_SHIFT) +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ @@ -920,12 +943,12 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ - I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK @@ -937,6 +960,8 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ I40E_TXD_CTX_QW0_DECTTL_SHIFT) +#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) struct i40e_filter_program_desc { __le32 qindex_flex_ptype_vsi; __le32 rsvd; @@ -955,15 +980,24 @@ struct i40e_filter_program_desc { /* Packet Classifier Types for filters */ enum i40e_filter_pctype { - /* Note: Values 0-30 are reserved for future use */ + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. 
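+ * They are only used on parts that set
+ * I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE; see
+ * i40e_pf_get_default_rss_hena() in i40e_txrx.h.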
+ */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - /* Note: Value 32 is reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-40 are reserved for future use */ + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, @@ -990,8 +1024,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ + BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ @@ -1009,14 +1043,17 @@ enum i40e_filter_program_desc_pcmd { #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ - I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) + #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) @@ -1069,6 +1106,14 @@ struct i40e_eth_stats { u64 tx_errors; /* tepc */ }; +/* Statistics collected per VEB per TC */ +struct i40e_veb_tc_stats { + u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; +}; + #ifdef I40E_FCOE /* Statistics collected per function for FCoE */ struct i40e_fcoe_stats { @@ -1134,6 +1179,8 @@ struct i40e_hw_port_stats { u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 2d20af290..0f8d4156f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -110,7 +110,9 @@ struct i40e_virtchnl_msg { * error regardless of version mismatch. 
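 * Minor version 1 extends this: a v1.1 VF reports a u32 bitmap of its
 * offload capabilities in I40E_VIRTCHNL_OP_GET_VF_RESOURCES (see below).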
*/ #define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 0 +#define I40E_VIRTCHNL_VERSION_MINOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + struct i40e_virtchnl_version_info { u32 major; u32 minor; @@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info { */ /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES - * VF sends this request to PF with no parameters + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing * i40e_virtchnl_vf_resource and one or more * i40e_virtchnl_vsi_resource structures. @@ -143,9 +146,13 @@ struct i40e_virtchnl_vsi_resource { u8 default_mac_addr[ETH_ALEN]; }; /* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 23f95cdbd..d99c11603 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) **/ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) { - struct i40e_hw *hw = &pf->hw; - u32 reg; - - reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); - reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; - wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); - i40e_flush(hw); + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); } /** @@ -282,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, } tempmap = vecmap->rxq_map; for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { - linklistmap |= (1 << - (I40E_VIRTCHNL_SUPPORTED_QTYPES * - vsi_queue_id)); + linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * + vsi_queue_id)); } tempmap = vecmap->txq_map; for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { - linklistmap |= (1 << - (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id - + 1)); + linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * + vsi_queue_id + 1)); } next_q = find_first_bit(&linklistmap, @@ -337,11 +330,23 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, reg = (vector_id) | (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | - (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | + BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); wr32(hw, reg_idx, reg); } + /* if the vf is running in polling mode and using interrupt zero, + * need to disable auto-mask on enabling zero interrupt for VFs. 
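+ * (otherwise the vector would auto-mask on its first assertion and,
+ * since a polling VF never re-arms it, stay masked from then on)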
+ */ + if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) && + (vector_id == 0)) { + reg = rd32(hw, I40E_GLINT_CTL); + if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { + reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; + wr32(hw, I40E_GLINT_CTL, reg); + } + } + irq_list_done: i40e_flush(hw); } @@ -542,11 +547,13 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (vf->port_vlan_id) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id, true, false); + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF MAC addr\n"); - f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, + f = i40e_add_filter(vsi, brdcast, + vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); if (!f) dev_info(&pf->pdev->dev, @@ -835,6 +842,7 @@ complete_reset: i40e_alloc_vf_res(vf); i40e_enable_vf_mappings(vf); set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); @@ -899,7 +907,7 @@ void i40e_free_vfs(struct i40e_pf *pf) for (vf_id = 0; vf_id < tmp; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; - wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } } clear_bit(__I40E_VF_DISABLE, &pf->state); @@ -925,8 +933,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) if (pci_num_vf(pf->pdev) != num_alloc_vfs) { ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); if (ret) { - dev_err(&pf->pdev->dev, - "Failed to enable SR-IOV, error %d.\n", ret); pf->num_alloc_vfs = 0; goto err_iov; } @@ -1123,12 +1129,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, * * called from the VF to request the API version used by the PF **/ -static int i40e_vc_get_version_msg(struct i40e_vf *vf) +static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) { struct i40e_virtchnl_version_info info = { I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR }; + vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg; + /* VFs running the 1.0 API expect to get 1.0 back or they will cry. 
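+ * That is, a 1.0 VF rejects an unexpected minor, so report the
+ * capability-less minor (I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
+ * to it.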
*/ + if (VF_IS_V10(vf)) + info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, (u8 *)&info, sizeof(struct @@ -1143,7 +1153,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf) * * called from the VF to request its resources **/ -static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) +static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) { struct i40e_virtchnl_vf_resource *vfres = NULL; struct i40e_pf *pf = vf->pf; @@ -1167,12 +1177,24 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) len = 0; goto err; } + if (VF_IS_V11(vf)) + vf->driver_caps = *(u32 *)msg; + else + vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | + I40E_VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; - + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + } else { + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; + } vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; @@ -1773,9 +1795,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, valid_len = sizeof(struct i40e_virtchnl_version_info); break; case I40E_VIRTCHNL_OP_RESET_VF: - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: valid_len = 0; break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(vf)) + valid_len = sizeof(u32); + else + valid_len = 0; + break; case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: valid_len = sizeof(struct i40e_virtchnl_txq_info); break; @@ -1888,10 +1915,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, switch (v_opcode) { case I40E_VIRTCHNL_OP_VERSION: - ret = i40e_vc_get_version_msg(vf); + ret = i40e_vc_get_version_msg(vf, msg); break; case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: - ret = i40e_vc_get_vf_resources_msg(vf); + ret = i40e_vc_get_vf_resources_msg(vf, msg); break; case I40E_VIRTCHNL_OP_RESET_VF: i40e_vc_reset_vf_msg(vf); @@ -1969,9 +1996,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); - if (reg & (1 << bit_idx)) { + if (reg & BIT(bit_idx)) { /* clear the bit in GLGEN_VFLRSTAT */ - wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); if (!test_bit(__I40E_DOWN, &pf->state)) i40e_reset_vf(vf, true); @@ -2023,7 +2050,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } /* delete the temporary mac address */ - i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, + i40e_del_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? 
vf->port_vlan_id : -1, true, false); /* Delete all the filters for this VSI - we're going to kill it @@ -2088,7 +2116,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, goto error_pvid; } - if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { + if (le16_to_cpu(vsi->info.pvid) == + (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT))) + /* duplicate request, so just return success */ + goto error_pvid; + + if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) { dev_err(&pf->pdev->dev, "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", vf_id); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 09043c1aa..736f6f08b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -42,6 +42,9 @@ #define I40E_VLAN_MASK 0xFFF #define I40E_PRIORITY_MASK 0x7000 +#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0)) +#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1)) + /* Various queue ctrls */ enum i40e_queue_ctrl { I40E_QUEUE_CTRL_UNKNOWN = 0, @@ -75,6 +78,8 @@ struct i40e_vf { u16 vf_id; /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; + struct i40e_virtchnl_version_info vf_ver; + u32 driver_caps; /* reported by VF driver */ /* VF Port Extender (PE) stag if used */ u16 stag; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index c1d25f8c1..a23ebfd5c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -60,17 +60,6 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw) hw->aq.arq.len = I40E_VF_ARQLEN1; hw->aq.arq.bal = I40E_VF_ARQBAL1; hw->aq.arq.bah = I40E_VF_ARQBAH1; - } else { - hw->aq.asq.tail = I40E_PF_ATQT; - hw->aq.asq.head = I40E_PF_ATQH; - hw->aq.asq.len = I40E_PF_ATQLEN; - hw->aq.asq.bal = I40E_PF_ATQBAL; - hw->aq.asq.bah = I40E_PF_ATQBAH; - hw->aq.arq.tail = I40E_PF_ARQT; - hw->aq.arq.head = I40E_PF_ARQH; - hw->aq.arq.len = I40E_PF_ARQLEN; - hw->aq.arq.bal = I40E_PF_ARQBAL; - hw->aq.arq.bah = I40E_PF_ARQBAH; } } @@ -308,7 +297,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) /* set starting point */ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | - I40E_PF_ATQLEN_ATQENABLE_MASK)); + I40E_VF_ATQLEN1_ATQENABLE_MASK)); wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); @@ -337,7 +326,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) /* set starting point */ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | - I40E_PF_ARQLEN_ARQENABLE_MASK)); + I40E_VF_ARQLEN1_ARQENABLE_MASK)); wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); @@ -384,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -402,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! 
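 * (count is set only after the ring and registers are configured,
 * so a zero count elsewhere now reliably flags an uninitialized
 * queue; cf. the new check in i40evf_clean_arq_element() below)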
*/ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -443,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -461,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -898,8 +887,15 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ - ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); + ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; @@ -959,6 +955,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); return ret_code; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index e715bccfb..c8022092d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -34,8 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 -#define I40E_FW_API_VERSION_A0_MINOR 0x0000 +#define I40E_FW_API_VERSION_MINOR 0x0004 struct i40e_aq_desc { __le16 flags; @@ -133,12 +132,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -260,7 +254,10 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -272,8 +269,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -507,7 +502,8 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x1F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -530,7 +526,9 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define 
I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; @@ -824,8 +822,12 @@ struct i40e_aqc_vsi_properties_data { I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ u8 up_enable_bits; @@ -1066,6 +1068,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -2093,6 +2096,46 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); + /* tunnel key structure 0x0B10 */ struct i40e_aqc_tunnel_key_structure_A0 { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 39fcb1dc4..d45d0ae6b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -54,6 +54,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_20G_KR2: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_SFP_X722: + case I40E_DEV_ID_1G_BASE_T_X722: + case I40E_DEV_ID_10G_BASE_T_X722: + hw->mac.type = I40E_MAC_X722; + break; + case I40E_DEV_ID_X722_VF: + case I40E_DEV_ID_X722_VF_HV: + hw->mac.type = I40E_MAC_X722_VF; + break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; @@ -71,6 +80,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) return status; } +/** + * i40evf_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return 
"I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * i40evf_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) +{ + switch (stat_err) { + case 0: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return 
"I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + return hw->err_str; +} + /** * i40evf_debug_aq * @hw: debug mask related to admin queue @@ -146,7 +361,7 @@ bool i40evf_check_asq_alive(struct i40e_hw *hw) { if (hw->aq.asq.len) return !!(rd32(hw, hw->aq.asq.len) & - I40E_PF_ATQLEN_ATQENABLE_MASK); + I40E_VF_ATQLEN1_ATQENABLE_MASK); else return false; } @@ -177,6 +392,169 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, return status; } +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40evf_fill_default_direct_cmd_desc(&desc, 
+ i40e_aqc_opc_set_rss_lut); + else + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut)); + cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut)); + + status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * i40evf_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * i40evf_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * i40e_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key)); + cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key)); + + status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * i40evf_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct 
i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * i40evf_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); +} + /* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h index 931c88044..00ed24bfc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h @@ -62,6 +62,7 @@ struct i40e_hmc_bp { struct i40e_hmc_pd_entry { struct i40e_hmc_bp bp; u32 sd_index; + bool rsrc_pg; bool valid; }; @@ -126,8 +127,8 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -146,7 +147,7 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index); + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 58e37a44b..55ae4b0f8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -60,6 +60,19 @@ void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); bool i40evf_check_asq_alive(struct i40e_hw *hw); i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); + +i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); i40e_status i40e_set_mac_type(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index 3cc737629..10febcfd7 100644 --- 
a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -27,1580 +27,6 @@ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ -#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) -#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) -#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ -#define I40E_GL_ARQH_ARQH_SHIFT 0 -#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) -#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ -#define I40E_GL_ARQT_ARQT_SHIFT 0 -#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) -#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ -#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) -#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ -#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) -#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ -#define I40E_GL_ATQH_ATQH_SHIFT 0 -#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) -#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ -#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) -#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) -#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) -#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 -#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) -#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) -#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ -#define I40E_GL_ATQT_ATQT_SHIFT 0 -#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) -#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ -#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) -#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ -#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) -#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ -#define I40E_PF_ARQH_ARQH_SHIFT 0 -#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) -#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ -#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) -#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 -#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) -#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 -#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT) -#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 -#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) -#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) -#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ -#define I40E_PF_ARQT_ARQT_SHIFT 0 -#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) 
-#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
-#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
-#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
-#define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
-#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
-#define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAH_MAX_INDEX 127
-#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAL_MAX_INDEX 127
-#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQH_MAX_INDEX 127
-#define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQLEN_MAX_INDEX 127
-#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQT_MAX_INDEX 127
-#define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAH_MAX_INDEX 127
-#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAL_MAX_INDEX 127
-#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQH_MAX_INDEX 127
-#define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQLEN_MAX_INDEX 127
-#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQT_MAX_INDEX 127
-#define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
-#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
-#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
-#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
-#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
-#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
-#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
-#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
-#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
-#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
-#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
-#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
-#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
-#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
-#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
-#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
-#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
-#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
-#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
-#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
-#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
-#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
-#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
-#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
-#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
-#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
-#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
-#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
-#define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
-#define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
-#define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
-#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
-#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
-#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
-#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
-#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
-#define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
-#define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSCA_MAX_INDEX 3
-#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
-#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
-#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
-#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
-#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSRWD_MAX_INDEX 3
-#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
-#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
-#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
-#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
-#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
-#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
-#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
-#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
-#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
-#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
-#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
-#define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
-#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
-#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
-#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
-#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
-#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
-#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
-#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
-#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
-#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
-#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
-#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
-#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
-#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
-#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
-#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
-#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
-#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
-#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
-#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
-#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
-#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_SDPART_MAX_INDEX 15
-#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
-#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
-#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
-#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
-#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
-#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
-#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_PFINT_CEQCTL_MAX_INDEX 511
-#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
-#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
-#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
-#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
-#define I40E_PFINT_ITR0_MAX_INDEX 2
-#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_ITRN_MAX_INDEX 2
-#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
-#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
-#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_RATEN_MAX_INDEX 511 -#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) -#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) -#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_RQCTL_MAX_INDEX 1535 -#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 -#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) -#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 -#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT) -#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) -#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) -#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) -#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) -#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 -#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) -#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_TQCTL_MAX_INDEX 1535 -#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 -#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) -#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 -#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT) -#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) -#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) -#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) -#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) -#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 -#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) -#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 -#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 -#define 
I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 -#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_MAX_INDEX 127 -#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_SWINT_SHIFT 31 -#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) -#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) -#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ -#define 
I40E_VFINT_ITR0_MAX_INDEX 2 -#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN_MAX_INDEX 2 -#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) -#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPINT_AEQCTL_MAX_INDEX 127 -#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) -#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_VPINT_CEQCTL_MAX_INDEX 511 -#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) -#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 -#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) -#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLST0_MAX_INDEX 127 -#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 -#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) -#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 -#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 -#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) -#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 -#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, 
I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_RATE0_MAX_INDEX 127 -#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) -#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) -#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_RATEN_MAX_INDEX 511 -#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) -#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) -#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 -#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) -#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ -#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 -#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) -#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) -#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) -#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ -#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 -#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 -#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 -#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) -#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ -#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 -#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) -#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 -#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) -#define I40E_PFLAN_QALLOC_VALID_SHIFT 31 -#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) -#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QRX_ENA_MAX_INDEX 1535 -#define I40E_QRX_ENA_QENA_REQ_SHIFT 0 -#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) -#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) -#define I40E_QRX_ENA_QENA_STAT_SHIFT 2 -#define 
I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) -#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QRX_TAIL_MAX_INDEX 1535 -#define I40E_QRX_TAIL_TAIL_SHIFT 0 -#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) -#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_CTL_MAX_INDEX 1535 -#define I40E_QTX_CTL_PFVF_Q_SHIFT 0 -#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) -#define I40E_QTX_CTL_PF_INDX_SHIFT 2 -#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT) -#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 -#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) -#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_ENA_MAX_INDEX 1535 -#define I40E_QTX_ENA_QENA_REQ_SHIFT 0 -#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) -#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) -#define I40E_QTX_ENA_QENA_STAT_SHIFT 2 -#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) -#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_HEAD_MAX_INDEX 1535 -#define I40E_QTX_HEAD_HEAD_SHIFT 0 -#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) -#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 -#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) -#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_TAIL_MAX_INDEX 1535 -#define I40E_QTX_TAIL_TAIL_SHIFT 0 -#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) -#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_MAPENA_MAX_INDEX 127 -#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 -#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) -#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_QTABLE_MAX_INDEX 15 -#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 -#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) -#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QBASE_MAX_INDEX 383 -#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 -#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) -#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 -#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) -#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QTABLE_MAX_INDEX 7 -#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 -#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) -#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 -#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) -#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ -#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 -#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) -#define I40E_PRTGL_SAH_MFS_SHIFT 16 -#define 
I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT) -#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ -#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 -#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) -#define 
I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, 
I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) -#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ -#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 -#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) -#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ -#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 -#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) -#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 -#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) -#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) -#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) -#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* 
_i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 -#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 -#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) -#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 -#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 -#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 -#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 -#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) -#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 -#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 -#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) -#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 -#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 -#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) -#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_METF_MAX_INDEX 3 -#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 -#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) -#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 -#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) -#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) -#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 -#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) -#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 
-#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) -#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 -#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) -#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 -#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) -#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 -#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) -#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 -#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) -#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) -#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ -#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 -#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 -#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 -#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 -#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 -#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 -#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 -#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 -#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) -#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ -#define I40E_MSIX_PBA_MAX_INDEX 5 -#define I40E_MSIX_PBA_PENBIT_SHIFT 0 -#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) -#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TADD_MAX_INDEX 128 -#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) -#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TMSG_MAX_INDEX 128 -#define 
I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TUADD_MAX_INDEX 128 -#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TVCTRL_MAX_INDEX 128 -#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) #define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ #define I40E_VFMSIX_PBA1_MAX_INDEX 19 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 @@ -1623,1525 +49,6 @@ #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 #define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) -#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ -#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 -#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) -#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 -#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) -#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 -#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) -#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 -#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) -#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 -#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) -#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 -#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) -#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 -#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) -#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 -#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) -#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 -#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) -#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 -#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) -#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ -#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 -#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT) -#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 -#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) -#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ -#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 -#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) -#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 -#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) -#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 -#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) -#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 -#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) -#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 -#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) -#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ -#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 -#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 -#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, 
I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) -#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ -#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 -#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) -#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 -#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) -#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 -#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT) -#define I40E_GLNVM_SRCTL_START_SHIFT 30 -#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) -#define I40E_GLNVM_SRCTL_DONE_SHIFT 31 -#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) -#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ -#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 -#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) -#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 -#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) -#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ -#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 -#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 -#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 -#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 -#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 -#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 -#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 -#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 -#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 -#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 -#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) -#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ -#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 -#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) -#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ -#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 -#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) -#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ -#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 -#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) -#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ -#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 -#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) -#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 -#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 -#define 
I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 -#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 -#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 -#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 -#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 -#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 -#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 -#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 -#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) -#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 -#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 -#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) -#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 -#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) -#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ -#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 -#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) -#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 -#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) -#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ -#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 -#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) -#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 -#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) -#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 -#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) -#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 -#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) -#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ -#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 -#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) -#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 -#define 
I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 -#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) -#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) -#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 -#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 -#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) -#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 -#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) -#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ -#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 -#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 -#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ -#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 -#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) -#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 -#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) -#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 -#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) -#define 
I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
-#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
-#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
-#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
-#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
-#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
-#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
-#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
-#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
-#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
-#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
-#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
-#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
-#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
-#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
-#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
-#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
-#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
-#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
-#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
-#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
-#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
-#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
-#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
-#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
-#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
-#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
-#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
-#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
-#define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
-#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
-#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
-#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
-#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
-#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
-#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
-#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
-#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
-#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
-#define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
-#define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
-#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
-#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
-#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
-#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
-#define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
-#define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
-#define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
-#define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DHW_MAX_INDEX 7
-#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DLW_MAX_INDEX 7
-#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DPS_MAX_INDEX 7
-#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SHT_MAX_INDEX 7
-#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
-#define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SLT_MAX_INDEX 7
-#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
-#define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
-#define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
-#define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
-#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
-#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
-#define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
-#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
-#define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
-#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
-#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
-#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
-#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_GLQF_HKEY_MAX_INDEX 12
-#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
-#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
-#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
-#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
-#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
-#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
-#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
-#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HSYM_MAX_INDEX 63
-#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_GLQF_PCNT_MAX_INDEX 511
-#define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_SWAP_MAX_INDEX 1
-#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
-#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
-#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
-#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
-#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_PFQF_HENA_MAX_INDEX 1
-#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_PFQF_HKEY_MAX_INDEX 12
-#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
-#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
-#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
-#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_PFQF_HLUT_MAX_INDEX 127
-#define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
-#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
-#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
-#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
-#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
-#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HENA1_MAX_INDEX 1
-#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY1_MAX_INDEX 12
-#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HLUT1_MAX_INDEX 15
-#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
-#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
-#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
-#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION1_MAX_INDEX 7
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPQF_CTL_MAX_INDEX 127
-#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
-#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_CTL_MAX_INDEX 383
-#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_TCREGION_MAX_INDEX 3
-#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOECRC_MAX_INDEX 143
-#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDDPC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOELAST_MAX_INDEX 143
-#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPRC_MAX_INDEX 143
-#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPTC_MAX_INDEX 143
-#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOERPDC_MAX_INDEX 143
-#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
-#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR2_L_MAX_INDEX 143
-#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
-#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
-#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCH_MAX_INDEX 3
-#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCL_MAX_INDEX 3
-#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCH_MAX_INDEX 3
-#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCL_MAX_INDEX 3
-#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
-#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LDPC_MAX_INDEX 3
-#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MLFC_MAX_INDEX 3
-#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCH_MAX_INDEX 3
-#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCL_MAX_INDEX 3
-#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCH_MAX_INDEX 3
-#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCL_MAX_INDEX 3
-#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MRFC_MAX_INDEX 3
-#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127H_MAX_INDEX 3
-#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127L_MAX_INDEX 3
-#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255H_MAX_INDEX 3
-#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255L_MAX_INDEX 3
-#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511H_MAX_INDEX 3
-#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511L_MAX_INDEX 3
-#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64H_MAX_INDEX 3
-#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64L_MAX_INDEX 3
-#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127H_MAX_INDEX 3
-#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127L_MAX_INDEX 3
-#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255H_MAX_INDEX 3
-#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255L_MAX_INDEX 3
-#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511H_MAX_INDEX 3
-#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511L_MAX_INDEX 3
-#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64H_MAX_INDEX 3
-#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64L_MAX_INDEX 3
-#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RDPC_MAX_INDEX 3
-#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RFC_MAX_INDEX 3
-#define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RJC_MAX_INDEX 3
-#define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RLEC_MAX_INDEX 3
-#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ROC_MAX_INDEX 3
-#define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUC_MAX_INDEX 3
-#define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUPP_MAX_INDEX 3
-#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDOLD_MAX_INDEX 3
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCH_MAX_INDEX 3
-#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCL_MAX_INDEX 3
-#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCH_MAX_INDEX 3
-#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCL_MAX_INDEX 3
-#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCH_MAX_INDEX 15
-#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCL_MAX_INDEX 15
-#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCH_MAX_INDEX 15
-#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCL_MAX_INDEX 15
-#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCH_MAX_INDEX 15
-#define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCL_MAX_INDEX 15
-#define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCH_MAX_INDEX 15
-#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCL_MAX_INDEX 15
-#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCH_MAX_INDEX 15
-#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCL_MAX_INDEX 15
-#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCH_MAX_INDEX 15
-#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCL_MAX_INDEX 15
-#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_RUPP_MAX_INDEX 15
-#define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_TDPC_MAX_INDEX 15
-#define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCH_MAX_INDEX 15
-#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCL_MAX_INDEX 15
-#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCH_MAX_INDEX 15
-#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCL_MAX_INDEX 15
-#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCH_MAX_INDEX 383
-#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCL_MAX_INDEX 383
-#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCH_MAX_INDEX 383
-#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCL_MAX_INDEX 383
-#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCH_MAX_INDEX 383
-#define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCL_MAX_INDEX 383
-#define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCH_MAX_INDEX 383
-#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCL_MAX_INDEX 383
-#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCH_MAX_INDEX 383
-#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCL_MAX_INDEX 383
-#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCH_MAX_INDEX 383
-#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCL_MAX_INDEX 383
-#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RDPC_MAX_INDEX 383
-#define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RUPP_MAX_INDEX 383
-#define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_TEPC_MAX_INDEX 383
-#define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCH_MAX_INDEX 383
-#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCL_MAX_INDEX 383
-#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCH_MAX_INDEX 383
-#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCL_MAX_INDEX 383
-#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
-#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
-#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) -#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) -#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ -#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 -#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) -#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ -#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 -#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) -#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) -#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) -#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ -#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 -#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 -#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) -#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 -#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) -#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ -#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 -#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 -#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) -#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) -#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) -#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) -#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ 
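[Editor's note: every register in the block being dropped from this VF header follows one access pattern: a base address (optionally indexed, e.g. "(_i) * 32"), *_SHIFT/*_MASK pairs built with I40E_MASK() for each field, and 64-bit quantities split across an _L/_H register pair. A minimal sketch of a read under these definitions, assuming the rd32() accessor used by the driver code later in this patch (the helper itself is illustrative, not part of the driver):

static u64 example_read_tsyn_time(struct i40e_hw *hw)
{
	u32 lo = rd32(hw, I40E_PRTTSYN_TIME_L);
	u32 hi = rd32(hw, I40E_PRTTSYN_TIME_H);

	/* combine the two 32-bit halves into one 64-bit timestamp */
	return ((u64)hi << 32) | lo;
}

Field extraction works the same way throughout, e.g.:
	sign = (val & I40E_PRTTSYN_ADJ_SIGN_MASK) >> I40E_PRTTSYN_ADJ_SIGN_SHIFT;]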
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) -#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) -#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 -#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ -#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 -#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) -#define I40E_GL_MDET_RX_EVENT_SHIFT 8 -#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT) -#define I40E_GL_MDET_RX_QUEUE_SHIFT 17 -#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT) -#define I40E_GL_MDET_RX_VALID_SHIFT 31 -#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT) -#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */ -#define I40E_GL_MDET_TX_QUEUE_SHIFT 0 -#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT) -#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12 -#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT) -#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21 -#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT) -#define I40E_GL_MDET_TX_EVENT_SHIFT 25 -#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT) -#define I40E_GL_MDET_TX_VALID_SHIFT 31 -#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT) -#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */ -#define I40E_PF_MDET_RX_VALID_SHIFT 0 -#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT) -#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */ -#define I40E_PF_MDET_TX_VALID_SHIFT 0 -#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT) -#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */ -#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0 -#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT) -#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 -#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) -#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 -#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) -#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_RX_MAX_INDEX 127 -#define I40E_VP_MDET_RX_VALID_SHIFT 0 -#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) -#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_TX_MAX_INDEX 127 -#define I40E_VP_MDET_TX_VALID_SHIFT 0 -#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) -#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ -#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 -#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) -#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 -#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) -#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 -#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) -#define 
I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 -#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) -#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 -#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) -#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ -#define I40E_PFPM_APM_APME_SHIFT 0 -#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) -#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) -#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ -#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 -#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) -#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ -#define I40E_PFPM_WUFC_LNKC_SHIFT 0 -#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) -#define I40E_PFPM_WUFC_MAG_SHIFT 1 -#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) -#define I40E_PFPM_WUFC_MNG_SHIFT 3 -#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) -#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 -#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 -#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 -#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 -#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 -#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 -#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 -#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 -#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX0_SHIFT 16 -#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) -#define I40E_PFPM_WUFC_FLX1_SHIFT 17 -#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) -#define I40E_PFPM_WUFC_FLX2_SHIFT 18 -#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) -#define I40E_PFPM_WUFC_FLX3_SHIFT 19 -#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) -#define I40E_PFPM_WUFC_FLX4_SHIFT 20 -#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) -#define I40E_PFPM_WUFC_FLX5_SHIFT 21 -#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) -#define I40E_PFPM_WUFC_FLX6_SHIFT 22 -#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) -#define I40E_PFPM_WUFC_FLX7_SHIFT 23 -#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) -#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) -#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ -#define I40E_PFPM_WUS_LNKC_SHIFT 0 -#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) -#define I40E_PFPM_WUS_MAG_SHIFT 1 -#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, 
I40E_PFPM_WUS_MAG_SHIFT) -#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 -#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) -#define I40E_PFPM_WUS_MNG_SHIFT 3 -#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) -#define I40E_PFPM_WUS_FLX0_SHIFT 16 -#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) -#define I40E_PFPM_WUS_FLX1_SHIFT 17 -#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) -#define I40E_PFPM_WUS_FLX2_SHIFT 18 -#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) -#define I40E_PFPM_WUS_FLX3_SHIFT 19 -#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) -#define I40E_PFPM_WUS_FLX4_SHIFT 20 -#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) -#define I40E_PFPM_WUS_FLX5_SHIFT 21 -#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) -#define I40E_PFPM_WUS_FLX6_SHIFT 22 -#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) -#define I40E_PFPM_WUS_FLX7_SHIFT 23 -#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) -#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) -#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ -#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 -#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) -#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 -#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) -#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAH_MAX_INDEX 3 -#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 -#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) -#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 -#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) -#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 -#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) -#define I40E_PRTPM_SAH_AV_SHIFT 31 -#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT) -#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAL_MAX_INDEX 3 -#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 -#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ #define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 #define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) @@ -3366,4 +273,64 @@ #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_VFQF_HREGION_REGION_7_SHIFT 29 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#endif +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK 
I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define 
I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 395f32f22..7e91d825c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -366,15 +366,32 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
  **/
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
-	u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
-		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
-		  I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
-		  I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
-	/* allow 00 to be written to the index */
-
-	wr32(&vsi->back->hw,
-	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
-	     val);
+	u16 flags = q_vector->tx.ring[0].flags;
+
+	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		u32 val;
+
+		if (q_vector->arm_wb_state)
+			return;
+
+		val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
+
+		wr32(&vsi->back->hw,
+		     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+					  vsi->base_vector - 1),
+		     val);
+		q_vector->arm_wb_state = true;
+	} else {
+		u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+			  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+			  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
+		/* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw,
+		     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+					  vsi->base_vector - 1), val);
+	}
 }
 
 /**
@@ -404,7 +421,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	 * 20-1249MB/s bulk (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -417,9 +434,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@@ -435,42 +457,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 		break;
 	}
 
-	if (new_itr != rc->itr) {
-		/* do an exponential smoothing */
-		new_itr = (10 * new_itr * rc->itr) /
-			  ((9 * new_itr) + rc->itr);
-		rc->itr = new_itr & I40E_MAX_ITR;
-	}
+	if (new_itr != rc->itr)
+		rc->itr = new_itr;
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
 }
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-	struct i40e_hw *hw = &q_vector->vsi->back->hw;
-	u32 reg_addr;
-	u16 old_itr;
-
-	reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
-	old_itr = q_vector->rx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->rx);
-	if (old_itr != q_vector->rx.itr)
-		wr32(hw, reg_addr, q_vector->rx.itr);
-
-	reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
-	old_itr = q_vector->tx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->tx);
-	if (old_itr != q_vector->tx.itr)
-		wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
+/*
 * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
@@ -873,7 +867,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 		return;
 
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
 	/* both known and outer_ip must be set for the below code to work */
@@ -888,25 +882,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 		ipv6 = true;
 
 	if (ipv4 &&
-	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1027,7 +1021,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			    I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1063,8 +1057,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1116,7 +1110,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			struct i40e_rx_buffer *next_buffer;
 
 			next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1120,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
@@ -1141,7 +1135,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1202,7 +1196,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			    I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1220,7 +1214,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,13 +1232,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			rx_ring->rx_stats.non_eop_descs++;
 			continue;
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			/* TODO: shouldn't we increment a counter indicating the
			 * drop?
@@ -1262,7 +1256,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1280,6 +1274,67 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 	return total_rx_packets;
 }
 
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u16 old_itr;
+	int vector;
+	u32 val;
+
+	vector = (q_vector->v_idx + vsi->base_vector);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+		old_itr = q_vector->rx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->rx);
+		if (old_itr != q_vector->rx.itr) {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			      I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+			      (I40E_RX_ITR <<
+			       I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+			      (q_vector->rx.itr <<
+			       I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+		} else {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			      I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+			      (I40E_ITR_NONE <<
+			       I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+	} else {
+		i40evf_irq_enable_queues(vsi->back, 1
+			<< q_vector->v_idx);
+	}
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+		old_itr = q_vector->tx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->tx);
+		if (old_itr != q_vector->tx.itr) {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			      I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+			      (I40E_TX_ITR <<
+			       I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+			      (q_vector->tx.itr <<
+			       I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+		} else {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			      I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+			      (I40E_ITR_NONE <<
+			       I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+	} else {
+		i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+	}
+}
+
 /**
 * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
@@ -1334,15 +1389,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 		return budget;
 	}
 
+	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+		q_vector->arm_wb_state = false;
+
 	/* Work is done so exit the polling mode and re-enable the interrupt */
 	napi_complete(napi);
-	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		i40e_update_dynamic_itr(q_vector);
-
-	if (!test_bit(__I40E_DOWN, &vsi->state))
-		i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
-
+	i40e_update_enable_itr(vsi, q_vector);
 	return 0;
 }
 
@@ -1476,11 +1528,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 	struct iphdr *this_ip_hdr;
 	u32 network_hdr_len;
 	u8 l4_hdr = 0;
+	struct udphdr *oudph;
+	struct iphdr *oiph;
 	u32 l4_tunnel = 0;
 
 	if (skb->encapsulation) {
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
+			oudph = udp_hdr(skb);
+			oiph = ip_hdr(skb);
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
@@ -1519,6 +1575,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		}
 
+		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
+		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+					oiph->daddr,
+					(skb->len - skb_transport_offset(skb)),
+					IPPROTO_UDP, 0);
+			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+		}
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
 		this_ip_hdr = ip_hdr(skb);
@@ -1841,6 +1906,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
 						   tx_ring->queue_index)))
 		writel(i, tx_ring->tail);
+	else
+		prefetchw(tx_desc + 1);
 
 	return;
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index e7a34f899..9a30f5d8c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -66,17 +66,29 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+	BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+
BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + +#define i40e_pf_get_default_rss_hena(pf) \ + (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -129,16 +141,16 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM (u32)(1) -#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) -#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) -#define I40E_TX_FLAGS_TSO (u32)(1 << 3) -#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) -#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) -#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) -#define I40E_TX_FLAGS_FSO (u32)(1 << 7) -#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) +#define I40E_TX_FLAGS_CSUM BIT(0) +#define I40E_TX_FLAGS_HW_VLAN BIT(1) +#define I40E_TX_FLAGS_SW_VLAN BIT(2) +#define I40E_TX_FLAGS_TSO BIT(3) +#define I40E_TX_FLAGS_IPV4 BIT(4) +#define I40E_TX_FLAGS_IPV6 BIT(5) +#define I40E_TX_FLAGS_FCCRC BIT(6) +#define I40E_TX_FLAGS_FSO BIT(7) +#define I40E_TX_FLAGS_FD_SB BIT(9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -250,6 +262,10 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u16 flags; +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) + /* stats structs */ struct i40e_queue_stats stats; struct u64_stats_sync syncp; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index c463ec415..24a269386 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -47,6 +47,11 @@ #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ @@ -120,6 +125,8 @@ enum i40e_mac_type { I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; @@ -213,7 +220,17 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool mfp_mode_1; + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -417,6 +434,7 @@ struct i40e_ieee_app_priority_table { struct i40e_dcbx_config { u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ struct i40e_ieee_ets_config etscfg; struct i40e_ieee_ets_recommend etsrec; struct i40e_ieee_pfc_config pfc; @@ -481,11 +499,13 @@ struct i40e_hw { /* debug mask */ u32 debug_mask; + char err_str[16]; }; static inline bool i40e_is_vf(struct i40e_hw *hw) { - return hw->mac.type == 
I40E_MAC_VF; + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); } struct i40e_driver_version { @@ -582,19 +602,23 @@ enum i40e_rx_desc_status_bits { I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + /* Note: Bit 8 is reserved in X710 and XL710 */ + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; #define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ << I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT @@ -602,8 +626,8 @@ enum i40e_rx_desc_status_bits { I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ - I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, @@ -737,8 +761,7 @@ enum i40e_rx_ptype_payload_layer { I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ - I40E_RXD_QW1_LENGTH_SPH_SHIFT) +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ @@ -914,12 +937,12 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ - I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK @@ -931,6 +954,8 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ I40E_TXD_CTX_QW0_DECTTL_SHIFT) +#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) struct i40e_filter_program_desc { __le32 qindex_flex_ptype_vsi; __le32 rsvd; @@ -949,15 +974,24 @@ struct i40e_filter_program_desc { /* Packet Classifier Types for filters */ enum i40e_filter_pctype { - /* Note: Values 0-30 are reserved for future use */ + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. 
+ */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - /* Note: Value 32 is reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-40 are reserved for future use */ + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, @@ -984,8 +1018,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ + BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ @@ -1003,8 +1037,7 @@ enum i40e_filter_program_desc_pcmd { #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ - I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) @@ -1063,6 +1096,14 @@ struct i40e_eth_stats { u64 tx_errors; /* tepc */ }; +/* Statistics collected per VEB per TC */ +struct i40e_veb_tc_stats { + u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; +}; + /* Statistics collected by the MAC */ struct i40e_hw_port_stats { /* eth stats collected by the port */ @@ -1109,6 +1150,8 @@ struct i40e_hw_port_stats { u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 59f62f0e6..e6db20e8a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -110,7 +110,9 @@ struct i40e_virtchnl_msg { * error regardless of version mismatch. */ #define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 0 +#define I40E_VIRTCHNL_VERSION_MINOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + struct i40e_virtchnl_version_info { u32 major; u32 minor; @@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info { */ /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES - * VF sends this request to PF with no parameters + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing * i40e_virtchnl_vf_resource and one or more * i40e_virtchnl_vsi_resource structures. 
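[Editor's note: the minor-version bump above is the heart of the virtchnl change: a 1.1 VF announces itself with I40E_VIRTCHNL_OP_VERSION and may then pass a capability bitmap (the I40E_VIRTCHNL_VF_OFFLOAD_* flags that follow) with I40E_VIRTCHNL_OP_GET_VF_RESOURCES, while a 1.0 VF sends that request with no payload. A minimal sketch of the VF side, assuming i40e_aq_send_msg_to_pf() as used by this driver's virtchnl code (error handling elided; the function name is illustrative):

static i40e_status example_send_api_ver(struct i40e_hw *hw)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;	/* offer 1.1 */

	return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_VERSION, 0,
				      (u8 *)&vvi, sizeof(vvi), NULL);
}

If the PF answers 1.0, the VF must fall back to the old behaviour; the PF_IS_V11() helper added to i40evf.h further down gates exactly that.]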
@@ -143,9 +146,13 @@ struct i40e_virtchnl_vsi_resource { u8 default_mac_addr[ETH_ALEN]; }; /* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index fea3b75a9..3817cbbf4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -101,6 +101,8 @@ struct i40e_vsi { #define MAX_RX_QUEUES 8 #define MAX_TX_QUEUES MAX_RX_QUEUES +#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) + /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. */ @@ -115,6 +117,7 @@ struct i40e_q_vector { u8 num_ringpairs; /* total number of ring pairs in vector */ int v_idx; /* vector index in list */ char name[IFNAMSIZ + 9]; + bool arm_wb_state; cpumask_var_t affinity_mask; }; @@ -207,33 +210,39 @@ struct i40evf_adapter { struct msix_entry *msix_entries; u32 flags; -#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) -#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) -#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) -#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3) -#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4) -#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) -#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) -#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) -#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8) -#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9) -#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10) -/* duplcates for common code */ +#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) +#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2) +#define I40EVF_FLAG_RX_PS_ENABLED BIT(3) +#define I40EVF_FLAG_IN_NETPOLL BIT(4) +#define I40EVF_FLAG_IMIR_ENABLED BIT(5) +#define I40EVF_FLAG_MQ_CAPABLE BIT(6) +#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) +#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) +#define I40EVF_FLAG_RESET_PENDING BIT(9) +#define I40EVF_FLAG_RESET_NEEDED BIT(10) +#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) +#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) +/* duplicates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 #define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED +#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE +#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE /* flags for admin queue service task */ u32 aq_required; -#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) -#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) -#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) -#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) -#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) -#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) -#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) -#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) -#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) +#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0) 
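[Editor's note: the pervasive conversions in this hunk and the surrounding ones replace open-coded (u32)(1 << n) flags with the kernel's bitops helpers, defined in <linux/bitops.h> at this kernel version as:

#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

Besides being shorter, BIT_ULL() is what keeps the RSS hash-enable masks in i40e_txrx.h correct: several I40E_FILTER_PCTYPE_* values sit above bit 31, where a plain 1 << n would overflow a 32-bit int.]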
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1) +#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) +#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) +#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) +#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) +#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) +#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) +#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) +#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) +#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) /* OS defined structs */ struct net_device *netdev; @@ -249,8 +258,17 @@ struct i40evf_adapter { bool netdev_registered; bool link_up; enum i40e_virtchnl_ops current_op; +#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_IWARP) +#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_VLAN) struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + struct i40e_virtchnl_version_info pf_version; +#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ + ((_a)->pf_version.minor == 1)) u16 msg_enable; struct i40e_eth_stats current_stats; struct i40e_vsi vsi; @@ -264,6 +282,7 @@ extern const char i40evf_driver_version[]; int i40evf_up(struct i40evf_adapter *adapter); void i40evf_down(struct i40evf_adapter *adapter); +int i40evf_process_config(struct i40evf_adapter *adapter); void i40evf_reset(struct i40evf_adapter *adapter); void i40evf_set_ethtool_ops(struct net_device *netdev); void i40evf_update_stats(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 2b53c870e..4790437a5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, switch (cmd->flow_type) { case TCP_V4_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; @@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, break; case TCP_V6_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; @@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: return -EINVAL; @@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena &= 
~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: return -EINVAL; @@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; default: return -EINVAL; @@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; @@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case IPV6_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 4ab4ebba0..e85849b9f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,10 +34,10 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.25" +#define DRV_VERSION "1.3.5" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = - "Copyright (c) 2013 - 2014 Intel Corporation."; + "Copyright (c) 2013 - 2015 Intel Corporation."; /* i40evf_pci_tbl - PCI Device ID Table * @@ -49,6 +49,7 @@ static const char i40evf_copyright[] = */ static const struct pci_device_id i40evf_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, /* required last entry */ {0, } }; @@ -203,7 
+204,7 @@ static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); - wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK); + wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); /* read flush */ rd32(hw, I40E_VFGEN_RSTAT); @@ -240,11 +241,11 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) int i; for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & (1 << (i - 1))) { + if (mask & BIT(i - 1)) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK); } } } @@ -262,17 +263,17 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); - dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); } for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & (1 << i)) { + if (mask & BIT(i)) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); - dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); } } @@ -312,7 +313,7 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) val = rd32(hw, I40E_VFINT_DYN_CTL01); - val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; + val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, val); /* schedule work on the private workqueue */ @@ -377,7 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.count++; q_vector->tx.latency_range = I40E_LOW_LATENCY; q_vector->num_ringpairs++; - q_vector->ring_mask |= (1 << t_idx); + q_vector->ring_mask |= BIT(t_idx); } /** @@ -406,7 +407,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) /* The ideal configuration... * We have enough vectors to map one per queue. 
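The hunks above mechanically replace open-coded shifts such as (1 << n) and ((u64)1 << pctype) with the kernel's BIT() and BIT_ULL() helpers. A minimal stand-alone sketch of why the distinction matters; the macro bodies mirror the kernel's bitops headers, and bit position 36 is illustrative rather than a real PCTYPE value:

#include <assert.h>
#include <stdint.h>

/* Mirrors of the kernel helpers: BIT() shifts an unsigned long, BIT_ULL()
 * an unsigned long long, so bit positions at or above 32 stay well-defined
 * where a plain (1 << n) int shift would overflow.
 */
#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	uint64_t hena = 0;

	hena |= BIT_ULL(36);	/* illustrative bit above position 31 */
	assert(hena == (1ULL << 36));
	assert(BIT(3) == 8UL);
	return 0;
}

The hena PCTYPE positions used by this driver run past bit 31, which is why the ethtool hunks use BIT_ULL(), while the MSI-X vector masks, which stay well below 32 bits, use plain BIT().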
*/ - if (q_vectors == (rxr_remaining * 2)) { + if (q_vectors >= (rxr_remaining * 2)) { for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx); @@ -892,8 +893,10 @@ static void i40evf_set_rx_mode(struct net_device *netdev) break; } } + if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) + found = true; } - if (found) { + if (!found) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; } @@ -1169,6 +1172,113 @@ out: return err; } +/** + * i40evf_configure_rss_aq - Prepare for RSS using AQ commands + * @vsi: vsi structure + * @seed: RSS hash seed + **/ +static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +{ + struct i40e_aqc_get_set_rss_key_data rss_key; + struct i40evf_adapter *adapter = vsi->back; + struct i40e_hw *hw = &adapter->hw; + int ret = 0, i; + u8 *rss_lut; + + if (!vsi->id) + return; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", + adapter->current_op); + return; + } + + memset(&rss_key, 0, sizeof(rss_key)); + memcpy(&rss_key, seed, sizeof(rss_key)); + + rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL); + if (!rss_lut) + return; + + /* Populate the LUT with max no. of PF queues in round robin fashion */ + for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++) + rss_lut[i] = i % adapter->num_active_queues; + + ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key); + if (ret) { + dev_err(&adapter->pdev->dev, + "Cannot set RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return; + } + + ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut, + (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4); + if (ret) + dev_err(&adapter->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); +} + +/** + * i40evf_configure_rss_reg - Prepare for RSS if used + * @adapter: board private structure + * @seed: RSS hash seed + **/ +static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter, + const u8 *seed) +{ + struct i40e_hw *hw = &adapter->hw; + u32 *seed_dw = (u32 *)seed; + u32 cqueue = 0; + u32 lut = 0; + int i, j; + + /* Fill out hash function seed */ + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); + + /* Populate the LUT with max no. of 
PF queues in round robin fashion */ + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + lut = 0; + for (j = 0; j < 4; j++) { + if (cqueue == adapter->num_active_queues) + cqueue = 0; + lut |= ((cqueue) << (8 * j)); + cqueue++; + } + wr32(hw, I40E_VFQF_HLUT(i), lut); + } + i40e_flush(hw); +} + +/** + * i40evf_configure_rss - Prepare for RSS + * @adapter: board private structure + **/ +static void i40evf_configure_rss(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + u8 seed[I40EVF_HKEY_ARRAY_SIZE]; + u64 hena; + + netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); + + /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ + hena = I40E_DEFAULT_RSS_HENA; + wr32(hw, I40E_VFQF_HENA(0), (u32)hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + + if (RSS_AQ(adapter)) + i40evf_configure_rss_aq(&adapter->vsi, seed); + else + i40evf_configure_rss_reg(adapter, seed); +} + /** * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize @@ -1369,6 +1479,10 @@ static void i40evf_watchdog_task(struct work_struct *work) } goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) { + i40evf_send_vf_config_msg(adapter); + goto watchdog_done; + } if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { i40evf_disable_queues(adapter); @@ -1410,6 +1524,16 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) { + /* This message goes straight to the firmware, not the + * PF, so we don't have to set current_op as we will + * not get a response through the ARQ. + */ + i40evf_configure_rss(adapter); + adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; + goto watchdog_done; + } + if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); watchdog_done: @@ -1432,45 +1556,6 @@ restart_watchdog: schedule_work(&adapter->adminq_task); } -/** - * i40evf_configure_rss - Prepare for RSS - * @adapter: board private structure - **/ -static void i40evf_configure_rss(struct i40evf_adapter *adapter) -{ - u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; - struct i40e_hw *hw = &adapter->hw; - u32 cqueue = 0; - u32 lut = 0; - int i, j; - u64 hena; - - /* Hash type is configured by the PF - we just supply the key */ - netdev_rss_key_fill(rss_key, sizeof(rss_key)); - - /* Fill out hash function seed */ - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); - - /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - hena = I40E_DEFAULT_RSS_HENA; - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - - /* Populate the LUT with max no. 
of queues in round robin fashion */ - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (cqueue == adapter->num_active_queues) - cqueue = 0; - lut |= ((cqueue) << (8 * j)); - cqueue++; - } - wr32(hw, I40E_VFQF_HLUT(i), lut); - } - i40e_flush(hw); -} - #define I40EVF_RESET_WAIT_MS 10 #define I40EVF_RESET_WAIT_COUNT 500 /** @@ -1604,7 +1689,8 @@ continue_reset: dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", err); - i40evf_map_queues(adapter); + adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -1614,7 +1700,7 @@ continue_reset: list_for_each_entry(f, &adapter->vlan_filter_list, list) { f->add = true; } - adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); i40evf_misc_irq_enable(adapter); @@ -1693,34 +1779,34 @@ static void i40evf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); oldval = val; - if (val & I40E_VF_ARQLEN_ARQVFE_MASK) { + if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); - val &= ~I40E_VF_ARQLEN_ARQVFE_MASK; + val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; } - if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) { + if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); - val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK; + val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; } - if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) { + if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); - val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK; + val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.arq.len, val); val = rd32(hw, hw->aq.asq.len); oldval = val; - if (val & I40E_VF_ATQLEN_ATQVFE_MASK) { + if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) { dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); - val &= ~I40E_VF_ATQLEN_ATQVFE_MASK; + val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; } - if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) { + if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); - val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK; + val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; } - if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) { + if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); - val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK; + val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.asq.len, val); @@ -1856,6 +1942,7 @@ static int i40evf_open(struct net_device *netdev) if (err) goto err_req_irq; + i40evf_add_filter(adapter, adapter->hw.mac.addr); i40evf_configure(adapter); err = i40evf_up_complete(adapter); @@ -1978,6 +2065,62 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) return -EBUSY; } +/** + * i40evf_process_config - Process the config information we got from the PF + * @adapter: board private structure + * + * Verify that we have a valid config struct, and set up our netdev features + * and our VSI struct. 
+ **/ +int i40evf_process_config(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + /* got VF config message back from PF, now we can parse it */ + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (adapter->vf_res->vf_offload_flags + & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { + netdev->vlan_features = netdev->features; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + netdev->features |= NETIF_F_HIGHDMA | + NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_GRO; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features; + netdev->hw_features &= ~NETIF_F_RXCSUM; + + adapter->vsi.id = adapter->vsi_res->vsi_id; + + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); + adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); + adapter->vsi.netdev = adapter->netdev; + return 0; +} + /** * i40evf_init_task - worker thread to perform delayed initialization * @work: pointer to work_struct containing our data @@ -1996,10 +2139,9 @@ static void i40evf_init_task(struct work_struct *work) struct i40evf_adapter, init_task.work); struct net_device *netdev = adapter->netdev; - struct i40evf_mac_filter *f; struct i40e_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - int i, err, bufsz; + int err, bufsz; switch (adapter->state) { case __I40EVF_STARTUP: @@ -2050,6 +2192,12 @@ static void i40evf_init_task(struct work_struct *work) if (err) { if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) err = i40evf_send_api_ver(adapter); + else + dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", + adapter->pf_version.major, + adapter->pf_version.minor, + I40E_VIRTCHNL_VERSION_MAJOR, + I40E_VIRTCHNL_VERSION_MINOR); goto err; } err = i40evf_send_vf_config_msg(adapter); @@ -2085,42 +2233,15 @@ static void i40evf_init_task(struct work_struct *work) default: goto err_alloc; } - /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < adapter->vf_res->num_vsis; i++) { - if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) - adapter->vsi_res = &adapter->vf_res->vsi_res[i]; - } - if (!adapter->vsi_res) { - dev_err(&pdev->dev, "No LAN VSI found\n"); + if (i40evf_process_config(adapter)) goto err_alloc; - } + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - netdev->features |= NETIF_F_HIGHDMA | - NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM | - NETIF_F_GRO; - - if (adapter->vf_res->vf_offload_flags - & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features; - netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; - } - - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; - 
netdev->hw_features &= ~NETIF_F_RXCSUM; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", @@ -2130,16 +2251,6 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - goto err_sw_init; - - ether_addr_copy(f->macaddr, adapter->hw.mac.addr); - f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - - list_add(&f->list, &adapter->mac_filter_list); - init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = &i40evf_watchdog_timer; adapter->watchdog_timer.data = (unsigned long)adapter; @@ -2154,24 +2265,14 @@ static void i40evf_init_task(struct work_struct *work) if (err) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); - i40evf_configure_rss(adapter); + if (!RSS_AQ(adapter)) + i40evf_configure_rss(adapter); err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; netif_carrier_off(netdev); - adapter->vsi.id = adapter->vsi_res->vsi_id; - adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */ - adapter->vsi.back = adapter; - adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; - adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC | - ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); - adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | - ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); - adapter->vsi.netdev = adapter->netdev; - if (!adapter->netdev_registered) { err = register_netdev(netdev); if (err) @@ -2190,6 +2291,13 @@ static void i40evf_init_task(struct work_struct *work) adapter->state = __I40EVF_DOWN; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); + + if (RSS_AQ(adapter)) { + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + } else { + i40evf_configure_rss(adapter); + } return; restart: schedule_delayed_work(&adapter->init_task, @@ -2299,7 +2407,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &adapter->hw; hw->back = adapter; - adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; adapter->state = __I40EVF_STARTUP; /* Call save state here because it relies on the adapter struct. 
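i40evf_configure_rss_reg(), added above, writes the hash key and then packs four 8-bit queue indices into each 32-bit HLUT register, assigning queues round-robin. A stand-alone model of that packing; the register count and queue count here are illustrative stand-ins for I40E_VFQF_HLUT_MAX_INDEX + 1 and num_active_queues:

#include <stdio.h>

#define HLUT_REGS	16	/* illustrative; the driver uses I40E_VFQF_HLUT_MAX_INDEX + 1 */

int main(void)
{
	unsigned int num_queues = 4;	/* stand-in for num_active_queues */
	unsigned int cqueue = 0;
	unsigned int i, j;

	for (i = 0; i < HLUT_REGS; i++) {
		unsigned int lut = 0;

		/* four 8-bit LUT entries per 32-bit register */
		for (j = 0; j < 4; j++) {
			if (cqueue == num_queues)
				cqueue = 0;
			lut |= cqueue++ << (8 * j);
		}
		printf("HLUT[%02u] = 0x%08x\n", i, lut);
	}
	return 0;
}

Running it shows each register carrying the queue indices 0..num_queues-1 repeating byte by byte, which is the even spreading across active queues the driver is after.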
*/ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 61e090558..d4eb1a5e7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (err) - dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n", - op, err, hw->aq.asq_last_status); + dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", + op, i40evf_stat_str(hw, err), + i40evf_aq_str(hw, hw->aq.asq_last_status)); return err; } @@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) } pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; - if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) || - (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR)) + adapter->pf_version = *pf_vvi; + + if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || + ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && + (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) err = -EIO; out_alloc: @@ -145,8 +149,24 @@ out: **/ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) { - return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); + u32 caps; + + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | + I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + if (PF_IS_V11(adapter)) + return i40evf_send_pf_msg(adapter, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); + else + return i40evf_send_pf_msg(adapter, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); } /** @@ -274,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->num_active_queues) - 1; + vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, @@ -299,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->num_active_queues) - 1; + vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, @@ -708,8 +728,9 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, return; } if (v_retval) { - dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", - __func__, v_retval, v_opcode); + dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n", + __func__, v_retval, + i40evf_stat_str(&adapter->hw, v_retval), v_opcode); } switch (v_opcode) { case I40E_VIRTCHNL_OP_GET_STATS: { @@ -729,6 +750,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->current_stats = *stats; } break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: { + u16 len = sizeof(struct i40e_virtchnl_vf_resource) + + I40E_MAX_VF_VSI * + sizeof(struct 
i40e_virtchnl_vsi_resource); + memcpy(adapter->vf_res, msg, min(msglen, len)); + i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + i40evf_process_config(adapter); + } + break; case I40E_VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ i40evf_irq_enable(adapter, true); @@ -740,7 +770,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, i40evf_free_all_rx_resources(adapter); break; case I40E_VIRTCHNL_OP_VERSION: - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: /* Don't display an error if we get these out of sequence. * If the firmware needed to get kicked, we'll get these and diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b0182dd31..7a73510e5 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) if (ret_val) return ret_val; - /* reset page to 0 */ - ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); - if (ret_val) - return ret_val; if (data & E1000_M88E1112_STATUS_LINK) port = E1000_MEDIA_PORT_OTHER; @@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) if (port && (hw->dev_spec._82575.media_port != port)) { hw->dev_spec._82575.media_port = port; hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + igb_check_for_link_82575(hw); } else { - ret_val = igb_check_for_link_82575(hw); + igb_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; } return 0; @@ -223,6 +231,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) /* Verify phy id and set remaining function pointers */ switch (phy->id) { case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1111_I_PHY_ID: @@ -235,7 +244,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) else phy->ops.get_cable_length = igb_get_cable_length_m88; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; - /* Check if this PHY is confgured for media swap. */ + /* Check if this PHY is configured for media swap. 
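In the I40E_VIRTCHNL_OP_GET_VF_RESOURCES completion just above, the reply length is clamped with min(msglen, len) before the memcpy() into the preallocated vf_res buffer, so a short or oversized PF message cannot overrun it. The same bounded-copy shape in isolation, with illustrative names and sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RES_BUF_LEN	128	/* illustrative size of the preallocated buffer */

/* Copy a peer-supplied message into a fixed buffer, trusting the sender's
 * length only after clamping it to the buffer size.
 */
static size_t bounded_copy(uint8_t *dst, size_t dst_len,
			   const uint8_t *msg, size_t msg_len)
{
	size_t n = msg_len < dst_len ? msg_len : dst_len;

	memcpy(dst, msg, n);
	return n;
}

int main(void)
{
	uint8_t vf_res[RES_BUF_LEN];
	uint8_t msg[256] = { 0 };	/* pretend oversized PF reply */

	printf("copied %zu of %zu bytes\n",
	       bounded_copy(vf_res, sizeof(vf_res), msg, sizeof(msg)),
	       sizeof(msg));
	return 0;
}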
*/ if (phy->id == M88E1112_E_PHY_ID) { u16 data; @@ -258,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->mac.ops.check_for_link = igb_check_for_link_media_swap; } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = igb_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } break; case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; @@ -889,6 +903,7 @@ out: **/ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) { + struct e1000_phy_info *phy = &hw->phy; s32 ret_val; /* This isn't a true "hard" reset, but is the only reset @@ -905,7 +920,11 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) goto out; ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + if (phy->id == M88E1512_E_PHY_ID) + ret_val = igb_initialize_M88E1512_phy(hw); out: return ret_val; } @@ -1579,6 +1598,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I210_I_PHY_ID: ret_val = igb_copper_link_setup_m88_gen2(hw); break; @@ -2621,7 +2641,8 @@ s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) u16 phy_data; if ((hw->phy.media_type != e1000_media_type_copper) || - (phy->id != M88E1543_E_PHY_ID)) + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) goto out; if (!hw->dev_spec._82575.eee_disable) { @@ -2701,7 +2722,8 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) /* Check if EEE is supported on this device. */ if ((hw->phy.media_type != e1000_media_type_copper) || - (phy->id != M88E1543_E_PHY_ID)) + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) goto out; ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index f8684aa28..b1915043b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -604,6 +604,10 @@ #define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 #define E1000_M88E1112_PAGE_ADDR 0x16 #define E1000_M88E1112_STATUS 0x01 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 /* PCI Express Control */ #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 @@ -861,6 +865,7 @@ #define M88_VENDOR 0x0141 #define I210_I_PHY_ID 0x01410C00 #define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index c1bb64d83..23ec28f43 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,5 +1,5 @@ /* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. + * Copyright(c) 2007-2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,9 +36,6 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; -#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_m88_cable_length_table) / \ - sizeof(e1000_m88_cable_length_table[0])) static const u16 e1000_igp_2_cable_length_table[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, @@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = { 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 124}; -#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_igp_2_cable_length_table) / \ - sizeof(e1000_igp_2_cable_length_table[0])) /** * igb_check_reset_block - Check if PHY reset is blocked @@ -1268,6 +1262,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) switch (hw->phy.id) { case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I210_I_PHY_ID: reset_dsp = false; break; @@ -1276,9 +1272,9 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) reset_dsp = false; break; } - if (!reset_dsp) + if (!reset_dsp) { hw_dbg("Link taking longer than expected.\n"); - else { + } else { /* We didn't get link. * Reset the DSP and cross our fingers. */ @@ -1303,6 +1299,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) if (hw->phy.type != e1000_phy_m88 || hw->phy.id == I347AT4_E_PHY_ID || hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == M88E1543_E_PHY_ID || + hw->phy.id == M88E1512_E_PHY_ID || hw->phy.id == I210_I_PHY_ID) goto out; @@ -1700,7 +1698,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw) index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { ret_val = -E1000_ERR_PHY; goto out; } @@ -1743,6 +1741,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) phy->cable_length = phy_data / (is_cm ? 100 : 1); break; case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I347AT4_E_PHY_ID: /* Remember the original page select and set it to 7 */ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, @@ -1796,7 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { ret_val = -E1000_ERR_PHY; goto out; } @@ -1840,7 +1839,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) s32 ret_val = 0; u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; - u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { IGP02E1000_PHY_AGC_A, IGP02E1000_PHY_AGC_B, @@ -1863,7 +1862,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) IGP02E1000_AGC_LENGTH_MASK; /* Array index bound check. 
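The cable-length hunks in this file replace the hand-rolled *_CABLE_LENGTH_TABLE_SIZE macros with ARRAY_SIZE(), which cannot drift out of sync with the table it measures. A stand-alone sketch of the idiom; the kernel's version additionally rejects non-array arguments, omitted here:

#include <assert.h>

/* Classic element-count idiom; only valid on true arrays, not pointers. */
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const unsigned short cable_length_table[] = {
	0, 50, 80, 110, 140, 140	/* illustrative values */
};

int main(void)
{
	assert(ARRAY_SIZE(cable_length_table) == 6);
	return 0;
}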
*/ - if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) || (cur_agc_index == 0)) { ret_val = -E1000_ERR_PHY; goto out; @@ -2194,6 +2193,90 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw) return 0; } +/** + * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1512 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. 
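igb_initialize_M88E1512_phy() above is a fixed sequence of paged register writes, each followed by the same error check. A hypothetical table-driven rendition of that sequence, shown purely as a design alternative: write_phy_reg() is a stub standing in for phy->ops.write_reg(), and the offsets are the raw values behind E1000_M88E1543_PAGE_ADDR and the E1000_M88E1512_* defines added in e1000_defines.h, including the page-0 restore that follows below:

#include <stdint.h>
#include <stdio.h>

struct phy_write { uint16_t reg; uint16_t val; };

static int write_phy_reg(uint16_t reg, uint16_t val)
{
	printf("phy[0x%02x] <= 0x%04x\n", (unsigned int)reg, (unsigned int)val);
	return 0;	/* a real accessor can fail; the loop checks this */
}

int main(void)
{
	static const struct phy_write seq[] = {
		{ 0x16, 0x00FF },	/* page 0xFF */
		{ 0x11, 0x214B }, { 0x10, 0x2144 },
		{ 0x11, 0x0C28 }, { 0x10, 0x2146 },
		{ 0x11, 0xB233 }, { 0x10, 0x214D },
		{ 0x11, 0xCC0C }, { 0x10, 0x2159 },
		{ 0x16, 0x00FB },	/* page 0xFB */
		{ 0x07, 0x000D },
		{ 0x16, 0x0012 },	/* page 0x12 */
		{ 0x14, 0x8001 },	/* SGMII-to-copper mode */
		{ 0x16, 0x0000 },	/* back to page 0 */
	};
	unsigned int i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		if (write_phy_reg(seq[i].reg, seq[i].val))
			return 1;
	return 0;
}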
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + /** * igb_power_up_phy_copper - Restore copper link in case of PHY power down * @hw: pointer to the HW structure diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 7af4ffab0..24d55edbb 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -61,6 +61,7 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, void igb_power_up_phy_copper(struct e1000_hw *hw); void igb_power_down_phy_copper(struct e1000_hw *hw); s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 6f0490d0e..4af2870e4 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -104,6 +104,8 @@ #define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ #define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ #define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ #define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ #define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ #define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 0afc0913e..74262768b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2159,6 +2159,27 @@ static int igb_set_coalesce(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); int i; + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_rx_coalesce || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -ENOTSUPP; + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || ((ec->rx_coalesce_usecs > 3) && (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || @@ -2396,10 +2417,6 @@ static int igb_get_ts_info(struct net_device *dev, info->rx_filters |= (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 
return 0; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 5bc9fca67..e174fbbdb 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -57,8 +57,8 @@ #include "igb.h" #define MAJ 5 -#define MIN 2 -#define BUILD 18 +#define MIN 3 +#define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; @@ -179,6 +179,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +static int igb_disable_sriov(struct pci_dev *dev); +static int igb_pci_disable_sriov(struct pci_dev *dev); #endif #ifdef CONFIG_PM @@ -2649,7 +2651,11 @@ err_eeprom: if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: + kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif pci_iounmap(pdev, hw->hw_addr); err_ioremap: free_netdev(netdev); @@ -2809,14 +2815,14 @@ static void igb_remove(struct pci_dev *pdev) */ igb_release_hw_control(adapter); - unregister_netdev(netdev); - - igb_clear_interrupt_scheme(adapter); - #ifdef CONFIG_PCI_IOV igb_disable_sriov(pdev); #endif + unregister_netdev(netdev); + + igb_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); @@ -2980,6 +2986,8 @@ static int igb_sw_init(struct igb_adapter *adapter) } #endif /* CONFIG_PCI_IOV */ + igb_probe_vfs(adapter); + igb_init_queue_configuration(adapter); /* Setup and initialize a copy of the hw vlan table array */ @@ -2992,8 +3000,6 @@ static int igb_sw_init(struct igb_adapter *adapter) return -ENOMEM; } - igb_probe_vfs(adapter); - /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter); @@ -6633,22 +6639,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size); #endif + unsigned int pull_len; - if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - va += IGB_TS_HDR_LEN; - size -= IGB_TS_HDR_LEN; - } + if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + va += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } + if (likely(size <= IGB_RX_HDR_LEN)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as-is */ @@ -6660,8 +6669,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); return igb_can_reuse_rx_page(rx_buffer, page, truesize); } @@ -6802,62 +6824,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring, return true; } -/** - * igb_pull_tail - igb specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being adjusted - * - * This function is an igb specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - */ -static void igb_pull_tail(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - /* retrieve timestamp from buffer */ - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - - /* update pointers to remove timestamp header */ - skb_frag_size_sub(frag, IGB_TS_HDR_LEN); - frag->page_offset += IGB_TS_HDR_LEN; - skb->data_len -= IGB_TS_HDR_LEN; - skb->len -= IGB_TS_HDR_LEN; - - /* move va to start of packet data */ - va += IGB_TS_HDR_LEN; - } - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
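The reworked igb_add_rx_frag() above and the igb_pull_tail() helper it absorbs (whose removal continues below) share one idea: copy only the protocol headers into the skb's linear area and leave the payload in the page fragment. A toy stand-alone model; toy_headlen() is a made-up stand-in for eth_get_headlen(), and 54 bytes is just an illustrative Ethernet+IPv4+TCP header size:

#include <stdio.h>
#include <string.h>

#define HDR_MAX	256	/* illustrative cap; the driver uses IGB_RX_HDR_LEN */

/* Pretend the protocol headers are always 54 bytes, clamped to the cap. */
static unsigned int toy_headlen(const unsigned char *va, unsigned int max)
{
	unsigned int len = 54;

	(void)va;
	return len < max ? len : max;
}

int main(void)
{
	unsigned char frame[1514] = { 0 };	/* received buffer (page) */
	unsigned char linear[HDR_MAX];		/* skb linear area */
	unsigned char *va = frame;
	unsigned int size = sizeof(frame);
	unsigned int pull_len = toy_headlen(va, HDR_MAX);

	/* headers go to the linear area... */
	memcpy(linear, va, pull_len);
	/* ...and the rest stays in place as a page fragment */
	va += pull_len;
	size -= pull_len;

	printf("linear: %u bytes, frag: %u bytes\n", pull_len, size);
	return 0;
}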
- */ - pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - /** * igb_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on @@ -6885,10 +6851,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, } } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - igb_pull_tail(rx_ring, rx_desc, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -7457,6 +7419,7 @@ static int igb_resume(struct device *dev) if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + rtnl_unlock(); return -ENOMEM; } @@ -7550,6 +7513,7 @@ static int igb_sriov_reinit(struct pci_dev *dev) igb_init_queue_configuration(adapter); if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index c3a9392cb..5982f28d5 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -405,7 +405,7 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) wr32(E1000_CTRL_EXT, ctrl_ext); } -static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) +static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq) { static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, @@ -424,6 +424,14 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, }; + static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = { + TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0, + TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0, + }; + static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, @@ -445,11 +453,17 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) tssdp &= ~AUX1_TS_SDP_EN; tssdp &= ~ts_sdp_sel_clr[pin]; - if (chan == 1) - tssdp |= ts_sdp_sel_tt1[pin]; - else - tssdp |= ts_sdp_sel_tt0[pin]; - + if (freq) { + if (chan == 1) + tssdp |= ts_sdp_sel_fc1[pin]; + else + tssdp |= ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= ts_sdp_sel_tt1[pin]; + else + tssdp |= ts_sdp_sel_tt0[pin]; + } tssdp |= ts_sdp_en[pin]; wr32(E1000_TSSDP, tssdp); @@ -463,10 +477,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); struct e1000_hw *hw = &igb->hw; - u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh; + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; unsigned long flags; struct timespec ts; - int pin = -1; + int use_freq = 0, pin = -1; s64 ns; switch (rq->type) { @@ -511,40 +525,58 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, ts.tv_nsec = rq->perout.period.nsec; ns = timespec_to_ns(&ts); ns = ns >> 1; - if (on && ns < 500000LL) { - /* 2k interrupts per 
second is an awful lot. */ - return -EINVAL; + if (on && ns <= 70000000LL) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; } ts = ns_to_timespec(ns); if (rq->perout.index == 1) { - tsauxc_mask = TSAUXC_EN_TT1; - tsim_mask = TSINTR_TT1; + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + } trgttiml = E1000_TRGTTIML1; trgttimh = E1000_TRGTTIMH1; + freqout = E1000_FREQOUT1; } else { - tsauxc_mask = TSAUXC_EN_TT0; - tsim_mask = TSINTR_TT0; + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + } trgttiml = E1000_TRGTTIML0; trgttimh = E1000_TRGTTIMH0; + freqout = E1000_FREQOUT0; } spin_lock_irqsave(&igb->tmreg_lock, flags); tsauxc = rd32(E1000_TSAUXC); tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } if (on) { int i = rq->perout.index; - - igb_pin_perout(igb, i, pin); + igb_pin_perout(igb, i, pin, use_freq); igb->perout[i].start.tv_sec = rq->perout.start.sec; igb->perout[i].start.tv_nsec = rq->perout.start.nsec; igb->perout[i].period.tv_sec = ts.tv_sec; igb->perout[i].period.tv_nsec = ts.tv_nsec; wr32(trgttimh, rq->perout.start.sec); wr32(trgttiml, rq->perout.start.nsec); + if (use_freq) + wr32(freqout, ns); tsauxc |= tsauxc_mask; tsim |= tsim_mask; - } else { - tsauxc &= ~tsauxc_mask; - tsim &= ~tsim_mask; } wr32(E1000_TSAUXC, tsauxc); wr32(E1000_TSIM, tsim); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 95af14e13..686fa7184 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -319,6 +319,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); + buffer_info->dma = 0; skb_put(skb, hlen); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ac3ac2a20..edf1fb913 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -630,6 +630,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21) #define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) #define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) +#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) @@ -644,6 +645,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) #define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) +#ifdef CONFIG_IXGBE_VXLAN +#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) +#endif /* Tx fast path data */ int num_tx_queues; @@ -757,7 +761,9 @@ struct ixgbe_adapter { u32 timer_event_accumulator; u32 vferr_refcount; struct ixgbe_mac_addr *mac_table; +#ifdef CONFIG_IXGBE_VXLAN u16 vxlan_port; +#endif struct kobject *info_kobj; #ifdef CONFIG_IXGBE_HWMON struct hwmon_buff *ixgbe_hwmon_buff; @@ -967,4 +973,5 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring); u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); +void ixgbe_store_reta(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 6b87d9634..dd7062fed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -504,16 +504,12 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) **/ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) { - u32 autoc2_reg, fwsm; + u32 autoc2_reg; u16 ee_ctrl_2 = 0; hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); - /* Check to see if MNG FW could be enabled */ - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); - - if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) && - !hw->wol_enabled && + if (!ixgbe_mng_present(hw) && !hw->wol_enabled && ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; @@ -1245,6 +1241,25 @@ mac_reset_top: return status; } +/** + * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + * @fdircmd: current value of FDIRCMD register + */ +static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) +{ + int i; + + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + return 0; + udelay(10); + } + + return IXGBE_ERR_FDIR_CMD_INCOMPLETE; +} + /** * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. * @hw: pointer to hardware structure @@ -1253,6 +1268,8 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) { int i; u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + u32 fdircmd; + s32 err; fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; @@ -1260,15 +1277,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) * Before starting reinitialization process, * FDIRCMD.CMD must be zero. 
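ixgbe_fdir_check_cmd_complete(), introduced above, folds three duplicated poll loops (table reinit, perfect-filter write, perfect-filter erase) into one helper that rereads FDIRCMD a bounded number of times and stops as soon as the command field clears. A runnable sketch of the pattern; read_reg() is a fake register that reports busy for the first few reads, and the bound and mask are illustrative:

#include <stdint.h>
#include <stdio.h>

#define CMD_POLL_LIMIT	10	/* illustrative bound */
#define CMD_MASK	0x3	/* illustrative command field */

/* Fake register that reports busy for the first few reads. */
static unsigned int reads;
static uint32_t read_reg(void)
{
	return (++reads < 4) ? CMD_MASK : 0;
}

/* Returns 0 once the command field clears, -1 if the bound is hit; the
 * caller also gets the last value for further decoding, mirroring how
 * the driver inspects FDIRCMD_FILTER_VALID after the poll.
 */
static int poll_cmd_complete(uint32_t *val)
{
	int i;

	for (i = 0; i < CMD_POLL_LIMIT; i++) {
		*val = read_reg();
		if (!(*val & CMD_MASK))
			return 0;
		/* the driver udelay()s 10us here; omitted in this model */
	}
	return -1;
}

int main(void)
{
	uint32_t val;

	printf("poll result: %d (after %u reads)\n",
	       poll_cmd_complete(&val), reads);
	return 0;
}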
*/ - for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { - if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & - IXGBE_FDIRCMD_CMD_MASK)) - break; - udelay(10); - } - if (i >= IXGBE_FDIRCMD_CMD_POLL) { - hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n"); - return IXGBE_ERR_FDIR_REINIT_FAILED; + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n"); + return err; } IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); @@ -1394,14 +1406,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) /* * Continue setup of fdirctrl register bits: * Turn perfect match filtering on - * Report hash in RSS field of Rx wb descriptor * Initialize the drop queue * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | - IXGBE_FDIRCTRL_REPORT_STATUS | (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | @@ -1509,20 +1519,28 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, * @input: unique input dword * @common: compressed common input dword * @queue: queue index to direct traffic to + * + * Note that the tunnel bit in input must not be set when the hardware + * tunneling support does not exist. **/ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue) { - u64 fdirhashcmd; - u32 fdircmd; + u64 fdirhashcmd; + u8 flow_type; + bool tunnel; + u32 fdircmd; /* * Get the flow_type in order to program FDIRCMD properly * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ - switch (input.formatted.flow_type) { + tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); + flow_type = input.formatted.flow_type & + (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); + switch (flow_type) { case IXGBE_ATR_FLOW_TYPE_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: case IXGBE_ATR_FLOW_TYPE_SCTPV4: @@ -1538,8 +1556,10 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, /* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; - fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + if (tunnel) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; /* * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits @@ -1760,6 +1780,7 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id, u8 queue) { u32 fdirport, fdirvlan, fdirhash, fdircmd; + s32 err; /* currently IPv6 is not supported, must be programmed with 0 */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), @@ -1808,6 +1829,11 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + hw_dbg(hw, "Flow Director command did not complete!\n"); + return err; + } return 0; } @@ -1817,9 +1843,8 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id) { u32 fdirhash; - u32 fdircmd = 0; - u32 retry_count; - s32 
err = 0; + u32 fdircmd; + s32 err; /* configure FDIRHASH register */ fdirhash = input->formatted.bkt_hash; @@ -1832,18 +1857,12 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, /* Query if filter is present */ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); - for (retry_count = 10; retry_count; retry_count--) { - /* allow 10us for query to process */ - udelay(10); - /* verify query completed successfully */ - fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); - if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) - break; + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + hw_dbg(hw, "Flow Director command did not complete!\n"); + return err; } - if (!retry_count) - err = IXGBE_ERR_FDIR_REINIT_FAILED; - /* if filter exists in hardware then remove it */ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); @@ -1852,7 +1871,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_FDIRCMD_CMD_REMOVE_FLOW); } - return err; + return 0; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 4c1c26732..3f56a8080 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3905,3 +3905,18 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) } } } + +/** ixgbe_mng_present - returns true when management capability is present + * @hw: pointer to hardware structure + **/ +bool ixgbe_mng_present(struct ixgbe_hw *hw) +{ + u32 fwsm; + + if (hw->mac.type < ixgbe_mac_82599EB) + return false; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + fwsm &= IXGBE_FWSM_MODE_MASK; + return fwsm == IXGBE_FWSM_FW_MODE_PT; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index ec015fed8..2f779f35d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -113,6 +113,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); +bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index ec7b2324b..ab2edc8e7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -166,6 +166,8 @@ static int ixgbe_get_settings(struct net_device *netdev, /* set the supported link speeds */ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ecmd->supported |= SUPPORTED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL) + ecmd->supported |= SUPPORTED_2500baseX_Full; if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) ecmd->supported |= SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) @@ -177,6 +179,8 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + ecmd->advertising |= ADVERTISED_2500baseX_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) ecmd->advertising |= ADVERTISED_1000baseT_Full; 
} else { @@ -286,6 +290,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_2500); + break; case IXGBE_LINK_SPEED_1GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_1000); break; @@ -2868,6 +2875,14 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type < ixgbe_mac_X550) + return 16; + else + return 64; +} + static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -2907,6 +2922,44 @@ static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } +static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + + if (hfunc) + return -EINVAL; + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + ixgbe_rss_indir_tbl_max(adapter)); + + /*Allow at least 2 queues w/ SR-IOV.*/ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + (max_queues < 2)) + max_queues = 2; + + /* Verify user input. */ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); + + ixgbe_store_reta(adapter); + + return 0; +} + static int ixgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { @@ -2938,14 +2991,6 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: @@ -3167,6 +3212,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_rxfh_indir_size = ixgbe_rss_indir_size, .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, + .set_rxfh = ixgbe_set_rxfh, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ae21e0b06..63b2cfe94 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -65,6 +65,9 @@ #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" +#ifdef CONFIG_IXGBE_VXLAN +#include <net/vxlan.h> +#endif char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -79,7 +82,7 @@ static char ixgbe_default_device_descr[] = #define DRV_VERSION "4.0.1-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2014 Intel Corporation."; + "Copyright (c) 1999-2015 Intel Corporation."; static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter"; @@ -243,13 +246,20 @@ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, int expected_gts) { + struct ixgbe_hw *hw = &adapter->hw; int max_gts = 0; enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; struct pci_dev *pdev; - /* determine whether to use the the parent device + /* Some devices are not connected over PCIe and thus do not negotiate + * speed. These devices do not have valid bus info, and thus any report + * we generate may not be correct. */ + if (hw->bus.type == ixgbe_bus_type_internal) + return; + + /* determine whether to use the parent device */ if (ixgbe_pcie_from_parent(&adapter->hw)) pdev = adapter->pdev->bus->parent->self; else @@ -1360,14 +1370,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) } #endif /* CONFIG_IXGBE_DCA */ + +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - if (ring->netdev->features & NETIF_F_RXHASH) - skb_set_hash(skb, - le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), - PKT_HASH_TYPE_L3); + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } #ifdef IXGBE_FCOE @@ -1414,7 +1441,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) { encap_pkt = true; skb->encapsulation = 1; - skb->ip_summed = CHECKSUM_NONE; } /* if IP and error */ @@ -3287,7 +3313,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
*/ -static void ixgbe_store_reta(struct ixgbe_adapter *adapter) +void ixgbe_store_reta(struct ixgbe_adapter *adapter) { u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; @@ -4245,6 +4271,21 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) } } +static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); +#ifdef CONFIG_IXGBE_VXLAN + adapter->vxlan_port = 0; +#endif + break; + default: + break; + } +} + #ifdef CONFIG_IXGBE_DCB /** * ixgbe_configure_dcb - Configure DCB hardware @@ -5285,6 +5326,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; +#endif +#ifdef CONFIG_IXGBE_VXLAN + adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; #endif break; default: @@ -5737,10 +5781,11 @@ static int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); -#if IS_ENABLED(CONFIG_IXGBE_VXLAN) + ixgbe_clear_vxlan_port(adapter); +#ifdef CONFIG_IXGBE_VXLAN vxlan_get_rx_port(netdev); - #endif + return 0; err_set_queues: @@ -5761,7 +5806,15 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) { ixgbe_ptp_suspend(adapter); - ixgbe_down(adapter); + if (adapter->hw.phy.ops.enter_lplu) { + adapter->hw.phy.reset_disable = true; + ixgbe_down(adapter); + adapter->hw.phy.ops.enter_lplu(&adapter->hw); + adapter->hw.phy.reset_disable = false; + } else { + ixgbe_down(adapter); + } + ixgbe_free_irq(adapter); ixgbe_free_all_tx_resources(adapter); @@ -6327,6 +6380,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) struct net_device *upper; struct list_head *iter; u32 link_speed = adapter->link_speed; + const char *speed_str; bool flow_rx, flow_tx; /* only continue if link was previously down */ @@ -6364,14 +6418,24 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); - e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", - (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? - "10 Gbps" : - (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? - "1 Gbps" : - (link_speed == IXGBE_LINK_SPEED_100_FULL ? - "100 Mbps" : - "unknown speed"))), + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + speed_str = "10 Gbps"; + break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + speed_str = "2.5 Gbps"; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + speed_str = "1 Gbps"; + break; + case IXGBE_LINK_SPEED_100_FULL: + speed_str = "100 Mbps"; + break; + default: + speed_str = "unknown speed"; + break; + } + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str, ((flow_rx && flow_tx) ? "RX/TX" : (flow_rx ? "RX" : (flow_tx ? 
"TX" : "None")))); @@ -6800,6 +6864,12 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } +#ifdef CONFIG_IXGBE_VXLAN + if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; + vxlan_get_rx_port(adapter->netdev); + } +#endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); @@ -6896,31 +6966,55 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && !(first->tx_flags & IXGBE_TX_FLAGS_CC)) return; + vlan_macip_lens = skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; } else { u8 l4_hdr = 0; - switch (first->protocol) { - case htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_inner_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT; + } + + /* use first 4 bits to determine IP version */ + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = ip_hdr(skb)->protocol; + l4_hdr = network_hdr.ipv4->protocol; break; - case htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); - l4_hdr = ipv6_hdr(skb)->nexthdr; + case 6: + vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; + l4_hdr = network_hdr.ipv6->nexthdr; break; default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); + "partial checksum but version=%d\n", + network_hdr.ipv4->version); } - break; } switch (l4_hdr) { case IPPROTO_TCP: type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = tcp_hdrlen(skb) << + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << IXGBE_ADVTXD_L4LEN_SHIFT; break; case IPPROTO_SCTP: @@ -6946,7 +7040,6 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, } /* vlan_macip_lens: MACLEN, VLAN tag */ - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, @@ -7201,6 +7294,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct ipv6hdr *ipv6; } hdr; struct tcphdr *th; + struct sk_buff *skb; +#ifdef CONFIG_IXGBE_VXLAN + u8 encap = false; +#endif /* CONFIG_IXGBE_VXLAN */ __be16 vlan_id; /* if ring doesn't have a interrupt vector, cannot perform ATR */ @@ -7214,16 +7311,36 @@ static void ixgbe_atr(struct ixgbe_ring *ring, ring->atr_count++; /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(first->skb); + skb = first->skb; + hdr.network = skb_network_header(skb); + if (skb->encapsulation) { +#ifdef CONFIG_IXGBE_VXLAN + struct ixgbe_adapter *adapter = q_vector->adapter; - /* Currently only IPv4/IPv6 with TCP is supported */ - if ((first->protocol != htons(ETH_P_IPV6) || - hdr.ipv6->nexthdr != IPPROTO_TCP) && - (first->protocol != htons(ETH_P_IP) || - hdr.ipv4->protocol != IPPROTO_TCP)) + if (!adapter->vxlan_port) + 
return; + if (first->protocol != htons(ETH_P_IP) || + hdr.ipv4->version != IPVERSION || + hdr.ipv4->protocol != IPPROTO_UDP) { + return; + } + if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port) + return; + encap = true; + hdr.network = skb_inner_network_header(skb); + th = inner_tcp_hdr(skb); +#else return; - - th = tcp_hdr(first->skb); +#endif /* CONFIG_IXGBE_VXLAN */ + } else { + /* Currently only IPv4/IPv6 with TCP is supported */ + if ((first->protocol != htons(ETH_P_IPV6) || + hdr.ipv6->nexthdr != IPPROTO_TCP) && + (first->protocol != htons(ETH_P_IP) || + hdr.ipv4->protocol != IPPROTO_TCP)) + return; + th = tcp_hdr(skb); + } /* skip this packet since it is invalid or the socket is closing */ if (!th || th->fin) @@ -7272,6 +7389,11 @@ static void ixgbe_atr(struct ixgbe_ring *ring, hdr.ipv6->daddr.s6_addr32[3]; } +#ifdef CONFIG_IXGBE_VXLAN + if (encap) + input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; +#endif /* CONFIG_IXGBE_VXLAN */ + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, input, common, ring->queue_index); @@ -7737,9 +7859,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) bool pools; /* Hardware supports up to 8 traffic classes */ - if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || - (hw->mac.type == ixgbe_mac_82598EB && - tc < MAX_TRAFFIC_CLASS)) + if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + + if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) return -EINVAL; pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); @@ -7898,12 +8021,23 @@ static int ixgbe_set_features(struct net_device *netdev, need_reset = true; netdev->features = features; + +#ifdef CONFIG_IXGBE_VXLAN + if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { + if (features & NETIF_F_RXCSUM) + adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; + else + ixgbe_clear_vxlan_port(adapter); + } +#endif /* CONFIG_IXGBE_VXLAN */ + if (need_reset) ixgbe_do_reset(netdev); return 0; } +#ifdef CONFIG_IXGBE_VXLAN /** * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up * @dev: The port's netdev @@ -7917,17 +8051,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, struct ixgbe_hw *hw = &adapter->hw; u16 new_port = ntohs(port); + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + if (sa_family == AF_INET6) return; - if (adapter->vxlan_port == new_port) { - netdev_info(dev, "Port %d already offloaded\n", new_port); + if (adapter->vxlan_port == new_port) return; - } if (adapter->vxlan_port) { netdev_info(dev, - "Hit Max num of UDP ports, not adding port %d\n", + "Hit Max num of VXLAN ports, not adding port %d\n", new_port); return; } @@ -7946,9 +8081,11 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, __be16 port) { struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; u16 new_port = ntohs(port); + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + if (sa_family == AF_INET6) return; @@ -7958,9 +8095,10 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, return; } - adapter->vxlan_port = 0; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0); + ixgbe_clear_vxlan_port(adapter); + adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; } +#endif /* CONFIG_IXGBE_VXLAN */ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, @@ -8135,7 +8273,7 @@ static void 
*ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) return ERR_PTR(-EBUSY); - fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL); + fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL); if (!fwd_adapter) return ERR_PTR(-ENOMEM); @@ -8191,6 +8329,21 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) kfree(fwd_adapter); } +#define IXGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t +ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation) + return features; + + if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > + IXGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_ALL_CSUM; + + return features; +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -8236,8 +8389,11 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, +#ifdef CONFIG_IXGBE_VXLAN .ndo_add_vxlan_port = ixgbe_add_vxlan_port, .ndo_del_vxlan_port = ixgbe_del_vxlan_port, +#endif /* CONFIG_IXGBE_VXLAN */ + .ndo_features_check = ixgbe_features_check, }; /** @@ -8597,17 +8753,24 @@ skip_sriov: netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; + netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; +#ifdef CONFIG_IXGBE_VXLAN switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - netdev->hw_enc_features |= NETIF_F_RXCSUM; + netdev->hw_enc_features |= NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; break; default: break; } +#endif /* CONFIG_IXGBE_VXLAN */ #ifdef CONFIG_IXGBE_DCB netdev->dcbnl_ops = &dcbnl_ops; @@ -8694,9 +8857,10 @@ skip_sriov: hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); /* pick up the PCI bus settings for reporting later */ - hw->mac.ops.get_bus_info(hw); if (ixgbe_pcie_from_parent(hw)) ixgbe_get_parent_bus_info(adapter); + else + hw->mac.ops.get_bus_info(hw); /* calculate the expected PCIe bandwidth required for optimal * performance. Note that some older parts will never have enough @@ -8859,12 +9023,7 @@ static void ixgbe_remove(struct pci_dev *pdev) unregister_netdev(netdev); #ifdef CONFIG_PCI_IOV - /* - * Only disable SR-IOV on unload if the user specified the now - * deprecated max_vfs module parameter. 
- */ - if (max_vfs) - ixgbe_disable_sriov(adapter); + ixgbe_disable_sriov(adapter); #endif ixgbe_clear_interrupt_scheme(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 526a20bf7..597d0b1c2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -243,9 +243,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) u16 ext_ability = 0; if (!hw->phy.phy_semaphore_mask) { - hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) & - IXGBE_STATUS_LAN_ID_1; - if (hw->phy.lan_id) + if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; @@ -608,12 +606,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { s32 status; - u32 gssr; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - gssr = IXGBE_GSSR_PHY1_SM; - else - gssr = IXGBE_GSSR_PHY0_SM; + u32 gssr = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, @@ -737,39 +730,61 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, } /** - * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * ixgbe_get_copper_speeds_supported - Get copper link speed from phy * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value * - * Determines the link capabilities by reading the AUTOC register. + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. */ -s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) +static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) { - s32 status; u16 speed_ability; - - *speed = 0; - *autoneg = true; + s32 status; status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, &speed_ability); + if (status) + return status; - if (status == 0) { - if (speed_ability & MDIO_SPEED_10G) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_1000) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_100) - *speed |= IXGBE_LINK_SPEED_100_FULL; + if (speed_ability & MDIO_SPEED_10G) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_1000) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_100) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; + break; + case ixgbe_mac_X550EM_x: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; + break; + default: + break; } - /* Internal PHY does not support 100 Mbps */ - if (hw->mac.type == ixgbe_mac_X550EM_x) - *speed &= ~IXGBE_LINK_SPEED_100_FULL; + return 0; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + */ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = 0; + + *autoneg = true; + if (!hw->phy.speeds_supported) + status = ixgbe_get_copper_speeds_supported(hw); + *speed = hw->phy.speeds_supported; return status; } diff 
--git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index b6f424f3b..63689192b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -848,6 +848,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */ @@ -856,6 +857,24 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ + +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 +#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04 +#define IXGBE_MII_AUTONEG_REG 0x0 /* Management */ #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -1305,6 +1324,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ #define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ #define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ #define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ @@ -1312,7 +1332,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ #define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ - +#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ +#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT */ #define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ #define 
IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ #define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ @@ -2041,6 +2062,11 @@ enum { #define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ #define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + #define IXGBE_EEPROM_PAGE_SIZE_MAX 128 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ @@ -2540,9 +2566,11 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 #define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT 23 #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 #define IXGBE_FDIR_INIT_DONE_POLL 10 #define IXGBE_FDIRCMD_CMD_POLL 10 +#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 #define IXGBE_FDIR_DROP_QUEUE 127 @@ -2833,12 +2861,13 @@ typedef u32 ixgbe_link_speed; #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 /* Software ATR input stream values and masks */ -#define IXGBE_ATR_HASH_MASK 0x7fff -#define IXGBE_ATR_L4TYPE_MASK 0x3 -#define IXGBE_ATR_L4TYPE_UDP 0x1 -#define IXGBE_ATR_L4TYPE_TCP 0x2 -#define IXGBE_ATR_L4TYPE_SCTP 0x3 -#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 enum ixgbe_atr_flow_type { IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, @@ -3035,9 +3064,8 @@ enum ixgbe_smart_speed { /* PCI bus types */ enum ixgbe_bus_type { ixgbe_bus_type_unknown = 0, - ixgbe_bus_type_pci, - ixgbe_bus_type_pcix, ixgbe_bus_type_pci_express, + ixgbe_bus_type_internal, ixgbe_bus_type_reserved }; @@ -3298,6 +3326,7 @@ struct ixgbe_phy_operations { s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); s32 (*check_overtemp)(struct ixgbe_hw *); s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*enter_lplu)(struct ixgbe_hw *); s32 (*handle_lasi)(struct ixgbe_hw *hw); }; @@ -3308,6 +3337,7 @@ struct ixgbe_eeprom_info { u16 word_size; u16 address_bits; u16 word_page_size; + u16 ctrl_word_3; }; #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 @@ -3351,10 +3381,10 @@ struct ixgbe_phy_info { bool sfp_setup_needed; u32 revision; enum ixgbe_media_type media_type; - u8 lan_id; u32 phy_semaphore_mask; bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; + ixgbe_link_speed speeds_supported; enum ixgbe_smart_speed smart_speed; bool smart_speed_active; bool multispeed_fiber; @@ -3460,16 +3490,21 @@ struct ixgbe_info { #define IXGBE_ERR_PBA_SECTION -31 #define IXGBE_ERR_INVALID_ARGUMENT -32 #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF -#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010)) -#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C)) -#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634)) -#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638)) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00)) -#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? 
(0x4E00) : (0x8E00)) -#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520)) -#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00)) +#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) +#define IXGBE_FUSES0_300MHZ BIT(5) +#define IXGBE_FUSES0_REV1 BIT(6) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00) #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 032a5870a..4e758435e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -54,6 +54,11 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + /* set_phy_power was set by default to NULL */ + if (!ixgbe_mng_present(hw)) + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 7581da13e..9fe9445cd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -26,6 +26,20 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so similar */ + ixgbe_get_invariants_X540(hw); + + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + + return 0; +} + /** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control * @hw: pointer to hardware structure **/ @@ -597,6 +611,24 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_get_bus_info_X550em - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets bus link width and speed to unknown because X550em is + * not a PCI device. + **/ +static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) +{ + hw->bus.type = ixgbe_bus_type_internal; + hw->bus.width = ixgbe_bus_width_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; + + hw->mac.ops.set_lan_id(hw); + + return 0; +} + /** ixgbe_disable_rx_x550 - Disable RX unit * * Disables the Rx DMA unit for x550 @@ -1444,6 +1476,144 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) return ixgbe_enable_lasi_ext_t_x550em(hw); } +/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator + @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed + * + * Determine lowest common link speed with link partner.
+ **/ +static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed *lcd_speed) +{ + u16 an_lp_status; + s32 status; + u16 word = hw->eeprom.ctrl_word_3; + + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_lp_status); + if (status) + return status; + + /* If link partner advertised 1G, return 1G */ + if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { + *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; + return status; + } + + /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ + if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || + (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) + return status; + + /* Link partner not capable of lower speeds, return 10G */ + *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; + return status; +} + +/** ixgbe_enter_lplu_x550em - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting + * the X557 PHY immediately prior to entering LPLU. + **/ +static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +{ + u16 an_10g_cntl_reg, autoneg_reg, speed; + s32 status; + ixgbe_link_speed lcd_speed; + u32 save_autoneg; + bool link_up; + + /* SW LPLU not required on later HW revisions. */ + if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))) + return 0; + + /* If blocked by MNG FW, then don't restart AN */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status) + return status; + + status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3, + &hw->eeprom.ctrl_word_3); + if (status) + return status; + + /* If link is down, LPLU disabled in NVM, WoL disabled, or + * manageability disabled, then force link down by entering + * low power mode. + */ + if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || + !(hw->wol_enabled || ixgbe_mng_present(hw))) + return ixgbe_set_copper_phy_power(hw, false); + + /* Determine LCD */ + status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); + if (status) + return status; + + /* If no valid LCD link speed, then force link down and exit. */ + if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) + return ixgbe_set_copper_phy_power(hw, false); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + if (status) + return status; + + /* If no link now, speed is invalid so take link down */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status) + return ixgbe_set_copper_phy_power(hw, false); + + /* clear everything but the speed bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; + + /* If current speed is already LCD, then exit. 
*/ + if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && + (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || + ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && + (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) + return status; + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_10g_cntl_reg); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + if (status) + return status; + + save_autoneg = hw->phy.autoneg_advertised; + + /* Setup link at least common link speed */ + status = hw->mac.ops.setup_link(hw, lcd_speed, false); + + /* restore autoneg from before setting lplu speed */ + hw->phy.autoneg_advertised = save_autoneg; + + return status; +} + /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * @@ -1514,6 +1684,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); } + /* setup SW LPLU only for first revision */ + if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, + IXGBE_FUSES0_GROUP(0)))) + phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = ixgbe_reset_phy_t_X550em; break; @@ -1760,7 +1935,6 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, .get_mac_addr = &ixgbe_get_mac_addr_generic, \ .get_device_caps = &ixgbe_get_device_caps_generic, \ .stop_adapter = &ixgbe_stop_adapter_generic, \ - .get_bus_info = &ixgbe_get_bus_info_generic, \ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ .read_analog_reg8 = NULL, \ .write_analog_reg8 = NULL, \ @@ -1809,6 +1983,7 @@ static struct ixgbe_mac_operations mac_ops_X550 = { .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, .setup_sfp = NULL, }; @@ -1820,6 +1995,7 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = { .get_wwn_prefix = NULL, .setup_link = NULL, /* defined later */ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, + .get_bus_info = &ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, }; @@ -1855,7 +2031,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .read_reg = &ixgbe_read_phy_reg_generic, \ .write_reg = &ixgbe_write_phy_reg_generic, \ .setup_link = &ixgbe_setup_phy_link_generic, \ - .set_phy_power = &ixgbe_set_copper_phy_power, \ + .set_phy_power = NULL, \ .check_overtemp = &ixgbe_tn_check_overtemp, \ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, @@ -1893,7 +2069,7 @@ struct ixgbe_info ixgbe_X550_info = { struct ixgbe_info ixgbe_X550EM_x_info = { .mac = ixgbe_mac_X550EM_x, - .get_invariants = &ixgbe_get_invariants_X540, + .get_invariants = &ixgbe_get_invariants_X550_x, .mac_ops = &mac_ops_X550EM_x, .eeprom_ops = &eeprom_ops_X550EM_x, .phy_ops = &phy_ops_X550EM_x, diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 770e21a64..58434584b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed; #define 
IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 #define IXGBE_RXDADV_SPH 0x8000 +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ IXGBE_RXD_ERR_CE | \ IXGBE_RXD_ERR_LE | \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index b2f5b161d..d3e5f5b37 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - /* We support this operation only for 82599 and x540 at the moment */ - if (adapter->hw.mac.type < ixgbe_mac_X550_vf) - return IXGBEVF_82599_RETA_SIZE; + if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) + return IXGBEVF_X550_VFRETA_SIZE; - return 0; + return IXGBEVF_82599_RETA_SIZE; } static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) { - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - /* We support this operation only for 82599 and x540 at the moment */ - if (adapter->hw.mac.type < ixgbe_mac_X550_vf) - return IXGBEVF_RSS_HASH_KEY_SIZE; - - return 0; + return IXGBEVF_RSS_HASH_KEY_SIZE; } static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, @@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - /* If neither indirection table nor hash key was requested - just - * return a success avoiding taking any locks. - */ - if (!indir && !key) - return 0; + if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { + if (key) + memcpy(key, adapter->rss_key, sizeof(adapter->rss_key)); - spin_lock_bh(&adapter->mbx_lock); - if (indir) - err = ixgbevf_get_reta_locked(&adapter->hw, indir, - adapter->num_rx_queues); + if (indir) { + int i; - if (!err && key) - err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + } + } else { + /* If neither indirection table nor hash key was requested + * - just return a success avoiding taking any locks. 
+ */ + if (!indir && !key) + return 0; - spin_unlock_bh(&adapter->mbx_lock); + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = ixgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + + spin_unlock_bh(&adapter->mbx_lock); + } return err; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 775d08900..04c7ec844 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -144,9 +144,11 @@ struct ixgbevf_ring { #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES -#define IXGBEVF_MAX_RSS_QUEUES 2 -#define IXGBEVF_82599_RETA_SIZE 128 +#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */ +#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */ #define IXGBEVF_RSS_HASH_KEY_SIZE 40 +#define IXGBEVF_VFRSSRK_REGS 10 /* 10 registers for RSS key */ #define IXGBEVF_DEFAULT_TXD 1024 #define IXGBEVF_DEFAULT_RXD 512 @@ -447,6 +449,9 @@ struct ixgbevf_adapter { spinlock_t mbx_lock; unsigned long last_reset; + + u32 rss_key[IXGBEVF_VFRSSRK_REGS]; + u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; }; enum ixbgevf_state_t { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1d7b00b03..149a0b448 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + +static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containig ring specific data @@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { + ixgbevf_rx_hash(rx_ring, rx_desc, skb); ixgbevf_rx_checksum(rx_ring, rx_desc, skb); if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -648,46 +675,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, } } -/** - * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an ixgbevf specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. 
- **/ -static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, - struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - /** * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on @@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, } } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ixgbevf_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IXGBEVF_RX_BUFSZ; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); #endif + unsigned int pull_len; - if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; + if (likely(size <= IXGBEVF_RX_HDR_SIZE)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as is */ @@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); /* avoid re-using remote pages */ if (unlikely(ixgbevf_page_is_reserved(page))) @@ -1697,22 +1696,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vfmrqc = 0, vfreta = 0; - u32 rss_key[10]; u16 rss_i = adapter->num_rx_queues; - int i, j; + u8 i, j; /* Fill out hash function seeds */ - netdev_rss_key_fill(rss_key, sizeof(rss_key)); - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]); - /* Fill out redirection table */ - for (i = 0, j = 0; i < 64; i++, j++) { + for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { if (j == rss_i) j = 0; - vfreta = (vfreta << 8) | (j * 0x1); - if ((i & 3) == 3) + + adapter->rss_indir_tbl[i] = j; + + vfreta |= j << (i & 0x3) * 8; + if ((i & 3) == 3) { IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); + vfreta = 0; + } } /* Perform hash on these packet types */ diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 6e9a79209..060dd3922 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -583,7 +583,7 @@ jme_setup_tx_resources(struct jme_adapter *jme) atomic_set(&txring->next_to_clean, 0); atomic_set(&txring->nr_free, jme->tx_ring_size); - txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + txring->bufinf = kzalloc(sizeof(struct jme_buffer_info) * jme->tx_ring_size, GFP_ATOMIC); if (unlikely(!(txring->bufinf))) goto err_free_txring; @@ -592,8 +592,6 @@ jme_setup_tx_resources(struct jme_adapter *jme) * Initialize Transmit Descriptors */ memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); - memset(txring->bufinf, 0, - sizeof(struct jme_buffer_info) * jme->tx_ring_size); return 0; @@ -845,7 +843,7 @@ jme_setup_rx_resources(struct jme_adapter *jme) rxring->next_to_use = 0; atomic_set(&rxring->next_to_clean, 0); - rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + rxring->bufinf = kzalloc(sizeof(struct jme_buffer_info) * jme->rx_ring_size, GFP_ATOMIC); if (unlikely(!(rxring->bufinf))) goto err_free_rxring; @@ -853,8 +851,6 @@ jme_setup_rx_resources(struct jme_adapter *jme) /* * Initiallize Receive Descriptors */ - memset(rxring->bufinf, 0, - sizeof(struct jme_buffer_info) * jme->rx_ring_size); for (i = 0 ; i < jme->rx_ring_size ; ++i) { if (unlikely(jme_make_new_rx_buf(jme, i))) { jme_free_rx_resources(jme); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index d52639bc4..dfb6d5f79 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -759,11 +759,23 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, desc->l4i_chk = 0; desc->byte_cnt = length; - desc->buf_ptr = dma_map_single(dev->dev.parent, data, - length, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) { - WARN(1, "dma_map_single failed!\n"); - return -ENOMEM; + + if (length <= 8 
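The ixgbevf_setup_vfmrqc() hunk just above packs four one-byte redirection entries into each 32-bit VFRETA register: entry i lands in byte (i & 3) of register i >> 2, via vfreta |= j << (i & 0x3) * 8. A small self-contained illustration of that packing (pack_reta() and the table values are invented for the example, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Pack four one-byte RETA entries per 32-bit register, entry i in byte (i & 3). */
static uint32_t pack_reta(const uint8_t *tbl, unsigned int reg)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		val |= (uint32_t)tbl[reg * 4 + i] << (i * 8);
	return val;
}

int main(void)
{
	/* with two RSS queues the table cycles 0,1,0,1,... as in the loop above */
	uint8_t tbl[8] = { 0, 1, 0, 1, 0, 1, 0, 1 };

	printf("VFRETA(0) = 0x%08x\n", pack_reta(tbl, 0)); /* prints 0x01000100 */
	return 0;
}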
&& (uintptr_t)data & 0x7) { + /* Copy unaligned small data fragment to TSO header data area */ + memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE, + data, length); + desc->buf_ptr = txq->tso_hdrs_dma + + txq->tx_curr_desc * TSO_HEADER_SIZE; + } else { + /* Alignment is okay, map buffer and hand off to hardware */ + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; + desc->buf_ptr = dma_map_single(dev->dev.parent, data, + length, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, + desc->buf_ptr))) { + WARN(1, "dma_map_single failed!\n"); + return -ENOMEM; + } } cmd_sts = BUFFER_OWNED_BY_DMA; @@ -779,7 +791,8 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, } static inline void -txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length) +txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length, + u32 *first_cmd_sts, bool first_desc) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); @@ -788,6 +801,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length) int ret; u32 cmd_csum = 0; u16 l4i_chk = 0; + u32 cmd_sts; tx_index = txq->tx_curr_desc; desc = &txq->tx_desc_area[tx_index]; @@ -803,9 +817,17 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length) desc->byte_cnt = hdr_len; desc->buf_ptr = txq->tso_hdrs_dma + txq->tx_curr_desc * TSO_HEADER_SIZE; - desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | + cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | GEN_CRC; + /* Defer updating the first command descriptor until all + * following descriptors have been written. + */ + if (first_desc) + *first_cmd_sts = cmd_sts; + else + desc->cmd_sts = cmd_sts; + txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; @@ -819,6 +841,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, int desc_count = 0; struct tso_t tso; int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + struct tx_desc *first_tx_desc; + u32 first_cmd_sts = 0; /* Count needed descriptors */ if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { @@ -826,11 +850,14 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, return -EBUSY; } + first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; + /* Initialize the TSO handler, and prepare the first payload */ tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { + bool first_desc = (desc_count == 0); char *hdr; data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); @@ -840,7 +867,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, /* prepare packet headers: MAC + IP + TCP */ hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); - txq_put_hdr_tso(skb, txq, data_left); + txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts, + first_desc); while (data_left > 0) { int size; @@ -860,6 +888,10 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, __skb_queue_tail(&txq->tx_skb, skb); skb_tx_timestamp(skb); + /* ensure all other descriptors are written before first cmd_sts */ + wmb(); + first_tx_desc->cmd_sts = first_cmd_sts; + /* clear TX_END status */ mp->work_tx_end &= ~(1 << txq->index); @@ -1859,14 +1891,11 @@ oom: return; } - mc_spec = kmalloc(0x200, GFP_ATOMIC); + mc_spec = kzalloc(0x200, GFP_ATOMIC); if (mc_spec == NULL) goto oom; mc_other = mc_spec + (0x100 >> 2); - memset(mc_spec, 
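The mv643xx_eth TSO hunks above defer the first descriptor's cmd_sts, write every later descriptor first, then publish the whole chain with a wmb() followed by the single deferred store, so the DMA engine can never observe a half-built chain. Stripped of driver detail, the handoff pattern looks roughly like the following sketch (the struct layout, macro, and barrier stand-in are illustrative, not the driver's definitions):

#include <stdint.h>

struct tx_desc {
	volatile uint32_t cmd_sts;	/* ownership bit plus control flags */
	/* ... buffer pointer, byte count, etc. ... */
};

#define BUFFER_OWNED_BY_DMA 0x80000000u

/* Publish a descriptor chain: every descriptor after the first was already
 * written with BUFFER_OWNED_BY_DMA set; only after a write barrier does the
 * first descriptor's store hand the complete chain to the hardware. */
static void publish_chain(struct tx_desc *first, uint32_t first_cmd_sts)
{
	__sync_synchronize();		/* stand-in for the kernel's wmb() */
	first->cmd_sts = first_cmd_sts;	/* includes BUFFER_OWNED_BY_DMA */
}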
0, 0x100); - memset(mc_other, 0, 0x100); - netdev_for_each_mc_addr(ha, dev) { u8 *a = ha->addr; u32 *table; @@ -2788,8 +2817,10 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) for_each_available_child_of_node(np, pnp) { ret = mv643xx_eth_shared_of_add_port(pdev, pnp); - if (ret) + if (ret) { + of_node_put(pnp); return ret; + } } return 0; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 09ec32e33..0e4924498 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -949,7 +949,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Set CPU queue access map - all CPUs have access to all RX * queues and to all TX queues */ - for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) + for_each_present_cpu(cpu) mvreg_write(pp, MVNETA_CPU_MAP(cpu), (MVNETA_CPU_RXQ_ACCESS_ALL_MASK | MVNETA_CPU_TXQ_ACCESS_ALL_MASK)); @@ -1533,12 +1533,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); - if (!skb) - goto err_drop_frame; + /* After refill old buffer has to be unmapped regardless of + * whether the skb was successfully built or not. + */ dma_unmap_single(dev->dev.parent, phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); + if (!skb) + goto err_drop_frame; + rcvd_pkts++; rcvd_bytes += rx_bytes; @@ -3175,6 +3179,8 @@ static int mvneta_probe(struct platform_device *pdev) struct phy_device *phy = of_phy_find_device(dn); mvneta_fixed_link_update(pp, phy); + + put_device(&phy->dev); } return 0; diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig index 52a6665b7..d54701047 100644 --- a/drivers/net/ethernet/mellanox/Kconfig +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -18,5 +18,6 @@ if NET_VENDOR_MELLANOX source "drivers/net/ethernet/mellanox/mlx4/Kconfig" source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig" +source "drivers/net/ethernet/mellanox/mlxsw/Kconfig" endif # NET_VENDOR_MELLANOX diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile index 38fe32ef5..2e2a5ec50 100644 --- a/drivers/net/ethernet/mellanox/Makefile +++ b/drivers/net/ethernet/mellanox/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/ obj-$(CONFIG_MLX5_CORE) += mlx5/core/ +obj-$(CONFIG_MLXSW_CORE) += mlxsw/ diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 0a3202047..2177e56ed 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2398,7 +2398,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) } } - memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size); + memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe)); priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; INIT_WORK(&priv->mfunc.master.comm_work, mlx4_master_comm_channel); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 63769df87..eb8a4988d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -100,7 +100,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, { struct mlx4_en_dev *mdev = priv->mdev; int err = 0; - char name[25]; int timestamp_en = 0; bool assigned_eq = false; @@ -119,8 +118,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, err = mlx4_assign_eq(mdev->dev, priv->port, &cq->vector); if (err) { - mlx4_err(mdev,
"Failed assigning an EQ to %s\n", - name); + mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n", + cq->vector); goto free_eq; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 99ba1c50e..f79d81243 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -102,6 +102,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = { "blueflame", + "phv-bit" }; static const char main_strings[][ETH_GSTRING_LEN] = { @@ -1797,35 +1798,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev, static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags) { struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); + bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV); + bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV); int i; + int ret = 0; - if (bf_enabled_new == bf_enabled_old) - return 0; /* Nothing to do */ + if (bf_enabled_new != bf_enabled_old) { + if (bf_enabled_new) { + bool bf_supported = true; - if (bf_enabled_new) { - bool bf_supported = true; + for (i = 0; i < priv->tx_ring_num; i++) + bf_supported &= priv->tx_ring[i]->bf_alloced; - for (i = 0; i < priv->tx_ring_num; i++) - bf_supported &= priv->tx_ring[i]->bf_alloced; + if (!bf_supported) { + en_err(priv, "BlueFlame is not supported\n"); + return -EINVAL; + } - if (!bf_supported) { - en_err(priv, "BlueFlame is not supported\n"); - return -EINVAL; + priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } else { + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; } - priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME; - } else { - priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; - } - - for (i = 0; i < priv->tx_ring_num; i++) - priv->tx_ring[i]->bf_enabled = bf_enabled_new; + for (i = 0; i < priv->tx_ring_num; i++) + priv->tx_ring[i]->bf_enabled = bf_enabled_new; - en_info(priv, "BlueFlame %s\n", - bf_enabled_new ? "Enabled" : "Disabled"); + en_info(priv, "BlueFlame %s\n", + bf_enabled_new ? "Enabled" : "Disabled"); + } + if (phv_enabled_new != phv_enabled_old) { + ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new); + if (ret) + return ret; + else if (phv_enabled_new) + priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; + else + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV; + en_info(priv, "PHV bit %s\n", + phv_enabled_new ? 
"Enabled" : "Disabled"); + } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 913b716ed..a946e4bf7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -224,6 +224,26 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) kfree(mdev); } +static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx) +{ + int i; + struct mlx4_en_dev *mdev = ctx; + + /* Create a netdev for each port */ + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + mlx4_info(mdev, "Activating port:%d\n", i); + if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) + mdev->pndev[i] = NULL; + } + + /* register notifier */ + mdev->nb.notifier_call = mlx4_en_netdev_event; + if (register_netdevice_notifier(&mdev->nb)) { + mdev->nb.notifier_call = NULL; + mlx4_err(mdev, "Failed to create notifier\n"); + } +} + static void *mlx4_en_add(struct mlx4_dev *dev) { struct mlx4_en_dev *mdev; @@ -297,21 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev) mutex_init(&mdev->state_lock); mdev->device_up = true; - /* Setup ports */ - - /* Create a netdev for each port */ - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { - mlx4_info(mdev, "Activating port:%d\n", i); - if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) - mdev->pndev[i] = NULL; - } - /* register notifier */ - mdev->nb.notifier_call = mlx4_en_netdev_event; - if (register_netdevice_notifier(&mdev->nb)) { - mdev->nb.notifier_call = NULL; - mlx4_err(mdev, "Failed to create notifier\n"); - } - return mdev; err_mr: @@ -335,6 +340,7 @@ static struct mlx4_interface mlx4_en_interface = { .event = mlx4_en_event, .get_dev = mlx4_en_get_netdev, .protocol = MLX4_PROT_ETH, + .activate = mlx4_en_activate, }; static void mlx4_en_verify_params(void) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e0de2fd1c..4726122ea 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2184,6 +2184,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } +static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx4_en_priv *en_priv = netdev_priv(netdev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + /* Since there is no support for separate RX C-TAG/S-TAG vlan accel + * enable/disable make sure S-TAG flag is always in same state as + * C-TAG. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX && + !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) + features |= NETIF_F_HW_VLAN_STAG_RX; + else + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + return features; +} + static int mlx4_en_set_features(struct net_device *netdev, netdev_features_t features) { @@ -2218,6 +2237,10 @@ static int mlx4_en_set_features(struct net_device *netdev, en_info(priv, "Turn %s TX vlan strip offload\n", (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX)) + en_info(priv, "Turn %s TX S-VLAN strip offload\n", + (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF"); + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { en_info(priv, "Turn %s loopback\n", (features & NETIF_F_LOOPBACK) ? 
"ON" : "OFF"); @@ -2460,6 +2483,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_poll_controller = mlx4_en_netpoll, #endif .ndo_set_features = mlx4_en_set_features, + .ndo_fix_features = mlx4_en_fix_features, .ndo_setup_tc = mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, @@ -2500,6 +2524,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_poll_controller = mlx4_en_netpoll, #endif .ndo_set_features = mlx4_en_set_features, + .ndo_fix_features = mlx4_en_fix_features, .ndo_setup_tc = mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, @@ -2931,6 +2956,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { + dev->features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; + dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX; + } + + if (mlx4_is_slave(mdev->dev)) { + int phv; + + err = get_phv_bit(mdev->dev, port, &phv); + if (!err && phv) { + dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; + } + } else { + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && + !(mdev->dev->caps.flags2 & + MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) + dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + } + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) dev->hw_features |= NETIF_F_RXFCS; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4f95fa7b5..e7a5000aa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -725,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, hw_checksum = csum_unfold((__force __sum16)cqe->checksum); - if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) && + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) { hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); hdr += sizeof(struct vlan_hdr); @@ -906,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud gro_skb->csum_level = 1; if ((cqe->vlan_my_qpn & - cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && + cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { u16 vid = be16_to_cpu(cqe->sl_vid); __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); + } else if ((be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_SVLAN_PRESENT_MASK) && + (dev->features & NETIF_F_HW_VLAN_STAG_RX)) { + __vlan_hwaccel_put_tag(gro_skb, + htons(ETH_P_8021AD), + be16_to_cpu(cqe->sl_vid)); } if (dev->features & NETIF_F_RXHASH) skb_set_hash(gro_skb, be32_to_cpu(cqe->immed_rss_invalid), - PKT_HASH_TYPE_L3); + (ip_summed == CHECKSUM_UNNECESSARY) ? + PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_L3); skb_record_rx_queue(gro_skb, cq->ring); skb_mark_napi_id(gro_skb, &cq->napi); @@ -962,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (dev->features & NETIF_F_RXHASH) skb_set_hash(skb, be32_to_cpu(cqe->immed_rss_invalid), - PKT_HASH_TYPE_L3); + (ip_summed == CHECKSUM_UNNECESSARY) ? 
+ PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_L3); if ((be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_VLAN_PRESENT_MASK) && + MLX4_CQE_CVLAN_PRESENT_MASK) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); + else if ((be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_SVLAN_PRESENT_MASK) && + (dev->features & NETIF_F_HW_VLAN_STAG_RX)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + be16_to_cpu(cqe->sl_vid)); if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { timestamp = mlx4_en_get_cqe_ts(cqe); @@ -1032,13 +1047,15 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) /* If we used up all the quota - we're probably not done yet... */ if (done == budget) { - int cpu_curr; const struct cpumask *aff; + struct irq_data *idata; + int cpu_curr; INC_PERF_COUNTER(priv->pstats.napi_quota); cpu_curr = smp_processor_id(); - aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; + idata = irq_desc_get_irq_data(cq->irq_desc); + aff = irq_data_get_affinity_mask(idata); if (likely(cpumask_test_cpu(cpu_curr, aff))) return budget; @@ -1065,7 +1082,10 @@ static const int frag_sizes[] = { void mlx4_en_calc_rx_buf(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN; + /* VLAN_HLEN is added twice, to support skbs VLAN-tagged with multiple + * headers (for example: ETH_P_8021Q and ETH_P_8021AD). + */ + int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN); int buf_size = 0; int i = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c10d98f6a..4421bf546 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) u32 index, bf_index; __be32 op_own; u16 vlan_tag = 0; + u16 vlan_proto = 0; int i_frag; int lso_header_size; void *fragptr = NULL; @@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; } - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tag_present(skb)) { vlan_tag = skb_vlan_tag_get(skb); - + vlan_proto = be16_to_cpu(skb->vlan_proto); + } netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); @@ -958,8 +960,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->bf.offset ^= ring->bf.buf_size; } else { tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * - !!skb_vlan_tag_present(skb); + if (vlan_proto == ETH_P_8021AD) + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; + else if (vlan_proto == ETH_P_8021Q) + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; + else + tx_desc->ctrl.ins_vlan = 0; + tx_desc->ctrl.fence_size = real_size; /* Ensure new descriptor hits memory diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 8e81e53c3..603d1c3d3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -196,7 +196,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) return; } - memcpy(s_eqe, eqe, dev->caps.eqe_size - 1); + memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1); s_eqe->slave_id = slave; /* ensure all information is written before setting the ownership bit */ dma_wmb(); @@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) * and performing a NOP command */ for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { + /* Make sure
request_irq was called */ + if (!priv->eq_table.eq[i].have_irq) + continue; + /* Temporarily use polling for command completions */ mlx4_cmd_use_polling(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index e30bf57ad..e8ec1dec5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [26] = "Port ETS Scheduler support", [27] = "Port beacon support", [28] = "RX-ALL support", + [29] = "802.1ad offload support", }; int i; @@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31) +#define QUERY_FUNC_CAP_PHV_BIT 0x40 if (vhcr->op_modifier == 1) { struct mlx4_active_ports actv_ports = @@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], QUERY_FUNC_CAP_PHYS_PORT_ID); + if (dev->caps.phv_bit[port]) { + field = QUERY_FUNC_CAP_PHV_BIT; + MLX4_PUT(outbox->buf, field, + QUERY_FUNC_CAP_FLAGS0_OFFSET); + } + } else if (vhcr->op_modifier == 0) { struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); @@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, MLX4_GET(func_cap->phys_port_id, outbox, QUERY_FUNC_CAP_PHYS_PORT_ID); + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); + func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT); + /* All other resources are allocated by the master, but we still report * 'num' and 'reserved' capabilities as follows: * - num remains the maximum resource index @@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94 +#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c @@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; if (field & (1 << 2)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN; + if (field & 0x40) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN; + MLX4_GET(dev_cap->reserved_lkey, outbox, QUERY_DEV_CAP_RSVD_LKEY_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); @@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + /* phv_check enable */ + MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); + if (byte_field & 0x2) + param->phv_check_en = 1; out: mlx4_free_cmd_mailbox(dev, mailbox); @@ -2758,3 +2780,63 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } + +static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) +{ +#define SET_PORT_GEN_PHV_VALID 0x10 +#define SET_PORT_GEN_PHV_EN 0x80 + + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox =
mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + + context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID; + if (phv_bit) + context->phv_en |= SET_PORT_GEN_PHV_EN; + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv) +{ + int err; + struct mlx4_func_cap func_cap; + + memset(&func_cap, 0, sizeof(func_cap)); + err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); + if (!err) + *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT; + return err; +} +EXPORT_SYMBOL(get_phv_bit); + +int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val) +{ + int ret; + + if (mlx4_is_slave(dev)) + return -EPERM; + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { + ret = mlx4_SET_PORT_phv_bit(dev, port, new_val); + if (!ret) + dev->caps.phv_bit[port] = new_val; + return ret; + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(set_phv_bit); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 07cb7c246..08de5555c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -204,6 +204,7 @@ struct mlx4_init_hca_param { u16 cqe_size; /* For use only when CQE stride feature enabled */ u16 eqe_size; /* For use only when EQE stride feature enabled */ u8 rss_ip_frags; + u8 phv_check_en; /* for QUERY_HCA */ }; struct mlx4_init_ib_param { diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 0d80aed59..0472941af 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -63,8 +63,11 @@ static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); spin_unlock_irq(&priv->ctx_lock); + if (intf->activate) + intf->activate(&priv->dev, dev_ctx->context); } else kfree(dev_ctx); + } static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a408977a5..cc3a98975 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { + struct mlx4_init_hca_param hca_param; + + memset(&hca_param, 0, sizeof(hca_param)); + err = mlx4_QUERY_HCA(dev, &hca_param); + /* Turn off the PHV_EN flag in case phv_check_en is set. + * phv_check_en is a HW check that parses the packet and verifies + * the phv bit was reported correctly in the wqe. To allow QinQ, + * the PHV_EN flag should be set and phv_check_en must be cleared, + * otherwise QinQ packets will be dropped by the HW.
+ */ + if (err || hca_param.phv_check_en) + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN; + } + /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; @@ -2654,14 +2669,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) if (msi_x) { int nreq = dev->caps.num_ports * num_online_cpus() + 1; - bool shared_ports = false; nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); - if (nreq > MAX_MSIX) { + if (nreq > MAX_MSIX) nreq = MAX_MSIX; - shared_ports = true; - } entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) @@ -2684,9 +2696,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, dev->caps.num_ports); - if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) - shared_ports = true; - for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { if (i == MLX4_EQ_ASYNC) continue; @@ -2694,7 +2703,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) priv->eq_table.eq[i].irq = entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; - if (shared_ports) { + if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, dev->caps.num_ports); /* We don't set affinity hint when there @@ -2920,6 +2929,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, { u64 dev_flags = dev->flags; int err = 0; + int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev), + MLX4_MAX_NUM_VF); if (reset_flow) { dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), @@ -2945,6 +2956,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, } if (!(dev->flags & MLX4_FLAG_SRIOV)) { + if (total_vfs > fw_enabled_sriov_vfs) { + mlx4_err(dev, "requested vfs (%d) > available vfs (%d). 
Continuing without SR_IOV\n", + total_vfs, fw_enabled_sriov_vfs); + err = -ENOMEM; + goto disable_sriov; + } mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); err = pci_enable_sriov(pdev, total_vfs); } @@ -3426,20 +3443,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, goto err_disable_pdev; } } - if (total_vfs >= MLX4_MAX_NUM_VF) { + if (total_vfs > MLX4_MAX_NUM_VF) { dev_err(&pdev->dev, - "Requested more VF's (%d) than allowed (%d)\n", - total_vfs, MLX4_MAX_NUM_VF - 1); + "Requested more VF's (%d) than allowed by hw (%d)\n", + total_vfs, MLX4_MAX_NUM_VF); err = -EINVAL; goto err_disable_pdev; } for (i = 0; i < MLX4_MAX_PORTS; i++) { - if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { + if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { dev_err(&pdev->dev, - "Requested more VF's (%d) for port (%d) than allowed (%d)\n", + "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n", nvfs[i] + nvfs[2], i + 1, - MLX4_MAX_NUM_VF_P_PORT - 1); + MLX4_MAX_NUM_VF_P_PORT); err = -EINVAL; goto err_disable_pdev; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index bd9ea0d01..1d4e2e054 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1184,10 +1184,11 @@ out: if (prot == MLX4_PROT_ETH) { /* manage the steering entry for promisc mode */ if (new_entry) - new_steering_entry(dev, port, steer, index, qp->qpn); + err = new_steering_entry(dev, port, steer, + index, qp->qpn); else - existing_steering_entry(dev, port, steer, - index, qp->qpn); + err = existing_steering_entry(dev, port, steer, + index, qp->qpn); } if (err && link && index != -1) { if (index < dev->caps.num_mgms) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index a092c5c34..232b2b55f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -787,6 +787,9 @@ struct mlx4_set_port_general_context { u8 pprx; u8 pfcrx; u16 reserved4; + u32 reserved5; + u8 phv_en; + u8 reserved6[3]; }; struct mlx4_set_port_rqp_calc_context { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 666d1669e..defcf8c39 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -95,6 +95,7 @@ */ #define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1 +#define MLX4_EN_PRIV_FLAGS_PHV 2 #define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 0715b4975..6cb383046 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -45,15 +45,34 @@ * register it in a memory region at HCA virtual address 0. 
*/ -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) +static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, + size_t size, dma_addr_t *dma_handle, + int node) +{ + struct mlx5_priv *priv = &dev->priv; + int original_node; + void *cpu_handle; + + mutex_lock(&priv->alloc_mutex); + original_node = dev_to_node(&dev->pdev->dev); + set_dev_node(&dev->pdev->dev, node); + cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, + dma_handle, GFP_KERNEL); + set_dev_node(&dev->pdev->dev, original_node); + mutex_unlock(&priv->alloc_mutex); + return cpu_handle; +} + +int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_buf *buf, int node) { dma_addr_t t; buf->size = size; buf->npages = 1; buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, - size, &t, GFP_KERNEL); + buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size, + &t, node); if (!buf->direct.buf) return -ENOMEM; @@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) return 0; } + +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) +{ + return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node); +} EXPORT_SYMBOL_GPL(mlx5_buf_alloc); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) @@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) } EXPORT_SYMBOL_GPL(mlx5_buf_free); -static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) +static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, + int node) { struct mlx5_db_pgdir *pgdir; @@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) return NULL; bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); - pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, - &pgdir->db_dma, GFP_KERNEL); + + pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, + &pgdir->db_dma, node); if (!pgdir->db_page) { kfree(pgdir); return NULL; @@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, return 0; } -int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node) { struct mlx5_db_pgdir *pgdir; int ret = 0; @@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) if (!mlx5_alloc_db_from_pgdir(pgdir, db)) goto out; - pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev)); + pgdir = mlx5_alloc_db_pgdir(dev, node); if (!pgdir) { ret = -ENOMEM; goto out; @@ -145,6 +171,12 @@ out: return ret; } +EXPORT_SYMBOL_GPL(mlx5_db_alloc_node); + +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + return mlx5_db_alloc_node(dev, db, dev->priv.numa_node); +} EXPORT_SYMBOL_GPL(mlx5_db_alloc); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 3d23bd657..0983a208b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -42,24 +42,27 @@ #define MLX5E_MAX_NUM_TC 8 -#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7 +#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7 +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define 
MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd -#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024) +#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 -#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7 +#define MLX5E_LOG_INDIR_RQT_SIZE 0x7 +#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) +#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ +#define MLX5E_SQ_BF_BUDGET 16 static const char vport_strings[][ETH_GSTRING_LEN] = { /* vport statistics */ @@ -91,6 +94,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = { "lro_bytes", "rx_csum_good", "rx_csum_none", + "rx_csum_sw", "tx_csum_offload", "tx_queue_stopped", "tx_queue_wake", @@ -128,18 +132,94 @@ struct mlx5e_vport_stats { u64 lro_bytes; u64 rx_csum_good; u64 rx_csum_none; + u64 rx_csum_sw; u64 tx_csum_offload; u64 tx_queue_stopped; u64 tx_queue_wake; u64 tx_queue_dropped; u64 rx_wqe_err; -#define NUM_VPORT_COUNTERS 31 +#define NUM_VPORT_COUNTERS 32 +}; + +static const char pport_strings[][ETH_GSTRING_LEN] = { + /* IEEE802.3 counters */ + "frames_tx", + "frames_rx", + "check_seq_err", + "alignment_err", + "octets_tx", + "octets_received", + "multicast_xmitted", + "broadcast_xmitted", + "multicast_rx", + "broadcast_rx", + "in_range_len_errors", + "out_of_range_len", + "too_long_errors", + "symbol_err", + "mac_control_tx", + "mac_control_rx", + "unsupported_op_rx", + "pause_ctrl_rx", + "pause_ctrl_tx", + + /* RFC2863 counters */ + "in_octets", + "in_ucast_pkts", + "in_discards", + "in_errors", + "in_unknown_protos", + "out_octets", + "out_ucast_pkts", + "out_discards", + "out_errors", + "in_multicast_pkts", + "in_broadcast_pkts", + "out_multicast_pkts", + "out_broadcast_pkts", + + /* RFC2819 counters */ + "drop_events", + "octets", + "pkts", + "broadcast_pkts", + "multicast_pkts", + "crc_align_errors", + "undersize_pkts", + "oversize_pkts", + "fragments", + "jabbers", + "collisions", + "p64octets", + "p65to127octets", + "p128to255octets", + "p256to511octets", + "p512to1023octets", + "p1024to1518octets", + "p1519to2047octets", + "p2048to4095octets", + "p4096to8191octets", + "p8192to10239octets", +}; + +#define NUM_IEEE_802_3_COUNTERS 19 +#define NUM_RFC_2863_COUNTERS 13 +#define NUM_RFC_2819_COUNTERS 21 +#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \ + NUM_RFC_2863_COUNTERS + \ + NUM_RFC_2819_COUNTERS) + +struct mlx5e_pport_stats { + __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS]; + __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS]; + __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS]; }; static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "packets", "csum_none", + "csum_sw", "lro_packets", "lro_bytes", "wqe_err" @@ -148,10 +228,11 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = { struct mlx5e_rq_stats { u64 packets; u64 csum_none; + u64 csum_sw; u64 lro_packets; u64 lro_bytes; u64 wqe_err; -#define NUM_RQ_STATS 5 +#define NUM_RQ_STATS 6 }; static const char sq_stats_strings[][ETH_GSTRING_LEN] = { @@ -179,6 +260,7 @@ struct mlx5e_sq_stats { struct mlx5e_stats { struct mlx5e_vport_stats vport; + struct mlx5e_pport_stats pport; }; struct mlx5e_params { @@ -192,9 +274,12 @@ struct mlx5e_params { u16 tx_cq_moderation_usec; u16 tx_cq_moderation_pkts; 
u16 min_rx_wqes; - u16 rx_hash_log_tbl_sz; bool lro_en; u32 lro_wqe_sz; + u16 tx_max_inline; + u8 rss_hfunc; + u8 toeplitz_hash_key[40]; + u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; }; enum { @@ -214,6 +299,7 @@ struct mlx5e_cq { struct napi_struct *napi; struct mlx5_core_cq mcq; struct mlx5e_channel *channel; + struct mlx5e_priv *priv; /* control */ struct mlx5_wq_ctrl wq_ctrl; @@ -237,6 +323,7 @@ struct mlx5e_rq { struct mlx5_wq_ctrl wq_ctrl; u32 rqn; struct mlx5e_channel *channel; + struct mlx5e_priv *priv; } ____cacheline_aligned_in_smp; struct mlx5e_tx_skb_cb { @@ -266,7 +353,9 @@ struct mlx5e_sq { /* dirtied @xmit */ u16 pc ____cacheline_aligned_in_smp; u32 dma_fifo_pc; - u32 bf_offset; + u16 bf_offset; + u16 prev_cc; + u8 bf_budget; struct mlx5e_sq_stats stats; struct mlx5e_cq cq; @@ -279,9 +368,10 @@ struct mlx5e_sq { struct mlx5_wq_cyc wq; u32 dma_fifo_mask; void __iomem *uar_map; + void __iomem *uar_bf_map; struct netdev_queue *txq; u32 sqn; - u32 bf_buf_size; + u16 bf_buf_size; u16 max_inline; u16 edge; struct device *pdev; @@ -315,7 +405,6 @@ struct mlx5e_channel { __be32 mkey_be; u8 num_tc; unsigned long flags; - int tc_to_txq_map[MLX5E_MAX_NUM_TC]; /* control */ struct mlx5e_priv *priv; @@ -324,20 +413,24 @@ struct mlx5e_channel { }; enum mlx5e_traffic_types { - MLX5E_TT_IPV4_TCP = 0, - MLX5E_TT_IPV6_TCP = 1, - MLX5E_TT_IPV4_UDP = 2, - MLX5E_TT_IPV6_UDP = 3, - MLX5E_TT_IPV4 = 4, - MLX5E_TT_IPV6 = 5, - MLX5E_TT_ANY = 6, - MLX5E_NUM_TT = 7, + MLX5E_TT_IPV4_TCP, + MLX5E_TT_IPV6_TCP, + MLX5E_TT_IPV4_UDP, + MLX5E_TT_IPV6_UDP, + MLX5E_TT_IPV4_IPSEC_AH, + MLX5E_TT_IPV6_IPSEC_AH, + MLX5E_TT_IPV4_IPSEC_ESP, + MLX5E_TT_IPV6_IPSEC_ESP, + MLX5E_TT_IPV4, + MLX5E_TT_IPV6, + MLX5E_TT_ANY, + MLX5E_NUM_TT, }; -enum { - MLX5E_RQT_SPREADING = 0, - MLX5E_RQT_DEFAULT_RQ = 1, - MLX5E_NUM_RQT = 2, +enum mlx5e_rqt_ix { + MLX5E_INDIRECTION_RQT, + MLX5E_SINGLE_RQ_RQT, + MLX5E_NUM_RQT, }; struct mlx5e_eth_addr_info { @@ -362,10 +455,10 @@ struct mlx5e_eth_addr_db { enum { MLX5E_STATE_ASYNC_EVENTS_ENABLE, MLX5E_STATE_OPENED, + MLX5E_STATE_DESTROYING, }; struct mlx5e_vlan_db { - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u32 active_vlans_ft_ix[VLAN_N_VID]; u32 untagged_rule_ft_ix; u32 any_vlan_rule_ft_ix; @@ -379,9 +472,9 @@ struct mlx5e_flow_table { struct mlx5e_priv { /* priv data path fields - start */ - int num_tc; int default_vlan_prio; struct mlx5e_sq **txq_to_sq_map; + int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; /* priv data path fields - end */ unsigned long state; @@ -390,10 +483,11 @@ struct mlx5e_priv { u32 pdn; u32 tdn; struct mlx5_core_mr mr; + struct mlx5e_rq drop_rq; struct mlx5e_channel **channel; u32 tisn[MLX5E_MAX_NUM_TC]; - u32 rqtn; + u32 rqtn[MLX5E_NUM_RQT]; u32 tirn[MLX5E_NUM_TT]; struct mlx5e_flow_table ft; @@ -470,10 +564,9 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); void mlx5e_update_stats(struct mlx5e_priv *priv); -int mlx5e_open_flow_table(struct mlx5e_priv *priv); -void mlx5e_close_flow_table(struct mlx5e_priv *priv); +int mlx5e_create_flow_tables(struct mlx5e_priv *priv); +void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv); void mlx5e_init_eth_addr(struct mlx5e_priv *priv); -void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -482,17 +575,17 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); void mlx5e_enable_vlan_filter(struct mlx5e_priv 
*priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); -int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv); -void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv); + +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); -int mlx5e_update_priv_params(struct mlx5e_priv *priv, - struct mlx5e_params *new_params); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, - struct mlx5e_tx_wqe *wqe) + struct mlx5e_tx_wqe *wqe, int bf_sz) { + u16 ofst = MLX5_BF_OFFSET + sq->bf_offset; + /* ensure wqe is visible to device before updating doorbell record */ dma_wmb(); @@ -503,9 +596,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, */ wmb(); - mlx5_write64((__be32 *)&wqe->ctrl, - sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset, - NULL); + if (bf_sz) { + __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz); + + /* flush the write-combining mapped buffer */ + wmb(); + + } else { + mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL); + } sq->bf_offset ^= sq->bf_buf_size; } @@ -519,3 +618,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) } extern const struct ethtool_ops mlx5e_ethtool_ops; +u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 388938482..bce912688 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -171,9 +171,9 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) switch (sset) { case ETH_SS_STATS: - return NUM_VPORT_COUNTERS + + return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + priv->params.num_channels * NUM_RQ_STATS + - priv->params.num_channels * priv->num_tc * + priv->params.num_channels * priv->params.num_tc * NUM_SQ_STATS; /* fallthrough */ default: @@ -200,6 +200,11 @@ static void mlx5e_get_strings(struct net_device *dev, strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_strings[i]); + /* PPORT counters */ + for (i = 0; i < NUM_PPORT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_strings[i]); + /* per channel counters */ for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_RQ_STATS; j++) @@ -207,7 +212,7 @@ static void mlx5e_get_strings(struct net_device *dev, "rx%d_%s", i, rq_stats_strings[j]); for (i = 0; i < priv->params.num_channels; i++) - for (tc = 0; tc < priv->num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -234,6 +239,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, for (i = 0; i < NUM_VPORT_COUNTERS; i++) data[idx++] = ((u64 *)&priv->stats.vport)[i]; + for (i = 0; i < NUM_PPORT_COUNTERS; i++) + data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]); + /* per channel counters */ for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_RQ_STATS; j++) @@ -242,7 +250,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, ((u64 *)&priv->channel[i]->rq.stats)[j]; for (i = 0; i < priv->params.num_channels; i++) - for (tc = 0; tc < priv->num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) for (j = 0; j < NUM_SQ_STATS; j++) data[idx++] = !test_bit(MLX5E_STATE_OPENED, &priv->state) ? 
0 : @@ -264,7 +272,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, struct ethtool_ringparam *param) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_params new_params; + bool was_opened; u16 min_rx_wqes; u8 log_rq_size; u8 log_sq_size; @@ -316,11 +324,18 @@ static int mlx5e_set_ringparam(struct net_device *dev, return 0; mutex_lock(&priv->state_lock); - new_params = priv->params; - new_params.log_rq_size = log_rq_size; - new_params.log_sq_size = log_sq_size; - new_params.min_rx_wqes = min_rx_wqes; - err = mlx5e_update_priv_params(priv, &new_params); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(dev); + + priv->params.log_rq_size = log_rq_size; + priv->params.log_sq_size = log_sq_size; + priv->params.min_rx_wqes = min_rx_wqes; + + if (was_opened) + err = mlx5e_open_locked(dev); + mutex_unlock(&priv->state_lock); return err; @@ -342,7 +357,7 @@ static int mlx5e_set_channels(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); int ncv = priv->mdev->priv.eq_table.num_comp_vectors; unsigned int count = ch->combined_count; - struct mlx5e_params new_params; + bool was_opened; int err = 0; if (!count) { @@ -365,9 +380,16 @@ static int mlx5e_set_channels(struct net_device *dev, return 0; mutex_lock(&priv->state_lock); - new_params = priv->params; - new_params.num_channels = count; - err = mlx5e_update_priv_params(priv, &new_params); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(dev); + + priv->params.num_channels = count; + + if (was_opened) + err = mlx5e_open_locked(dev); + mutex_unlock(&priv->state_lock); return err; @@ -606,7 +628,7 @@ static int mlx5e_set_settings(struct net_device *netdev, u32 link_modes; u32 speed; u32 eth_proto_cap, eth_proto_admin; - u8 port_status; + enum mlx5_port_status ps; int err; speed = ethtool_cmd_speed(cmd); @@ -640,25 +662,197 @@ static int mlx5e_set_settings(struct net_device *netdev, if (link_modes == eth_proto_admin) goto out; - err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); + mlx5_query_port_admin_status(mdev, &ps); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + +out: + return err; +} + +static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return sizeof(priv->params.toeplitz_hash_key); +} + +static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) +{ + return MLX5E_INDIR_RQT_SIZE; +} + +static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (indir) + memcpy(indir, priv->params.indirection_rqt, + sizeof(priv->params.indirection_rqt)); + + if (key) + memcpy(key, priv->params.toeplitz_hash_key, + sizeof(priv->params.toeplitz_hash_key)); + + if (hfunc) + *hfunc = priv->params.rss_hfunc; + + return 0; +} + +static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + bool close_open; + int err = 0; + + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && + (hfunc != ETH_RSS_HASH_XOR) && + (hfunc != ETH_RSS_HASH_TOP)) + return -EINVAL; + + mutex_lock(&priv->state_lock); + + if (indir) { + memcpy(priv->params.indirection_rqt, indir, + sizeof(priv->params.indirection_rqt)); + mlx5e_redirect_rqt(priv, 
MLX5E_INDIRECTION_RQT); + } + + close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) && + test_bit(MLX5E_STATE_OPENED, &priv->state); + if (close_open) + mlx5e_close_locked(dev); + + if (key) + memcpy(priv->params.toeplitz_hash_key, key, + sizeof(priv->params.toeplitz_hash_key)); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE) + priv->params.rss_hfunc = hfunc; + + if (close_open) + err = mlx5e_open_locked(priv->netdev); + + mutex_unlock(&priv->state_lock); + + return err; +} + +static int mlx5e_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = priv->params.num_channels; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int mlx5e_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + void *data) +{ + const struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + *(u32 *)data = priv->params.tx_max_inline; + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +static int mlx5e_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + bool was_opened; + u32 val; + int err = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + val = *(u32 *)data; + if (val > mlx5e_get_max_inline_cap(mdev)) { + err = -EINVAL; + break; + } + + mutex_lock(&priv->state_lock); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(dev); + + priv->params.tx_max_inline = val; + + if (was_opened) + err = mlx5e_open_locked(dev); + + mutex_unlock(&priv->state_lock); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +static void mlx5e_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause, + &pauseparam->tx_pause); if (err) { - netdev_err(netdev, "%s: set port eth proto admin failed: %d\n", + netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n", __func__, err); - goto out; } +} - err = mlx5_query_port_status(mdev, &port_status); - if (err) - goto out; +static int mlx5e_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int err; - if (port_status == MLX5_PORT_DOWN) - return 0; + if (pauseparam->autoneg) + return -EINVAL; + + err = mlx5_set_port_pause(mdev, + pauseparam->rx_pause ? 1 : 0, + pauseparam->tx_pause ? 
1 : 0); + if (err) { + netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n", + __func__, err); + } - err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN); - if (err) - goto out; - err = mlx5_set_port_status(mdev, MLX5_PORT_UP); -out: return err; } @@ -676,4 +870,13 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_coalesce = mlx5e_set_coalesce, .get_settings = mlx5e_get_settings, .set_settings = mlx5e_set_settings, + .get_rxfh_key_size = mlx5e_get_rxfh_key_size, + .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, + .get_rxfh = mlx5e_get_rxfh, + .set_rxfh = mlx5e_set_rxfh, + .get_rxnfc = mlx5e_get_rxnfc, + .get_tunable = mlx5e_get_tunable, + .set_tunable = mlx5e_set_tunable, + .get_pauseparam = mlx5e_get_pauseparam, + .set_pauseparam = mlx5e_set_pauseparam, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c index 120db80c4..22d603f78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, { void *ft = priv->ft.main; - if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) + mlx5_del_flow_table_entry(ft, + ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) + mlx5_del_flow_table_entry(ft, + ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) + mlx5_del_flow_table_entry(ft, + ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) + mlx5_del_flow_table_entry(ft, + ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV6)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV6)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV4)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]); - if (ai->tt_vec & (1 << MLX5E_TT_ANY)) + if (ai->tt_vec & BIT(MLX5E_TT_ANY)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]); } @@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) switch (eth_addr_type) { case MLX5E_UC: ret = - (1 << MLX5E_TT_IPV4_TCP) | - (1 << MLX5E_TT_IPV6_TCP) | - (1 << MLX5E_TT_IPV4_UDP) | - (1 << MLX5E_TT_IPV6_UDP) | - (1 << MLX5E_TT_IPV4) | - (1 << MLX5E_TT_IPV6) | - (1 << MLX5E_TT_ANY) | + BIT(MLX5E_TT_IPV4_TCP) | + BIT(MLX5E_TT_IPV6_TCP) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4_IPSEC_AH) | + BIT(MLX5E_TT_IPV6_IPSEC_AH) | + BIT(MLX5E_TT_IPV4_IPSEC_ESP) | + BIT(MLX5E_TT_IPV6_IPSEC_ESP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | 0; break; case MLX5E_MC_IPV4: ret = - (1 << MLX5E_TT_IPV4_UDP) | - (1 << MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV4) | 0; break; case MLX5E_MC_IPV6: ret = - (1 << MLX5E_TT_IPV6_UDP) | - (1 << MLX5E_TT_IPV6) | + 
BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV6) | 0; break; case MLX5E_MC_OTHER: ret = - (1 << MLX5E_TT_ANY) | + BIT(MLX5E_TT_ANY) | 0; break; } @@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) case MLX5E_ALLMULTI: ret = - (1 << MLX5E_TT_IPV4_UDP) | - (1 << MLX5E_TT_IPV6_UDP) | - (1 << MLX5E_TT_IPV4) | - (1 << MLX5E_TT_IPV6) | - (1 << MLX5E_TT_ANY) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | 0; break; default: /* MLX5E_PROMISC */ ret = - (1 << MLX5E_TT_IPV4_TCP) | - (1 << MLX5E_TT_IPV6_TCP) | - (1 << MLX5E_TT_IPV4_UDP) | - (1 << MLX5E_TT_IPV6_UDP) | - (1 << MLX5E_TT_IPV4) | - (1 << MLX5E_TT_IPV6) | - (1 << MLX5E_TT_ANY) | + BIT(MLX5E_TT_IPV4_TCP) | + BIT(MLX5E_TT_IPV6_TCP) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4_IPSEC_AH) | + BIT(MLX5E_TT_IPV6_IPSEC_AH) | + BIT(MLX5E_TT_IPV4_IPSEC_ESP) | + BIT(MLX5E_TT_IPV6_IPSEC_ESP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | 0; break; } @@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, u8 *match_criteria_dmac; void *ft = priv->ft.main; u32 *tirn = priv->tirn; + u32 *ft_ix; u32 tt_vec; int err; @@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, tt_vec = mlx5e_get_tt_vec(ai, type); - if (tt_vec & (1 << MLX5E_TT_ANY)) { + ft_ix = &ai->ft_ix[MLX5E_TT_ANY]; + if (tt_vec & BIT(MLX5E_TT_ANY)) { MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_ANY]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_ANY]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_ANY); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_ANY); } match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype); - if (tt_vec & (1 << MLX5E_TT_IPV4)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4]; + if (tt_vec & BIT(MLX5E_TT_IPV4)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IP); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV4]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV4]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV4); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4); } - if (tt_vec & (1 << MLX5E_TT_IPV6)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6]; + if (tt_vec & BIT(MLX5E_TT_IPV6)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IPV6); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV6]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV6]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV6); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6); } MLX5_SET_TO_ONES(fte_match_param, match_criteria, @@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, IPPROTO_UDP); - if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP]; + if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { MLX5_SET(fte_match_param, match_value, 
outer_headers.ethertype, ETH_P_IP); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV4_UDP]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV4_UDP]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); } - if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP]; + if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IPV6); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV6_UDP]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV6_UDP]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); } MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, IPPROTO_TCP); - if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP]; + if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IP); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV4_TCP]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV4_TCP]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); } - if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP]; + if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IPV6); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV6_TCP]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV6_TCP]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); + } + + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_AH); + + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]; + if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_IPSEC_AH]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); + } + + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]; + if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6_IPSEC_AH]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); + } + + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_ESP); + + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]; + if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { + MLX5_SET(fte_match_param, match_value, 
outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_IPSEC_ESP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); + } + + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]; + if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6_IPSEC_ESP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); } return 0; + +err_del_ai: + mlx5e_del_eth_addr_from_flow_table(priv, ai); + + return err; } static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, @@ -498,44 +594,32 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) { - WARN_ON(!mutex_is_locked(&priv->state_lock)); + if (!priv->vlan.filter_disabled) + return; - if (priv->vlan.filter_disabled) { - priv->vlan.filter_disabled = false; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - } + priv->vlan.filter_disabled = false; + if (priv->netdev->flags & IFF_PROMISC) + return; + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) { - WARN_ON(!mutex_is_locked(&priv->state_lock)); + if (priv->vlan.filter_disabled) + return; - if (!priv->vlan.filter_disabled) { - priv->vlan.filter_disabled = true; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - } + priv->vlan.filter_disabled = true; + if (priv->netdev->flags & IFF_PROMISC) + return; + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); - int err = 0; - mutex_lock(&priv->state_lock); - - set_bit(vid, priv->vlan.active_vlans); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, - vid); - - mutex_unlock(&priv->state_lock); - - return err; + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); } int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, @@ -543,56 +627,11 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, { struct mlx5e_priv *priv = netdev_priv(dev); - mutex_lock(&priv->state_lock); - - clear_bit(vid, priv->vlan.active_vlans); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); - - mutex_unlock(&priv->state_lock); - - return 0; -} - -int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv) -{ - u16 vid; - int err; - - for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) { - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, - vid); - if (err) - return err; - } - - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - if (err) - return err; - - if (priv->vlan.filter_disabled) { - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - if (err) - return err; - } + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); return 0; } -void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv) -{ - 
u16 vid; - - if (priv->vlan.filter_disabled) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); - - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - - for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); -} - #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) @@ -656,18 +695,21 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) hn->action = MLX5E_ACTION_DEL; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) mlx5e_sync_netdev_addr(priv); mlx5e_apply_netdev_addr(priv); } -void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) +void mlx5e_set_rx_mode_work(struct work_struct *work) { + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + set_rx_mode_work); + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; struct net_device *ndev = priv->netdev; - bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state); + bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); bool broadcast_enabled = rx_mode_enable; @@ -679,8 +721,12 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; - if (enable_promisc) + if (enable_promisc) { mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (!priv->vlan.filter_disabled) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } if (enable_allmulti) mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); if (enable_broadcast) @@ -692,25 +738,18 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); if (disable_allmulti) mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); - if (disable_promisc) + if (disable_promisc) { + if (!priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + } ea->promisc_enabled = promisc_enabled; ea->allmulti_enabled = allmulti_enabled; ea->broadcast_enabled = broadcast_enabled; } -void mlx5e_set_rx_mode_work(struct work_struct *work) -{ - struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, - set_rx_mode_work); - - mutex_lock(&priv->state_lock); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_set_rx_mode_core(priv); - mutex_unlock(&priv->state_lock); -} - void mlx5e_init_eth_addr(struct mlx5e_priv *priv) { ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); @@ -725,7 +764,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) if (!g) return -ENOMEM; - g[0].log_sz = 2; + g[0].log_sz = 3; g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, outer_headers.ethertype); @@ -833,7 +872,7 @@ static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) mlx5_destroy_flow_table(priv->ft.vlan); } -int mlx5e_open_flow_table(struct mlx5e_priv *priv) +int mlx5e_create_flow_tables(struct mlx5e_priv *priv) { int err; @@ -845,16 +884,24 @@ int mlx5e_open_flow_table(struct mlx5e_priv *priv) if (err) goto 
err_destroy_main_flow_table; + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + goto err_destroy_vlan_flow_table; + return 0; +err_destroy_vlan_flow_table: + mlx5e_destroy_vlan_flow_table(priv); + err_destroy_main_flow_table: mlx5e_destroy_main_flow_table(priv); return err; } -void mlx5e_close_flow_table(struct mlx5e_priv *priv) +void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) { + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); mlx5e_destroy_vlan_flow_table(priv); mlx5e_destroy_main_flow_table(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 40206da1f..59874d666 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -41,6 +41,7 @@ struct mlx5e_rq_param { struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; + u16 max_inline; }; struct mlx5e_cq_param { @@ -81,6 +82,47 @@ static void mlx5e_update_carrier_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } +static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_pport_stats *s = &priv->stats.pport; + u32 *in; + u32 *out; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + + in = mlx5_vzalloc(sz); + out = mlx5_vzalloc(sz); + if (!in || !out) + goto free_out; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + + MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, + sz, MLX5_REG_PPCNT, 0, 0); + memcpy(s->IEEE_802_3_counters, + MLX5_ADDR_OF(ppcnt_reg, out, counter_set), + sizeof(s->IEEE_802_3_counters)); + + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, + sz, MLX5_REG_PPCNT, 0, 0); + memcpy(s->RFC_2863_counters, + MLX5_ADDR_OF(ppcnt_reg, out, counter_set), + sizeof(s->RFC_2863_counters)); + + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, + sz, MLX5_REG_PPCNT, 0, 0); + memcpy(s->RFC_2819_counters, + MLX5_ADDR_OF(ppcnt_reg, out, counter_set), + sizeof(s->RFC_2819_counters)); + +free_out: + kvfree(in); + kvfree(out); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -107,6 +149,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->lro_packets = 0; s->lro_bytes = 0; s->rx_csum_none = 0; + s->rx_csum_sw = 0; s->rx_wqe_err = 0; for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; @@ -114,9 +157,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->lro_packets += rq_stats->lro_packets; s->lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_sw += rq_stats->csum_sw; s->rx_wqe_err += rq_stats->wqe_err; - for (j = 0; j < priv->num_tc; j++) { + for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; s->tso_packets += sq_stats->tso_packets; @@ -199,8 +243,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) /* Update calculated offload counters */ s->tx_csum_offload = s->tx_packets - tx_offload_none; - s->rx_csum_good = s->rx_packets - s->rx_csum_none; + s->rx_csum_good = s->rx_packets - s->rx_csum_none - + s->rx_csum_sw; + mlx5e_update_pport_counters(priv); free_out: kvfree(out); } @@ -272,6 +318,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, int err; int i; + param->wq.db_numa_node = cpu_to_node(c->cpu); + err = mlx5_wq_ll_create(mdev, 
&param->wq, rqc_wq, &rq->wq, &rq->wq_ctrl); if (err) @@ -304,6 +352,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->netdev = c->netdev; rq->channel = c; rq->ix = c->ix; + rq->priv = c->priv; return 0; @@ -321,8 +370,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) { - struct mlx5e_channel *c = rq->channel; - struct mlx5e_priv *priv = c->priv; + struct mlx5e_priv *priv = rq->priv; struct mlx5_core_dev *mdev = priv->mdev; void *in; @@ -342,11 +390,11 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) memcpy(rqc, param->rqc, sizeof(param->rqc)); - MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); + MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); MLX5_SET(rqc, rqc, flush_in_error_en, 1); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - - PAGE_SHIFT); + MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); mlx5_fill_page_array(&rq->wq_ctrl.buf, @@ -389,11 +437,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) static void mlx5e_disable_rq(struct mlx5e_rq *rq) { - struct mlx5e_channel *c = rq->channel; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; - - mlx5_core_destroy_rq(mdev, rq->rqn); + mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn); } static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) @@ -502,6 +546,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, if (err) return err; + param->wq.db_numa_node = cpu_to_node(c->cpu); + err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) @@ -509,7 +555,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; sq->uar_map = sq->uar.map; + sq->uar_bf_map = sq->uar.bf_map; sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; + sq->max_inline = param->max_inline; err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -518,11 +566,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, txq_ix = c->ix + tc * priv->params.num_channels; sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); - sq->pdev = c->pdev; - sq->mkey_be = c->mkey_be; - sq->channel = c; - sq->tc = tc; - sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; + sq->pdev = c->pdev; + sq->mkey_be = c->mkey_be; + sq->channel = c; + sq->tc = tc; + sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; + sq->bf_budget = MLX5E_SQ_BF_BUDGET; priv->txq_to_sq_map[txq_ix] = sq; return 0; @@ -569,7 +618,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) memcpy(sqc, param->sqc, sizeof(param->sqc)); - MLX5_SET(sqc, sqc, user_index, sq->tc); MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]); MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); @@ -579,7 +627,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, sq->uar.index); MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - - PAGE_SHIFT); + MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); mlx5_fill_page_array(&sq->wq_ctrl.buf, @@ -702,7 +750,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, int err; u32 i; - param->wq.numa = cpu_to_node(c->cpu); + param->wq.buf_numa_node = cpu_to_node(c->cpu); + param->wq.db_numa_node = cpu_to_node(c->cpu); param->eq_ix = c->ix;
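/*
 * Editor's sketch (not part of the patch): the hunks above pin each work
 * queue's doorbell and buffer pages to the NUMA node of the CPU that
 * services the channel, via cpu_to_node(c->cpu).  A minimal analogue with
 * plain kernel allocators follows; struct ring_sketch and
 * ring_alloc_near_cpu() are hypothetical names used only for illustration.
 */
#include <linux/slab.h>
#include <linux/topology.h>

struct ring_sketch {
        void *buf;
};

static int ring_alloc_near_cpu(struct ring_sketch *ring, size_t size, int cpu)
{
        int node = cpu_to_node(cpu);    /* the same lookup the patch uses */

        /* Keep descriptor memory local to the core that polls the ring. */
        ring->buf = kzalloc_node(size, GFP_KERNEL, node);
        return ring->buf ? 0 : -ENOMEM;
}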
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, @@ -732,6 +781,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, } cq->channel = c; + cq->priv = priv; return 0; } @@ -743,8 +793,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq) static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { - struct mlx5e_channel *c = cq->channel; - struct mlx5e_priv *priv = c->priv; + struct mlx5e_priv *priv = cq->priv; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; @@ -773,7 +822,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - - PAGE_SHIFT); + MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); err = mlx5_core_create_cq(mdev, mcq, in, inlen); @@ -790,8 +839,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) static void mlx5e_disable_cq(struct mlx5e_cq *cq) { - struct mlx5e_channel *c = cq->channel; - struct mlx5e_priv *priv = c->priv; + struct mlx5e_priv *priv = cq->priv; struct mlx5_core_dev *mdev = priv->mdev; mlx5_core_destroy_cq(mdev, &cq->mcq); @@ -901,13 +949,13 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c) mlx5e_close_sq(&c->sq[tc]); } -static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c, - int num_channels) +static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix) { int i; for (i = 0; i < MLX5E_MAX_NUM_TC; i++) - c->tc_to_txq_map[i] = c->ix + i * num_channels; + priv->channeltc_to_txq_map[ix][i] = + ix + i * priv->params.num_channels; } static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, @@ -929,9 +977,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mr.key); - c->num_tc = priv->num_tc; + c->num_tc = priv->params.num_tc; - mlx5e_build_tc_to_txq_map(c, priv->params.num_channels); + mlx5e_build_channeltc_to_txq_map(priv, ix); netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); @@ -1000,7 +1048,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); MLX5_SET(wq, wq, pd, priv->pdn); - param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; } @@ -1014,7 +1062,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); MLX5_SET(wq, wq, pd, priv->pdn); - param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); + param->max_inline = priv->params.tx_max_inline; } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, @@ -1059,27 +1108,28 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, static int mlx5e_open_channels(struct mlx5e_priv *priv) { struct mlx5e_channel_param cparam; + int nch = priv->params.num_channels; int err = -ENOMEM; int i; int j; - priv->channel = kcalloc(priv->params.num_channels, - sizeof(struct mlx5e_channel *), GFP_KERNEL); + priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *), + GFP_KERNEL); - priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc, + priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc, sizeof(struct mlx5e_sq *), GFP_KERNEL); if (!priv->channel || !priv->txq_to_sq_map) goto
err_free_txq_to_sq_map; mlx5e_build_channel_param(priv, &cparam); - for (i = 0; i < priv->params.num_channels; i++) { + for (i = 0; i < nch; i++) { err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); if (err) goto err_close_channels; } - for (j = 0; j < priv->params.num_channels; j++) { + for (j = 0; j < nch; j++) { err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq); if (err) goto err_close_channels; @@ -1109,67 +1159,73 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv) kfree(priv->channel); } -static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) +static int mlx5e_rx_hash_fn(int hfunc) { - struct mlx5_core_dev *mdev = priv->mdev; - u32 in[MLX5_ST_SZ_DW(create_tis_in)]; - void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + return (hfunc == ETH_RSS_HASH_TOP) ? + MLX5_RX_HASH_FN_TOEPLITZ : + MLX5_RX_HASH_FN_INVERTED_XOR8; +} - memset(in, 0, sizeof(in)); +static int mlx5e_bits_invert(unsigned long a, int size) +{ + int inv = 0; + int i; - MLX5_SET(tisc, tisc, prio, tc); - MLX5_SET(tisc, tisc, transport_domain, priv->tdn); + for (i = 0; i < size; i++) + inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; - return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); + return inv; } -static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) +static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc) { - mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); -} + int i; -static int mlx5e_open_tises(struct mlx5e_priv *priv) -{ - int num_tc = priv->num_tc; - int err; - int tc; + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { + int ix = i; - for (tc = 0; tc < num_tc; tc++) { - err = mlx5e_open_tis(priv, tc); - if (err) - goto err_close_tises; + if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); + + ix = priv->params.indirection_rqt[ix]; + ix = ix % priv->params.num_channels; + MLX5_SET(rqtc, rqtc, rq_num[i], + test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[ix]->rq.rqn : + priv->drop_rq.rqn); } +} - return 0; +static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, + enum mlx5e_rqt_ix rqt_ix) +{ -err_close_tises: - for (tc--; tc >= 0; tc--) - mlx5e_close_tis(priv, tc); + switch (rqt_ix) { + case MLX5E_INDIRECTION_RQT: + mlx5e_fill_indir_rqt_rqns(priv, rqtc); - return err; -} + break; -static void mlx5e_close_tises(struct mlx5e_priv *priv) -{ - int num_tc = priv->num_tc; - int tc; + default: /* MLX5E_SINGLE_RQ_RQT */ + MLX5_SET(rqtc, rqtc, rq_num[0], + test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[0]->rq.rqn : + priv->drop_rq.rqn); - for (tc = 0; tc < num_tc; tc++) - mlx5e_close_tis(priv, tc); + break; + } } -static int mlx5e_open_rqt(struct mlx5e_priv *priv) +static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; - u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; void *rqtc; int inlen; - int err; int sz; - int i; + int err; - sz = 1 << priv->params.rx_hash_log_tbl_sz; + sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
1 : MLX5E_INDIR_RQT_SIZE; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); @@ -1181,198 +1237,101 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv) MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - for (i = 0; i < sz; i++) { - int ix = i % priv->params.num_channels; - - MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); - } - - MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); + mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); - if (!err) - priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); + err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]); kvfree(in); return err; } -static void mlx5e_close_rqt(struct mlx5e_priv *priv) +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) { - u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + void *rqtc; + int inlen; + int sz; + int err; - memset(in, 0, sizeof(in)); + sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE; - MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); - MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); + inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; - mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out, - sizeof(out)); -} + rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); -static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) -{ - void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); -#define ROUGH_MAX_L2_L3_HDR_SZ 256 + MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); -#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ - MLX5_HASH_FIELD_SEL_DST_IP) - -#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ - MLX5_HASH_FIELD_SEL_DST_IP |\ - MLX5_HASH_FIELD_SEL_L4_SPORT |\ - MLX5_HASH_FIELD_SEL_L4_DPORT) - - if (priv->params.lro_en) { - MLX5_SET(tirc, tirc, lro_enable_mask, - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); - MLX5_SET(tirc, tirc, lro_max_ip_payload_size, - (priv->params.lro_wqe_sz - - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, - MLX5_CAP_ETH(priv->mdev, - lro_timer_supported_periods[3])); - } + err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen); - switch (tt) { - case MLX5E_TT_ANY: - MLX5_SET(tirc, tirc, disp_type, - MLX5_TIRC_DISP_TYPE_DIRECT); - MLX5_SET(tirc, tirc, inline_rqn, - priv->channel[0]->rq.rqn); - break; - default: - MLX5_SET(tirc, tirc, disp_type, - MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, - priv->rqtn); - MLX5_SET(tirc, tirc, rx_hash_fn, - MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc, - rx_hash_toeplitz_key), - MLX5_FLD_SZ_BYTES(tirc, - rx_hash_toeplitz_key)); - break; - } + kvfree(in); - switch (tt) { - case MLX5E_TT_IPV4_TCP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV4); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - MLX5_L4_PROT_TYPE_TCP); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_ALL); - break; + return err; +} - case MLX5E_TT_IPV6_TCP: - MLX5_SET(rx_hash_field_select, 
hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV6); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - MLX5_L4_PROT_TYPE_TCP); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_ALL); - break; +static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +{ + mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]); +} - case MLX5E_TT_IPV4_UDP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV4); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - MLX5_L4_PROT_TYPE_UDP); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_ALL); - break; +static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) +{ + mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT); +} - case MLX5E_TT_IPV6_UDP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV6); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - MLX5_L4_PROT_TYPE_UDP); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_ALL); - break; +static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) +{ + if (!priv->params.lro_en) + return; - case MLX5E_TT_IPV4: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV4); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP); - break; +#define ROUGH_MAX_L2_L3_HDR_SZ 256 - case MLX5E_TT_IPV6: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV6); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP); - break; - } + MLX5_SET(tirc, tirc, lro_enable_mask, + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (priv->params.lro_wqe_sz - + ROUGH_MAX_L2_L3_HDR_SZ) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, + MLX5_CAP_ETH(priv->mdev, + lro_timer_supported_periods[2])); } -static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) +static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) { struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; + + void *in; void *tirc; int inlen; int err; - inlen = MLX5_ST_SZ_BYTES(create_tir_in); + inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + MLX5_SET(modify_tir_in, in, bitmask.lro, 1); + tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); - mlx5e_build_tir_ctx(priv, tirc, tt); + mlx5e_build_tir_ctx_lro(tirc, priv); - err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); + err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); kvfree(in); return err; } -static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) -{ - mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); -} - -static int mlx5e_open_tirs(struct mlx5e_priv *priv) -{ - int err; - int i; - - for (i = 0; i < MLX5E_NUM_TT; i++) { - err = mlx5e_open_tir(priv, i); - if (err) - goto err_close_tirs; - } - - return 0; - -err_close_tirs: - for (i--; i >= 0; i--) - mlx5e_close_tir(priv, i); - - return err; -} - -static void mlx5e_close_tirs(struct mlx5e_priv *priv) -{ - int i; - - for (i = 0; i < MLX5E_NUM_TT; i++) - mlx5e_close_tir(priv, i); -} - static int mlx5e_set_dev_port_mtu(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -1400,6 +1359,8 @@ int mlx5e_open_locked(struct net_device *netdev) int num_txqs; int err; + set_bit(MLX5E_STATE_OPENED, &priv->state); + num_txqs = priv->params.num_channels * priv->params.num_tc; 
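/*
 * Editor's sketch (not part of the patch): mlx5e_fill_indir_rqt_rqns()
 * above bit-reverses the table index when ETH_RSS_HASH_XOR is selected,
 * so consecutive hash values land on well-separated RQT slots.  The
 * function below is equivalent to the patch's mlx5e_bits_invert(); the
 * 7-bit width used in the worked results is an assumed value of
 * MLX5E_LOG_INDIR_RQT_SIZE, which this hunk does not show.
 */
static inline unsigned int bits_invert_sketch(unsigned long a, int size)
{
        unsigned int inv = 0;
        int i;

        /* Place bit (size - i - 1) of 'a' at position i of the result. */
        for (i = 0; i < size; i++)
                inv |= ((a >> (size - i - 1)) & 1) << i;

        /* With size == 7: 1 -> 64, 2 -> 32, 3 -> 96, 127 -> 127. */
        return inv;
}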
netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->params.num_channels); @@ -1408,74 +1369,19 @@ int mlx5e_open_locked(struct net_device *netdev) if (err) return err; - err = mlx5e_open_tises(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n", - __func__, err); - return err; - } - err = mlx5e_open_channels(priv); if (err) { netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", __func__, err); - goto err_close_tises; - } - - err = mlx5e_open_rqt(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n", - __func__, err); - goto err_close_channels; - } - - err = mlx5e_open_tirs(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n", - __func__, err); - goto err_close_rqls; - } - - err = mlx5e_open_flow_table(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n", - __func__, err); - goto err_close_tirs; - } - - err = mlx5e_add_all_vlan_rules(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n", - __func__, err); - goto err_close_flow_table; + return err; } - mlx5e_init_eth_addr(priv); - - set_bit(MLX5E_STATE_OPENED, &priv->state); - mlx5e_update_carrier(priv); - mlx5e_set_rx_mode_core(priv); + mlx5e_redirect_rqts(priv); schedule_delayed_work(&priv->update_stats_work, 0); - return 0; - -err_close_flow_table: - mlx5e_close_flow_table(priv); - -err_close_tirs: - mlx5e_close_tirs(priv); - -err_close_rqls: - mlx5e_close_rqt(priv); - -err_close_channels: - mlx5e_close_channels(priv); - -err_close_tises: - mlx5e_close_tises(priv); - return err; + return 0; } static int mlx5e_open(struct net_device *netdev) @@ -1496,14 +1402,9 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); - mlx5e_set_rx_mode_core(priv); - mlx5e_del_all_vlan_rules(priv); + mlx5e_redirect_rqts(priv); netif_carrier_off(priv->netdev); - mlx5e_close_flow_table(priv); - mlx5e_close_tirs(priv); - mlx5e_close_rqt(priv); mlx5e_close_channels(priv); - mlx5e_close_tises(priv); return 0; } @@ -1520,50 +1421,365 @@ static int mlx5e_close(struct net_device *netdev) return err; } -int mlx5e_update_priv_params(struct mlx5e_priv *priv, - struct mlx5e_params *new_params) +static int mlx5e_create_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *rq, + struct mlx5e_rq_param *param) { - int err = 0; - int was_opened; - - WARN_ON(!mutex_is_locked(&priv->state_lock)); + struct mlx5_core_dev *mdev = priv->mdev; + void *rqc = param->rqc; + void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); + int err; - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) - mlx5e_close_locked(priv->netdev); + param->wq.db_numa_node = param->wq.buf_numa_node; - priv->params = *new_params; + err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq, + &rq->wq_ctrl); + if (err) + return err; - if (was_opened) - err = mlx5e_open_locked(priv->netdev); + rq->priv = priv; - return err; + return 0; } -static struct rtnl_link_stats64 * -mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, + struct mlx5e_cq *cq, + struct mlx5e_cq_param *param) { - struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_vport_stats *vstats = &priv->stats.vport; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + int eqn_not_used; + int irqn; + int err;
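/*
 * Editor's sketch (not part of the patch): the drop RQ created in this
 * hunk gives the RQTs a target that is always valid, so TIRs and flow
 * tables can stay alive across ifdown/ifup instead of being torn down in
 * mlx5e_close_locked().  This condenses the selection logic from
 * mlx5e_fill_rqt_rqns(); rqt_entry_rqn() is a hypothetical helper, not a
 * function the patch adds.
 */
static u32 rqt_entry_rqn(struct mlx5e_priv *priv, int ix)
{
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return priv->drop_rq.rqn;       /* parked on the drop RQ */

        return priv->channel[ix % priv->params.num_channels]->rq.rqn;
}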
- stats->rx_packets = vstats->rx_packets; - stats->rx_bytes = vstats->rx_bytes; - stats->tx_packets = vstats->tx_packets; - stats->tx_bytes = vstats->tx_bytes; - stats->multicast = vstats->rx_multicast_packets + - vstats->tx_multicast_packets; - stats->tx_errors = vstats->tx_error_packets; - stats->rx_errors = vstats->rx_error_packets; - stats->tx_dropped = vstats->tx_queue_dropped; - stats->rx_crc_errors = 0; - stats->rx_length_errors = 0; + err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, + &cq->wq_ctrl); + if (err) + return err; - return stats; -} + mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); -static void mlx5e_set_rx_mode(struct net_device *dev) -{ - struct mlx5e_priv *priv = netdev_priv(dev); + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + *mcq->set_ci_db = 0; + *mcq->arm_db = 0; + mcq->vector = param->eq_ix; + mcq->comp = mlx5e_completion_event; + mcq->event = mlx5e_cq_error_event; + mcq->irqn = irqn; + mcq->uar = &priv->cq_uar; + + cq->priv = priv; + + return 0; +} + +static int mlx5e_open_drop_rq(struct mlx5e_priv *priv) +{ + struct mlx5e_cq_param cq_param; + struct mlx5e_rq_param rq_param; + struct mlx5e_rq *rq = &priv->drop_rq; + struct mlx5e_cq *cq = &priv->drop_rq.cq; + int err; + + memset(&cq_param, 0, sizeof(cq_param)); + memset(&rq_param, 0, sizeof(rq_param)); + mlx5e_build_rx_cq_param(priv, &cq_param); + mlx5e_build_rq_param(priv, &rq_param); + + err = mlx5e_create_drop_cq(priv, cq, &cq_param); + if (err) + return err; + + err = mlx5e_enable_cq(cq, &cq_param); + if (err) + goto err_destroy_cq; + + err = mlx5e_create_drop_rq(priv, rq, &rq_param); + if (err) + goto err_disable_cq; + + err = mlx5e_enable_rq(rq, &rq_param); + if (err) + goto err_destroy_rq; + + return 0; + +err_destroy_rq: + mlx5e_destroy_rq(&priv->drop_rq); + +err_disable_cq: + mlx5e_disable_cq(&priv->drop_rq.cq); + +err_destroy_cq: + mlx5e_destroy_cq(&priv->drop_rq.cq); + + return err; +} + +static void mlx5e_close_drop_rq(struct mlx5e_priv *priv) +{ + mlx5e_disable_rq(&priv->drop_rq); + mlx5e_destroy_rq(&priv->drop_rq); + mlx5e_disable_cq(&priv->drop_rq.cq); + mlx5e_destroy_cq(&priv->drop_rq.cq); +} + +static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(create_tis_in)]; + void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + + memset(in, 0, sizeof(in)); + + MLX5_SET(tisc, tisc, prio, tc); + MLX5_SET(tisc, tisc, transport_domain, priv->tdn); + + return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); +} + +static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc) +{ + mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); +} + +static int mlx5e_create_tises(struct mlx5e_priv *priv) +{ + int err; + int tc; + + for (tc = 0; tc < priv->params.num_tc; tc++) { + err = mlx5e_create_tis(priv, tc); + if (err) + goto err_close_tises; + } + + return 0; + +err_close_tises: + for (tc--; tc >= 0; tc--) + mlx5e_destroy_tis(priv, tc); + + return err; +} + +static void mlx5e_destroy_tises(struct mlx5e_priv *priv) +{ + int tc; + + for (tc = 0; tc < priv->params.num_tc; tc++) + mlx5e_destroy_tis(priv, tc); +} + +static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) +{ + void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + + MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + +#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP) + +#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_L4_SPORT |\ + MLX5_HASH_FIELD_SEL_L4_DPORT) + +#define
MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_IPSEC_SPI) + + mlx5e_build_tir_ctx_lro(tirc, priv); + + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + + switch (tt) { + case MLX5E_TT_ANY: + MLX5_SET(tirc, tirc, indirect_table, + priv->rqtn[MLX5E_SINGLE_RQ_RQT]); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); + break; + default: + MLX5_SET(tirc, tirc, indirect_table, + priv->rqtn[MLX5E_INDIRECTION_RQT]); + MLX5_SET(tirc, tirc, rx_hash_fn, + mlx5e_rx_hash_fn(priv->params.rss_hfunc)); + if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { + void *rss_key = MLX5_ADDR_OF(tirc, tirc, + rx_hash_toeplitz_key); + size_t len = MLX5_FLD_SZ_BYTES(tirc, + rx_hash_toeplitz_key); + + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + memcpy(rss_key, priv->params.toeplitz_hash_key, len); + } + break; + } + + switch (tt) { + case MLX5E_TT_IPV4_TCP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_L4PORTS); + break; + + case MLX5E_TT_IPV6_TCP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_L4PORTS); + break; + + case MLX5E_TT_IPV4_UDP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_UDP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_L4PORTS); + break; + + case MLX5E_TT_IPV6_UDP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_UDP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_L4PORTS); + break; + + case MLX5E_TT_IPV4_IPSEC_AH: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_IPSEC_SPI); + break; + + case MLX5E_TT_IPV6_IPSEC_AH: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_IPSEC_SPI); + break; + + case MLX5E_TT_IPV4_IPSEC_ESP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_IPSEC_SPI); + break; + + case MLX5E_TT_IPV6_IPSEC_ESP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_IPSEC_SPI); + break; + + case MLX5E_TT_IPV4: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP); + break; + + case MLX5E_TT_IPV6: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP); + break; + } +} + +static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + void *tirc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + + mlx5e_build_tir_ctx(priv, 
tirc, tt); + + err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); + + kvfree(in); + + return err; +} + +static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt) +{ + mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); +} + +static int mlx5e_create_tirs(struct mlx5e_priv *priv) +{ + int err; + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) { + err = mlx5e_create_tir(priv, i); + if (err) + goto err_destroy_tirs; + } + + return 0; + +err_destroy_tirs: + for (i--; i >= 0; i--) + mlx5e_destroy_tir(priv, i); + + return err; +} + +static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) +{ + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) + mlx5e_destroy_tir(priv, i); +} + +static struct rtnl_link_stats64 * +mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_vport_stats *vstats = &priv->stats.vport; + + stats->rx_packets = vstats->rx_packets; + stats->rx_bytes = vstats->rx_bytes; + stats->tx_packets = vstats->tx_packets; + stats->tx_bytes = vstats->tx_bytes; + stats->multicast = vstats->rx_multicast_packets + + vstats->tx_multicast_packets; + stats->tx_errors = vstats->tx_error_packets; + stats->rx_errors = vstats->rx_error_packets; + stats->tx_dropped = vstats->tx_queue_dropped; + stats->rx_crc_errors = 0; + stats->rx_length_errors = 0; + + return stats; +} + +static void mlx5e_set_rx_mode(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); schedule_work(&priv->set_rx_mode_work); } @@ -1589,20 +1805,26 @@ static int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { struct mlx5e_priv *priv = netdev_priv(netdev); + int err = 0; netdev_features_t changes = features ^ netdev->features; - struct mlx5e_params new_params; - bool update_params = false; mutex_lock(&priv->state_lock); - new_params = priv->params; if (changes & NETIF_F_LRO) { - new_params.lro_en = !!(features & NETIF_F_LRO); - update_params = true; + bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + + if (was_opened) + mlx5e_close_locked(priv->netdev); + + priv->params.lro_en = !!(features & NETIF_F_LRO); + mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP); + mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP); + + if (was_opened) + err = mlx5e_open_locked(priv->netdev); } - if (update_params) - mlx5e_update_priv_params(priv, &new_params); + mutex_unlock(&priv->state_lock); if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { if (features & NETIF_F_HW_VLAN_CTAG_FILTER) @@ -1611,8 +1833,6 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5e_disable_vlan_filter(priv); } - mutex_unlock(&priv->state_lock); - return 0; } @@ -1620,8 +1840,9 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + bool was_opened; int max_mtu; - int err; + int err = 0; mlx5_query_port_max_mtu(mdev, &max_mtu, 1); @@ -1633,8 +1854,16 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) } mutex_lock(&priv->state_lock); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(netdev); + netdev->mtu = new_mtu; - err = mlx5e_update_priv_params(priv, &priv->params); + + if (was_opened) + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); return err; @@ -1673,11 +1902,21 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) return 0; } +u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) +{ + int bf_buf_size = (1 << 
MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; + + return bf_buf_size - + sizeof(struct mlx5e_tx_wqe) + + 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, - int num_comp_vectors) + int num_channels) { struct mlx5e_priv *priv = netdev_priv(netdev); + int i; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -1691,24 +1930,25 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; priv->params.tx_cq_moderation_pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.min_rx_wqes = MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; - priv->params.rx_hash_log_tbl_sz = - (order_base_2(num_comp_vectors) > - MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? - order_base_2(num_comp_vectors) : - MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; priv->params.num_tc = 1; priv->params.default_vlan_prio = 0; + priv->params.rss_hfunc = ETH_RSS_HASH_XOR; + + netdev_rss_key_fill(priv->params.toeplitz_hash_key, + sizeof(priv->params.toeplitz_hash_key)); + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + priv->params.indirection_rqt[i] = i % num_channels; - priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap); priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; priv->mdev = mdev; priv->netdev = netdev; - priv->params.num_channels = num_comp_vectors; - priv->num_tc = priv->params.num_tc; + priv->params.num_channels = num_channels; priv->default_vlan_prio = priv->params.default_vlan_prio; spin_lock_init(&priv->async_events_spinlock); @@ -1733,9 +1973,8 @@ static void mlx5e_build_netdev(struct net_device *netdev) SET_NETDEV_DEV(netdev, &mdev->pdev->dev); - if (priv->num_tc > 1) { + if (priv->params.num_tc > 1) mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; - } netdev->netdev_ops = &mlx5e_netdev_ops; netdev->watchdog_timeo = 15 * HZ; @@ -1798,19 +2037,20 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) { struct net_device *netdev; struct mlx5e_priv *priv; - int ncv = mdev->priv.eq_table.num_comp_vectors; + int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors, + MLX5E_MAX_NUM_CHANNELS); int err; if (mlx5e_check_required_hca_cap(mdev)) return NULL; - netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv); + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch); if (!netdev) { mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); return NULL; } - mlx5e_build_netdev_priv(mdev, netdev, ncv); + mlx5e_build_netdev_priv(mdev, netdev, nch); mlx5e_build_netdev(netdev); netif_carrier_off(netdev); @@ -1819,43 +2059,95 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); if (err) { - netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n", - __func__, err); + mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); goto err_free_netdev; } err = mlx5_core_alloc_pd(mdev, &priv->pdn); if (err) { - netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n", - __func__, err); + mlx5_core_err(mdev, "alloc pd failed, %d\n", err); goto err_unmap_free_uar; } err = mlx5_alloc_transport_domain(mdev, &priv->tdn); if (err) { - netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n", - __func__, err); + mlx5_core_err(mdev, "alloc td failed, %d\n", err); goto err_dealloc_pd; } err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); if (err) { - netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n", - __func__, err); + 
mlx5_core_err(mdev, "create mkey failed, %d\n", err); goto err_dealloc_transport_domain; } - err = register_netdev(netdev); + err = mlx5e_create_tises(priv); if (err) { - netdev_err(netdev, "%s: register_netdev failed, %d\n", - __func__, err); + mlx5_core_warn(mdev, "create tises failed, %d\n", err); goto err_destroy_mkey; } + err = mlx5e_open_drop_rq(priv); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_tises; + } + + err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT); + if (err) { + mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err); + goto err_close_drop_rq; + } + + err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT); + if (err) { + mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err); + goto err_destroy_rqt_indir; + } + + err = mlx5e_create_tirs(priv); + if (err) { + mlx5_core_warn(mdev, "create tirs failed, %d\n", err); + goto err_destroy_rqt_single; + } + + err = mlx5e_create_flow_tables(priv); + if (err) { + mlx5_core_warn(mdev, "create flow tables failed, %d\n", err); + goto err_destroy_tirs; + } + + mlx5e_init_eth_addr(priv); + + err = register_netdev(netdev); + if (err) { + mlx5_core_err(mdev, "register_netdev failed, %d\n", err); + goto err_destroy_flow_tables; + } + mlx5e_enable_async_events(priv); + schedule_work(&priv->set_rx_mode_work); return priv; +err_destroy_flow_tables: + mlx5e_destroy_flow_tables(priv); + +err_destroy_tirs: + mlx5e_destroy_tirs(priv); + +err_destroy_rqt_single: + mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); + +err_destroy_rqt_indir: + mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); + +err_close_drop_rq: + mlx5e_close_drop_rq(priv); + +err_destroy_tises: + mlx5e_destroy_tises(priv); + err_destroy_mkey: mlx5_core_destroy_mkey(mdev, &priv->mr); @@ -1879,13 +2171,22 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) struct mlx5e_priv *priv = vpriv; struct net_device *netdev = priv->netdev; + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + + schedule_work(&priv->set_rx_mode_work); + mlx5e_disable_async_events(priv); + flush_scheduled_work(); unregister_netdev(netdev); + mlx5e_destroy_flow_tables(priv); + mlx5e_destroy_tirs(priv); + mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); + mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_close_drop_rq(priv); + mlx5e_destroy_tises(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->mr); mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); - mlx5e_disable_async_events(priv); - flush_scheduled_work(); free_netdev(netdev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 9a9374131..cf0098596 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -111,10 +111,12 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) tcp = (struct tcphdr *)(skb->data + ETH_HLEN + sizeof(struct iphdr)); ipv6 = NULL; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; } else { tcp = (struct tcphdr *)(skb->data + ETH_HLEN + sizeof(struct ipv6hdr)); ipv4 = NULL; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; } if (get_cqe_lro_tcppsh(cqe)) @@ -149,6 +151,38 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); } +static inline bool is_first_ethertype_ip(struct sk_buff *skb) +{ + __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto; + + return 
(ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); +} + +static inline void mlx5e_handle_csum(struct net_device *netdev, + struct mlx5_cqe64 *cqe, + struct mlx5e_rq *rq, + struct sk_buff *skb) +{ + if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) + goto csum_none; + + if (likely(cqe->hds_ip_ext & CQE_L4_OK)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (is_first_ethertype_ip(skb)) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold((__force __sum16)cqe->check_sum); + rq->stats.csum_sw++; + } else { + goto csum_none; + } + + return; + +csum_none: + skb->ip_summed = CHECKSUM_NONE; + rq->stats.csum_none++; +} + static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, struct sk_buff *skb) @@ -162,20 +196,12 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe); - skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); rq->stats.lro_packets++; rq->stats.lro_bytes += cqe_bcnt; } - if (likely(netdev->features & NETIF_F_RXCSUM) && - (cqe->hds_ip_ext & CQE_L2_OK) && - (cqe->hds_ip_ext & CQE_L3_OK) && - (cqe->hds_ip_ext & CQE_L4_OK)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - skb->ip_summed = CHECKSUM_NONE; - rq->stats.csum_none++; - } + mlx5e_handle_csum(netdev, cqe, rq, skb); skb->protocol = eth_type_trans(skb, netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 03f28f438..b73672f32 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) if (notify_hw) { cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, wqe); + mlx5e_tx_notify_hw(sq, wqe, 0); } } @@ -106,13 +106,21 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, priv->default_vlan_prio; int tc = netdev_get_prio_tc_map(dev, up); - return priv->channel[channel_ix]->tc_to_txq_map[tc]; + return priv->channeltc_to_txq_map[channel_ix][tc]; } static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, - struct sk_buff *skb) + struct sk_buff *skb, bool bf) { -#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */ + /* Some NIC TX decisions, e.g loopback, are based on the packet + * headers and occur before the data gather. + * Therefore these headers must be copied into the WQE + */ +#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/) + + if (bf && (skb_headlen(skb) <= sq->max_inline)) + return skb_headlen(skb); + return MLX5E_MIN_INLINE; } @@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) u8 opcode = MLX5_OPCODE_SEND; dma_addr_t dma_addr = 0; + bool bf = false; u16 headlen; u16 ds_cnt; u16 ihs; @@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) else sq->stats.csum_offload_none++; + if (sq->cc != sq->prev_cc) { + sq->prev_cc = sq->cc; + sq->bf_budget = (sq->cc == sq->pc) ? 
MLX5E_SQ_BF_BUDGET : 0; + } + if (skb_is_gso(skb)) { u32 payload_len; @@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->stats.tso_packets++; sq->stats.tso_bytes += payload_len; } else { - ihs = mlx5e_get_inline_hdr_size(sq, skb); + bf = sq->bf_budget && + !skb->xmit_more && + !skb_shinfo(skb)->nr_frags; + ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } @@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) } if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { + int bf_sz = 0; + + if (bf && sq->uar_bf_map) + bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3; + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, wqe); + mlx5e_tx_notify_hw(sq, wqe, bf_sz); } /* fill sq edge with nops to avoid wqe wrap around */ while ((sq->pc & wq->sz_m1) > sq->edge) mlx5e_send_nop(sq, false); + sq->bf_budget = bf ? sq->bf_budget - 1 : 0; + sq->stats.packets++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 06e3e1e54..03aabdd79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -457,7 +457,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) struct mlx5_priv *priv = &mdev->priv; struct msix_entry *msix = priv->msix_arr; int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; - int numa_node = dev_to_node(&mdev->pdev->dev); + int numa_node = priv->numa_node; int err; if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { @@ -656,6 +656,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) } #endif +static int map_bf_area(struct mlx5_core_dev *dev) +{ + resource_size_t bf_start = pci_resource_start(dev->pdev, 0); + resource_size_t bf_len = pci_resource_len(dev->pdev, 0); + + dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len); + + return dev->priv.bf_mapping ? 
0 : -ENOMEM; +} + +static void unmap_bf_area(struct mlx5_core_dev *dev) +{ + if (dev->priv.bf_mapping) + io_mapping_free(dev->priv.bf_mapping); +} + static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) { struct mlx5_priv *priv = &dev->priv; @@ -670,6 +686,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) INIT_LIST_HEAD(&priv->pgdir_list); spin_lock_init(&priv->mkey_lock); + mutex_init(&priv->alloc_mutex); + + priv->numa_node = dev_to_node(&dev->pdev->dev); + priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); if (!priv->dbg_root) return -ENOMEM; @@ -806,10 +826,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_stop_eqs; } + if (map_bf_area(dev)) + dev_err(&pdev->dev, "Failed to map blue flame area\n"); + err = mlx5_irq_set_affinity_hints(dev); if (err) { dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); - goto err_free_comp_eqs; + goto err_unmap_bf_area; } MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); @@ -821,7 +844,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) return 0; -err_free_comp_eqs: +err_unmap_bf_area: + unmap_bf_area(dev); + free_comp_eqs(dev); err_stop_eqs: @@ -879,6 +904,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); mlx5_irq_clear_affinity_hints(dev); + unmap_bf_area(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index fc88ecaec..566a70488 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -73,7 +73,12 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in, int in_size, u32 *out, int out_size) { - mlx5_cmd_exec(dev, in, in_size, out, out_size); + int err; + + err = mlx5_cmd_exec(dev, in, in_size, out, out_size); + if (err) + return err; + return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 70147999f..3b9480fa3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -216,22 +216,25 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, } EXPORT_SYMBOL_GPL(mlx5_set_port_proto); -int mlx5_set_port_status(struct mlx5_core_dev *dev, - enum mlx5_port_status status) +int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status) { u32 in[MLX5_ST_SZ_DW(paos_reg)]; u32 out[MLX5_ST_SZ_DW(paos_reg)]; memset(in, 0, sizeof(in)); + MLX5_SET(paos_reg, in, local_port, 1); MLX5_SET(paos_reg, in, admin_status, status); MLX5_SET(paos_reg, in, ase, 1); return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 1); } +EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status); -int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) +int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status *status) { u32 in[MLX5_ST_SZ_DW(paos_reg)]; u32 out[MLX5_ST_SZ_DW(paos_reg)]; @@ -239,14 +242,17 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) memset(in, 0, sizeof(in)); + MLX5_SET(paos_reg, in, local_port, 1); + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 0); if (err) return err; - *status = 
MLX5_GET(paos_reg, out, oper_status); + *status = MLX5_GET(paos_reg, out, admin_status); return err; } +EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, int *max_mtu, int *oper_mtu, u8 port) @@ -305,7 +311,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int err; memset(in, 0, sizeof(in)); - MLX5_SET(ptys_reg, in, local_port, local_port); + MLX5_SET(pvlc_reg, in, local_port, local_port); err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc, pvlc_size, MLX5_REG_PVLC, 0, 0); @@ -328,3 +334,45 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap); + +int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(pfcc_reg, in, local_port, 1); + MLX5_SET(pfcc_reg, in, pptx, tx_pause); + MLX5_SET(pfcc_reg, in, pprx, rx_pause); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 1); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_pause); + +int mlx5_query_port_pause(struct mlx5_core_dev *dev, + u32 *rx_pause, u32 *tx_pause) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(pfcc_reg, in, local_port, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 0); + if (err) + return err; + + if (rx_pause) + *rx_pause = MLX5_GET(pfcc_reg, out, pprx); + + if (tx_pause) + *tx_pause = MLX5_GET(pfcc_reg, out, pptx); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_pause); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 8d98b0302..b4c87c7b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -163,6 +163,18 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, return err; } +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_tir_out)]; + + MLX5_SET(modify_tir_in, in, tirn, tirn); + MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) { u32 in[MLX5_ST_SZ_DW(destroy_tir_out)]; @@ -358,3 +370,44 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } + +int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqtn) +{ + u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; + int err; + + MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *rqtn = MLX5_GET(create_rqt_out, out, rqtn); + + return err; +} + +int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_rqt_out)]; + + MLX5_SET(modify_rqt_in, in, rqtn, rqtn); + MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) +{ + u32 
in[MLX5_ST_SZ_DW(destroy_rqt_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); + MLX5_SET(destroy_rqt_in, in, rqtn, rqtn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h index f9ef24471..74cae5143 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -45,6 +45,8 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn); +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, + int inlen); void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn); @@ -61,4 +63,10 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); +int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqtn); +int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, + int inlen); +void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); + #endif /* __TRANSOBJ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 9ef85873c..eb05c845e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/io-mapping.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" @@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) goto err_free_uar; } + if (mdev->priv.bf_mapping) + uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping, + uar->index << PAGE_SHIFT); + return 0; err_free_uar: @@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) { + io_mapping_unmap(uar->bf_map); iounmap(uar->map); mlx5_cmd_free_uar(mdev, uar->index); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 838841158..ce21ee5b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; - err = mlx5_db_alloc(mdev, &wq_ctrl->db); + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); return err; } - err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf); + err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); goto err_db_free; @@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); wq->sz_m1 = (1 << wq->log_sz) - 1; - err = mlx5_db_alloc(mdev, &wq_ctrl->db); + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if
(err) { mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); return err; } - err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf); + err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); goto err_db_free; @@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; - err = mlx5_db_alloc(mdev, &wq_ctrl->db); + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index e0ddd69fb..6c2a8f950 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -37,7 +37,8 @@ struct mlx5_wq_param { int linear; - int numa; + int buf_numa_node; + int db_numa_node; }; struct mlx5_wq_ctrl { diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig new file mode 100644 index 000000000..2941d9c5a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -0,0 +1,32 @@ +# +# Mellanox switch drivers configuration +# + +config MLXSW_CORE + tristate "Mellanox Technologies Switch ASICs support" + ---help--- + This driver supports Mellanox Technologies Switch ASICs family. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_core. + +config MLXSW_PCI + tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" + depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE + default m + ---help--- + This is PCI bus implementation for Mellanox Technologies Switch ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_pci. + +config MLXSW_SWITCHX2 + tristate "Mellanox Technologies SwitchX-2 support" + depends on MLXSW_CORE && NET_SWITCHDEV + default m + ---help--- + This driver supports Mellanox Technologies SwitchX-2 Ethernet + Switch ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_switchx2. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile new file mode 100644 index 000000000..0a05f65ee --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o +mlxsw_core-objs := core.o +obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o +mlxsw_pci-objs := pci.o +obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o +mlxsw_switchx2-objs := switchx2.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h new file mode 100644 index 000000000..770db17eb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -0,0 +1,1090 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/cmd.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015 Ido Schimmel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_CMD_H +#define _MLXSW_CMD_H + +#include "item.h" + +#define MLXSW_CMD_MBOX_SIZE 4096 + +static inline char *mlxsw_cmd_mbox_alloc(void) +{ + return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL); +} + +static inline void mlxsw_cmd_mbox_free(char *mbox) +{ + kfree(mbox); +} + +static inline void mlxsw_cmd_mbox_zero(char *mbox) +{ + memset(mbox, 0, MLXSW_CMD_MBOX_SIZE); +} + +struct mlxsw_core; + +int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, + u32 in_mod, bool out_mbox_direct, + char *in_mbox, size_t in_mbox_size, + char *out_mbox, size_t out_mbox_size); + +static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode, + u8 opcode_mod, u32 in_mod, char *in_mbox, + size_t in_mbox_size) +{ + return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, + in_mbox, in_mbox_size, NULL, 0); +} + +static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode, + u8 opcode_mod, u32 in_mod, + bool out_mbox_direct, + char *out_mbox, size_t out_mbox_size) +{ + return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, + out_mbox_direct, NULL, 0, + out_mbox, out_mbox_size); +} + +static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode, + u8 opcode_mod, u32 in_mod) +{ + return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, + NULL, 0, NULL, 0); +} + +enum mlxsw_cmd_opcode { + MLXSW_CMD_OPCODE_QUERY_FW = 0x004, + MLXSW_CMD_OPCODE_QUERY_BOARDINFO = 0x006, + MLXSW_CMD_OPCODE_QUERY_AQ_CAP = 0x003, + MLXSW_CMD_OPCODE_MAP_FA = 0xFFF, + MLXSW_CMD_OPCODE_UNMAP_FA = 0xFFE, + MLXSW_CMD_OPCODE_CONFIG_PROFILE = 0x100, + MLXSW_CMD_OPCODE_ACCESS_REG = 0x040, + MLXSW_CMD_OPCODE_SW2HW_DQ = 0x201, + MLXSW_CMD_OPCODE_HW2SW_DQ = 0x202, + MLXSW_CMD_OPCODE_2ERR_DQ = 0x01E, + MLXSW_CMD_OPCODE_QUERY_DQ = 0x022, + MLXSW_CMD_OPCODE_SW2HW_CQ = 0x016, + MLXSW_CMD_OPCODE_HW2SW_CQ = 0x017, + MLXSW_CMD_OPCODE_QUERY_CQ = 0x018, + MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013, + MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014, + MLXSW_CMD_OPCODE_QUERY_EQ = 0x015, +}; + +static inline const char *mlxsw_cmd_opcode_str(u16 opcode) +{ + switch (opcode) { + case 
MLXSW_CMD_OPCODE_QUERY_FW: + return "QUERY_FW"; + case MLXSW_CMD_OPCODE_QUERY_BOARDINFO: + return "QUERY_BOARDINFO"; + case MLXSW_CMD_OPCODE_QUERY_AQ_CAP: + return "QUERY_AQ_CAP"; + case MLXSW_CMD_OPCODE_MAP_FA: + return "MAP_FA"; + case MLXSW_CMD_OPCODE_UNMAP_FA: + return "UNMAP_FA"; + case MLXSW_CMD_OPCODE_CONFIG_PROFILE: + return "CONFIG_PROFILE"; + case MLXSW_CMD_OPCODE_ACCESS_REG: + return "ACCESS_REG"; + case MLXSW_CMD_OPCODE_SW2HW_DQ: + return "SW2HW_DQ"; + case MLXSW_CMD_OPCODE_HW2SW_DQ: + return "HW2SW_DQ"; + case MLXSW_CMD_OPCODE_2ERR_DQ: + return "2ERR_DQ"; + case MLXSW_CMD_OPCODE_QUERY_DQ: + return "QUERY_DQ"; + case MLXSW_CMD_OPCODE_SW2HW_CQ: + return "SW2HW_CQ"; + case MLXSW_CMD_OPCODE_HW2SW_CQ: + return "HW2SW_CQ"; + case MLXSW_CMD_OPCODE_QUERY_CQ: + return "QUERY_CQ"; + case MLXSW_CMD_OPCODE_SW2HW_EQ: + return "SW2HW_EQ"; + case MLXSW_CMD_OPCODE_HW2SW_EQ: + return "HW2SW_EQ"; + case MLXSW_CMD_OPCODE_QUERY_EQ: + return "QUERY_EQ"; + default: + return "*UNKNOWN*"; + } +} + +enum mlxsw_cmd_status { + /* Command execution succeeded. */ + MLXSW_CMD_STATUS_OK = 0x00, + /* Internal error (e.g. bus error) occurred while processing command. */ + MLXSW_CMD_STATUS_INTERNAL_ERR = 0x01, + /* Operation/command not supported or opcode modifier not supported. */ + MLXSW_CMD_STATUS_BAD_OP = 0x02, + /* Parameter not supported, parameter out of range. */ + MLXSW_CMD_STATUS_BAD_PARAM = 0x03, + /* System was not enabled or bad system state. */ + MLXSW_CMD_STATUS_BAD_SYS_STATE = 0x04, + /* Attempt to access reserved or unallocated resource, or resource in + * inappropriate ownership. + */ + MLXSW_CMD_STATUS_BAD_RESOURCE = 0x05, + /* Requested resource is currently executing a command. */ + MLXSW_CMD_STATUS_RESOURCE_BUSY = 0x06, + /* Required capability exceeds device limits. */ + MLXSW_CMD_STATUS_EXCEED_LIM = 0x08, + /* Resource is not in the appropriate state or ownership. */ + MLXSW_CMD_STATUS_BAD_RES_STATE = 0x09, + /* Index out of range (might be beyond table size or attempt to + * access a reserved resource). + */ + MLXSW_CMD_STATUS_BAD_INDEX = 0x0A, + /* NVMEM checksum/CRC failed. */ + MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B, + /* Bad management packet (silently discarded). */ + MLXSW_CMD_STATUS_BAD_PKT = 0x30, +}; + +static inline const char *mlxsw_cmd_status_str(u8 status) +{ + switch (status) { + case MLXSW_CMD_STATUS_OK: + return "OK"; + case MLXSW_CMD_STATUS_INTERNAL_ERR: + return "INTERNAL_ERR"; + case MLXSW_CMD_STATUS_BAD_OP: + return "BAD_OP"; + case MLXSW_CMD_STATUS_BAD_PARAM: + return "BAD_PARAM"; + case MLXSW_CMD_STATUS_BAD_SYS_STATE: + return "BAD_SYS_STATE"; + case MLXSW_CMD_STATUS_BAD_RESOURCE: + return "BAD_RESOURCE"; + case MLXSW_CMD_STATUS_RESOURCE_BUSY: + return "RESOURCE_BUSY"; + case MLXSW_CMD_STATUS_EXCEED_LIM: + return "EXCEED_LIM"; + case MLXSW_CMD_STATUS_BAD_RES_STATE: + return "BAD_RES_STATE"; + case MLXSW_CMD_STATUS_BAD_INDEX: + return "BAD_INDEX"; + case MLXSW_CMD_STATUS_BAD_NVMEM: + return "BAD_NVMEM"; + case MLXSW_CMD_STATUS_BAD_PKT: + return "BAD_PKT"; + default: + return "*UNKNOWN*"; + } +} + +/* QUERY_FW - Query Firmware + * ------------------------- + * OpMod == 0, INMmod == 0 + * ----------------------- + * The QUERY_FW command retrieves information related to firmware, command + * interface version and the amount of resources that should be allocated to + * the firmware. 
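The wrapper just below plus the MLXSW_ITEM32() field descriptors that follow give callers a simple round trip: allocate one 4KB mailbox, execute QUERY_FW, then read fields through the item accessors. A minimal sketch of that pattern, assuming the *_get() helper names that item.h generates from these descriptors (the caller function itself is hypothetical):

	static int example_query_fw_version(struct mlxsw_core *mlxsw_core)
	{
		char *mbox;
		int err;

		mbox = mlxsw_cmd_mbox_alloc();	/* zeroed 4KB command mailbox */
		if (!mbox)
			return -ENOMEM;
		err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
		if (!err)
			pr_info("fw rev %d.%d.%d\n",
				mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox),
				mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox),
				mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox));
		mlxsw_cmd_mbox_free(mbox);
		return err;
	}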
+ */ + +static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core, + char *out_mbox) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW, + 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_query_fw_fw_pages + * Amount of physical memory to be allocated for firmware usage in 4KB pages. + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16); + +/* cmd_mbox_query_fw_fw_rev_major + * Firmware Revision - Major + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16); + +/* cmd_mbox_query_fw_fw_rev_subminor + * Firmware Sub-minor version (Patch level) + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16); + +/* cmd_mbox_query_fw_fw_rev_minor + * Firmware Revision - Minor + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16); + +/* cmd_mbox_query_fw_core_clk + * Internal Clock Frequency (in MHz) + */ +MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16); + +/* cmd_mbox_query_fw_cmd_interface_rev + * Command Interface Interpreter Revision ID. This number is bumped up + * every time a non-backward-compatible change is done for the command + * interface. The current cmd_interface_rev is 1. + */ +MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16); + +/* cmd_mbox_query_fw_dt + * If set, Debug Trace is supported + */ +MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1); + +/* cmd_mbox_query_fw_api_version + * Indicates the version of the API, to enable software querying + * for compatibility. The current api_version is 1. + */ +MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16); + +/* cmd_mbox_query_fw_fw_hour + * Firmware timestamp - hour + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8); + +/* cmd_mbox_query_fw_fw_minutes + * Firmware timestamp - minutes + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8); + +/* cmd_mbox_query_fw_fw_seconds + * Firmware timestamp - seconds + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8); + +/* cmd_mbox_query_fw_fw_year + * Firmware timestamp - year + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16); + +/* cmd_mbox_query_fw_fw_month + * Firmware timestamp - month + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8); + +/* cmd_mbox_query_fw_fw_day + * Firmware timestamp - day + */ +MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8); + +/* cmd_mbox_query_fw_clr_int_base_offset + * Clear Interrupt register's offset from clr_int_bar register + * in PCI address space. + */ +MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64); + +/* cmd_mbox_query_fw_clr_int_bar + * PCI base address register (BAR) where clr_int register is located. + * 00 - BAR 0-1 (64 bit BAR) + */ +MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2); + +/* cmd_mbox_query_fw_error_buf_offset + * Offset of the read-only buffer for internal error reports + * from the error_buf_bar register in PCI address space. + */ +MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64); + +/* cmd_mbox_query_fw_error_buf_size + * Internal error buffer size in DWORDs + */ +MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32); + +/* cmd_mbox_query_fw_error_int_bar + * PCI base address register (BAR) where error buffer + * register is located.
+ * 00 - BAR 0-1 (64 bit BAR) + */ +MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2); + +/* cmd_mbox_query_fw_doorbell_page_offset + * Offset of the doorbell page + */ +MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64); + +/* cmd_mbox_query_fw_doorbell_page_bar + * PCI base address register (BAR) of the doorbell page + * 00 - BAR 0-1 (64 bit BAR) + */ +MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2); + +/* QUERY_BOARDINFO - Query Board Information + * ----------------------------------------- + * OpMod == 0 (N/A), INMmod == 0 (N/A) + * ----------------------------------- + * The QUERY_BOARDINFO command retrieves adapter specific parameters. + */ + +static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core, + char *out_mbox) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO, + 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_boardinfo_intapin + * When PCIe interrupt messages are being used, this value is used for clearing + * an interrupt. When using MSI-X, this register is not used. + */ +MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8); + +/* cmd_mbox_boardinfo_vsd_vendor_id + * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor + * specifying/formatting the VSD. The vsd_vendor_id identifies the management + * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID + * format and encoding as long as they use their assigned vsd_vendor_id. + */ +MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16); + +/* cmd_mbox_boardinfo_vsd + * Vendor Specific Data. The VSD string that is burnt to the Flash + * with the firmware. + */ +#define MLXSW_CMD_BOARDINFO_VSD_LEN 208 +MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN); + +/* cmd_mbox_boardinfo_psid + * The PSID field is a 16-ascii (byte) character string which acts as + * the board ID. The PSID format is used in conjunction with + * Mellanox vsd_vendor_id (15B3h). + */ +#define MLXSW_CMD_BOARDINFO_PSID_LEN 16 +MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN); + +/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities + * ----------------------------------------------------- + * OpMod == 0 (N/A), INMmod == 0 (N/A) + * ----------------------------------- + * The QUERY_AQ_CAP command returns the device asynchronous queues + * capabilities supported. + */ + +static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core, + char *out_mbox) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP, + 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_query_aq_cap_log_max_sdq_sz + * Log (base 2) of max WQEs allowed on SDQ. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8); + +/* cmd_mbox_query_aq_cap_max_num_sdqs + * Maximum number of SDQs. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8); + +/* cmd_mbox_query_aq_cap_log_max_rdq_sz + * Log (base 2) of max WQEs allowed on RDQ. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8); + +/* cmd_mbox_query_aq_cap_max_num_rdqs + * Maximum number of RDQs. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8); + +/* cmd_mbox_query_aq_cap_log_max_cq_sz + * Log (base 2) of max CQEs allowed on CQ. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8); + +/* cmd_mbox_query_aq_cap_max_num_cqs + * Maximum number of CQs. 
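Each MLXSW_ITEM32() descriptor is just a (byte offset, LSB position, width) triple over a big-endian mailbox; item.h expands it into get/set accessors. Written out by hand, the generated getter for log_max_sdq_sz above amounts to the following (a sketch of the generated logic, ignoring the unaligned-access helpers item.h really uses):

	static u8 example_log_max_sdq_sz_get(const char *mbox)
	{
		/* big-endian u32 at byte offset 0x00, bits [31:24] */
		u32 w = be32_to_cpu(*(const __be32 *)(mbox + 0x00));

		return (w >> 24) & 0xff;	/* shift 24, width 8 bits */
	}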
+ */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8); + +/* cmd_mbox_query_aq_cap_log_max_eq_sz + * Log (base 2) of max EQEs allowed on EQ. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8); + +/* cmd_mbox_query_aq_cap_max_num_eqs + * Maximum number of EQs. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8); + +/* cmd_mbox_query_aq_cap_max_sg_sq + * The maximum S/G list elements in a DSQ. A DSQ must not contain + * more S/G entries than indicated here. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8); + +/* cmd_mbox_query_aq_cap_max_sg_rq + * The maximum S/G list elements in a DRQ. A DRQ must not contain + * more S/G entries than indicated here. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8); + +/* MAP_FA - Map Firmware Area + * -------------------------- + * OpMod == 0 (N/A), INMmod == Number of VPM entries + * ------------------------------------------------- + * The MAP_FA command passes physical pages to the switch. These pages + * are used to store the device firmware. MAP_FA can be executed multiple + * times until all the firmware area is mapped (the size that should be + * mapped is retrieved through the QUERY_FW command). All required pages + * must be mapped to finish the initialization phase. Physical memory + * passed in this command must be pinned. + */ + +static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core, + char *in_mbox, u32 vpm_entries_count) +{ + return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA, + 0, vpm_entries_count, + in_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_map_fa_pa + * Physical Address. + */ +MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true); + +/* cmd_mbox_map_fa_log2size + * Log (base 2) of the size in 4KB pages of the physical and contiguous memory + * that starts at PA_L/H. + */ +MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false); + +/* UNMAP_FA - Unmap Firmware Area + * ------------------------------ + * OpMod == 0 (N/A), INMmod == 0 (N/A) + * ----------------------------------- + * The UNMAP_FA command unloads the firmware and unmaps all of the + * firmware area. After this command is completed, the device will not access + * the pages that were mapped to the firmware area. After executing the UNMAP_FA + * command, a software reset must be done prior to execution of the MAP_FA + * command. + */ + +static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0); +} + +/* CONFIG_PROFILE (Set) - Configure Switch Profile + * ----------------------------------------------- + * OpMod == 1 (Set), INMmod == 0 (N/A) + * ----------------------------------- + * The CONFIG_PROFILE command sets the switch profile. The command can be + * executed on the device only once at startup in order to allocate and + * configure all switch resources and prepare it for operational mode. + * It is not possible to change the device profile after the chip is + * in operational mode. + * Failure of the CONFIG_PROFILE command leaves the hardware in an indeterminate + * state; therefore a software reset of the device is required following an + * unsuccessful completion of the command. A software reset is likewise + * required to change an existing profile.
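The indexed items in the MAP_FA block above (MLXSW_ITEM64_INDEXED/MLXSW_ITEM32_INDEXED) take an extra element index, stepping 0x08 bytes per VPM entry. A sketch of feeding MAP_FA a single pinned 4KB page through those accessors (hypothetical caller; real users batch many VPM entries per call and obtain dma_addr from the DMA API):

	static int example_map_fa_one_page(struct mlxsw_core *mlxsw_core,
					   dma_addr_t dma_addr)
	{
		char *mbox;
		int err;

		mbox = mlxsw_cmd_mbox_alloc();
		if (!mbox)
			return -ENOMEM;
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, 0, dma_addr);	/* VPM entry 0 */
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, 0, 0);		/* 2^0 = one 4KB page */
		err = mlxsw_cmd_map_fa(mlxsw_core, mbox, 1);		/* INMmod = 1 entry */
		mlxsw_cmd_mbox_free(mbox);
		return err;
	}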
+ */ + +static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core, + char *in_mbox) +{ + return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE, + 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_config_profile_set_max_vepa_channels + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1); + +/* cmd_mbox_config_profile_set_max_lag + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1); + +/* cmd_mbox_config_profile_set_max_port_per_lag + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1); + +/* cmd_mbox_config_profile_set_max_mid + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1); + +/* cmd_mbox_config_profile_set_max_pgt + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1); + +/* cmd_mbox_config_profile_set_max_system_port + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1); + +/* cmd_mbox_config_profile_set_max_vlan_groups + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1); + +/* cmd_mbox_config_profile_set_max_regions + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1); + +/* cmd_mbox_config_profile_set_flood_mode + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1); + +/* cmd_mbox_config_profile_set_flood_tables + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1); + +/* cmd_mbox_config_profile_set_max_ib_mc + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1); + +/* cmd_mbox_config_profile_set_max_pkey + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1); + +/* cmd_mbox_config_profile_set_adaptive_routing_group_cap + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, + set_adaptive_routing_group_cap, 0x0C, 14, 1); + +/* cmd_mbox_config_profile_set_ar_sec + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents.
+ */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1); + +/* cmd_mbox_config_profile_max_vepa_channels + * Maximum number of VEPA channels per port (0 through 16) + * 0 - multi-channel VEPA is disabled + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8); + +/* cmd_mbox_config_profile_max_lag + * Maximum number of LAG IDs requested. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16); + +/* cmd_mbox_config_profile_max_port_per_lag + * Maximum number of ports per LAG requested. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16); + +/* cmd_mbox_config_profile_max_mid + * Maximum Multicast IDs. + * Multicast IDs are allocated from 0 to max_mid-1 + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16); + +/* cmd_mbox_config_profile_max_pgt + * Maximum records in the Port Group Table per Switch Partition. + * Port Group Table indexes are from 0 to max_pgt-1 + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16); + +/* cmd_mbox_config_profile_max_system_port + * The maximum number of system ports that can be allocated. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16); + +/* cmd_mbox_config_profile_max_vlan_groups + * Maximum number of VLAN Groups for VLAN binding. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12); + +/* cmd_mbox_config_profile_max_regions + * Maximum number of TCAM Regions. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16); + +/* cmd_mbox_config_profile_max_flood_tables + * Maximum number of Flooding Tables. Flooding Tables are associated to + * the different packet types for the different switch partitions. + * Note that the table size depends on the fid_based mode. + * In SwitchX silicon, tables are split equally between the switch + * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated + * with swid-1 and the last 4 are associated with swid-2. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4); + +/* cmd_mbox_config_profile_max_vid_flood_tables + * Maximum number of per-vid flooding tables. Flooding tables are associated + * to the different packet types for the different switch partitions. + * Table size is 4K entries covering all VID space. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4); + +/* cmd_mbox_config_profile_flood_mode + * FID Based Flood Mode + * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID + * 01 Use FID to offset the index to the Port Group Table (pgi) + * 10 Use FID to offset the index to the Port Group Table (pgi) and + * the Multicast ID + */ +MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2); + +/* cmd_mbox_config_profile_max_ib_mc + * Maximum number of multicast FDB records for InfiniBand + * FDB (in 512 chunks) per InfiniBand switch partition.
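The profile fields above only take effect when paired with their set_* capability bit from the previous block: the bit tells firmware to consume the field, otherwise the device default stays. A sketch of that pairing (hypothetical caller and values, assuming the item.h-generated setter names):

	static int example_set_lag_profile(struct mlxsw_core *mlxsw_core)
	{
		char *mbox;
		int err;

		mbox = mlxsw_cmd_mbox_alloc();
		if (!mbox)
			return -ENOMEM;
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);	/* consume max_lag */
		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox, 64);	/* request 64 LAG IDs */
		err = mlxsw_cmd_config_profile_set(mlxsw_core, mbox);
		mlxsw_cmd_mbox_free(mbox);
		return err;
	}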
+ */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15); + +/* cmd_mbox_config_profile_max_pkey + * Maximum per port PKEY table size (for PKEY enforcement) + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15); + +/* cmd_mbox_config_profile_ar_sec + * Primary/secondary capability + * Describes the number of adaptive routing sub-groups + * 0 - disable primary/secondary (single group) + * 1 - enable primary/secondary (2 sub-groups) + * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2 + * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2 + */ +MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2); + +/* cmd_mbox_config_profile_adaptive_routing_group_cap + * Adaptive Routing Group Capability. Indicates the number of AR groups + * supported. Note that when Primary/secondary is enabled, each + * primary/secondary couple consumes 2 adaptive routing entries. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16); + +/* cmd_mbox_config_profile_arn + * Adaptive Routing Notification Enable + * Not supported in SwitchX, SwitchX-2 + */ +MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1); + +/* cmd_mbox_config_profile_swid_config_mask + * Modify Switch Partition Configuration mask. When set, the configuration + * values for the Switch Partition are taken from the mailbox. + * When clear, the current configuration values are used. + * Bit 0 - set type + * Bit 1 - properties + * Other - reserved + */ +MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask, + 0x60, 24, 8, 0x08, 0x00, false); + +/* cmd_mbox_config_profile_swid_config_type + * Switch Partition type. + * 0000 - disabled (Switch Partition does not exist) + * 0001 - InfiniBand + * 0010 - Ethernet + * 1000 - router port (SwitchX-2 only) + * Other - reserved + */ +MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type, + 0x60, 20, 4, 0x08, 0x00, false); + +/* cmd_mbox_config_profile_swid_config_properties + * Switch Partition properties. + */ +MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties, + 0x60, 0, 8, 0x08, 0x00, false); + +/* ACCESS_REG - Access EMAD Supported Register + * ------------------------------------------- + * OpMod == 0 (N/A), INMmod == 0 (N/A) + * ----------------------------------- + * The ACCESS_REG command supports accessing device registers. This access + * is mainly used for bootstrapping. + */ + +static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core, + char *in_mbox, char *out_mbox) +{ + return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG, + 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE, + out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* SW2HW_DQ - Software to Hardware DQ + * ---------------------------------- + * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ) + * INMmod == DQ number + * ---------------------------------------------- + * The SW2HW_DQ command transitions a descriptor queue from software to + * hardware ownership. The command enables posting WQEs and ringing DoorBells + * on the descriptor queue.
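Taken together with the mailbox fields defined just below, handing a send DQ to hardware looks roughly like this (a sketch under the field layout below; the queue number, CQ binding, and single 4KB ring page are illustrative, not prescribed values):

	static int example_sdq_to_hw(struct mlxsw_core *mlxsw_core, u32 sdq_num,
				     u8 cq_num, dma_addr_t ring_page)
	{
		char *mbox;
		int err;

		mbox = mlxsw_cmd_mbox_alloc();
		if (!mbox)
			return -ENOMEM;
		mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);		/* completions go here */
		mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 0);	/* one 4KB page */
		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, 0, ring_page);	/* page 0 address */
		err = mlxsw_cmd_sw2hw_sdq(mlxsw_core, mbox, sdq_num);
		mlxsw_cmd_mbox_free(mbox);
		return err;
	}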
+ */ + +static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core, + char *in_mbox, u32 dq_number, + u8 opcode_mod) +{ + return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ, + opcode_mod, dq_number, + in_mbox, MLXSW_CMD_MBOX_SIZE); +} + +enum { + MLXSW_CMD_OPCODE_MOD_SDQ = 0, + MLXSW_CMD_OPCODE_MOD_RDQ = 1, +}; + +static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core, + char *in_mbox, u32 dq_number) +{ + return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number, + MLXSW_CMD_OPCODE_MOD_SDQ); +} + +static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core, + char *in_mbox, u32 dq_number) +{ + return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number, + MLXSW_CMD_OPCODE_MOD_RDQ); +} + +/* cmd_mbox_sw2hw_dq_cq + * Number of the CQ that this Descriptor Queue reports completions to. + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8); + +/* cmd_mbox_sw2hw_dq_sdq_tclass + * SDQ: CPU Egress TClass + * RDQ: Reserved + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6); + +/* cmd_mbox_sw2hw_dq_log2_dq_sz + * Log (base 2) of the Descriptor Queue size in 4KB pages. + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6); + +/* cmd_mbox_sw2hw_dq_pa + * Physical Address. + */ +MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true); + +/* HW2SW_DQ - Hardware to Software DQ + * ---------------------------------- + * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ) + * INMmod == DQ number + * ---------------------------------------------- + * The HW2SW_DQ command transitions a descriptor queue from hardware to + * software ownership. Incoming packets on the DQ are silently discarded, + * SW should not post descriptors on nonoperational DQs. + */ + +static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core, + u32 dq_number, u8 opcode_mod) +{ + return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ, + opcode_mod, dq_number); +} + +static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core, + u32 dq_number) +{ + return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number, + MLXSW_CMD_OPCODE_MOD_SDQ); +} + +static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core, + u32 dq_number) +{ + return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number, + MLXSW_CMD_OPCODE_MOD_RDQ); +} + +/* 2ERR_DQ - To Error DQ + * --------------------- + * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ) + * INMmod == DQ number + * ---------------------------------------------- + * The 2ERR_DQ command transitions the DQ into the error state from the state + * in which it has been. While the command is executed, some in-process + * descriptors may complete. Once the DQ transitions into the error state, + * if there are posted descriptors on the RDQ/SDQ, the hardware writes + * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ. + * When the command is completed successfully, the DQ is already in + * the error state. 
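Given those flush semantics, a plausible teardown order for an active DQ is: push it to the error state so outstanding descriptors complete as flushed, drain those completions from the bound CQ, then reclaim software ownership. A sketch using the wrappers defined around this point (the ordering is an assumption drawn from the text above, not a documented requirement):

	static void example_sdq_teardown(struct mlxsw_core *mlxsw_core, u32 sdq_num)
	{
		mlxsw_cmd_2err_sdq(mlxsw_core, sdq_num);	/* flush in-flight WQEs */
		/* ... drain flushed completions from the bound CQ here ... */
		mlxsw_cmd_hw2sw_sdq(mlxsw_core, sdq_num);	/* back to SW ownership */
	}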
+ */ + +static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core, + u32 dq_number, u8 opcode_mod) +{ + return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ, + opcode_mod, dq_number); +} + +static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core, + u32 dq_number) +{ + return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number, + MLXSW_CMD_OPCODE_MOD_SDQ); +} + +static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core, + u32 dq_number) +{ + return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number, + MLXSW_CMD_OPCODE_MOD_RDQ); +} + +/* QUERY_DQ - Query DQ + * --------------------- + * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ) + * INMmod == DQ number + * ---------------------------------------------- + * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware. + * + * Note: Output mailbox has the same format as SW2HW_DQ. + */ + +static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core, + char *out_mbox, u32 dq_number, + u8 opcode_mod) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ, + opcode_mod, dq_number, false, + out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core, + char *out_mbox, u32 dq_number) +{ + return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number, + MLXSW_CMD_OPCODE_MOD_SDQ); +} + +static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core, + char *out_mbox, u32 dq_number) +{ + return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number, + MLXSW_CMD_OPCODE_MOD_RDQ); +} + +/* SW2HW_CQ - Software to Hardware CQ + * ---------------------------------- + * OpMod == 0 (N/A), INMmod == CQ number + * ------------------------------------- + * The SW2HW_CQ command transfers ownership of a CQ context entry from software + * to hardware. The command takes the CQ context entry from the input mailbox + * and stores it in the CQC in the ownership of the hardware. The command fails + * if the requested CQC entry is already in the ownership of the hardware. + */ + +static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core, + char *in_mbox, u32 cq_number) +{ + return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ, + 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_sw2hw_cq_cv + * CQE Version. + * 0 - CQE Version 0, 1 - CQE Version 1 + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4); + +/* cmd_mbox_sw2hw_cq_c_eqn + * Event Queue this CQ reports completion events to. + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1); + +/* cmd_mbox_sw2hw_cq_oi + * When set, overrun ignore is enabled. When set, updates of + * CQ consumer counter (poll for completion) or Request completion + * notifications (Arm CQ) DoorBells should not be rung on that CQ. + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1); + +/* cmd_mbox_sw2hw_cq_st + * Event delivery state machine + * 0x0 - FIRED + * 0x1 - ARMED (Request for Notification) + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1); + +/* cmd_mbox_sw2hw_cq_log_cq_size + * Log (base 2) of the CQ size (in entries). + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4); + +/* cmd_mbox_sw2hw_cq_producer_counter + * Producer Counter. The counter is incremented for each CQE that is + * written by the HW to the CQ. + * Maintained by HW (valid for the QUERY_CQ command only) + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16); + +/* cmd_mbox_sw2hw_cq_pa + * Physical Address.
+ */ +MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true); + +/* HW2SW_CQ - Hardware to Software CQ + * ---------------------------------- + * OpMod == 0 (N/A), INMmod == CQ number + * ------------------------------------- + * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware + * to software. The CQC entry is invalidated as a result of this command. + */ + +static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core, + u32 cq_number) +{ + return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ, + 0, cq_number); +} + +/* QUERY_CQ - Query CQ + * ---------------------------------- + * OpMod == 0 (N/A), INMmod == CQ number + * ------------------------------------- + * The QUERY_CQ command retrieves a snapshot of the current CQ context entry. + * The command stores the snapshot in the output mailbox in the software format. + * Note that the CQ context state and values are not affected by the QUERY_CQ + * command. The QUERY_CQ command is for debug purposes only. + * + * Note: Output mailbox has the same format as SW2HW_CQ. + */ + +static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core, + char *out_mbox, u32 cq_number) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ, + 0, cq_number, false, + out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* SW2HW_EQ - Software to Hardware EQ + * ---------------------------------- + * OpMod == 0 (N/A), INMmod == EQ number + * ------------------------------------- + * The SW2HW_EQ command transfers ownership of an EQ context entry from software + * to hardware. The command takes the EQ context entry from the input mailbox + * and stores it in the EQC in the ownership of the hardware. The command fails + * if the requested EQC entry is already in the ownership of the hardware. + */ + +static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core, + char *in_mbox, u32 eq_number) +{ + return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ, + 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_sw2hw_eq_int_msix + * When set, MSI-X cycles will be generated by this EQ. + * When cleared, an interrupt will be generated by this EQ. + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1); + +/* cmd_mbox_sw2hw_eq_oi + * When set, overrun ignore is enabled. + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); + +/* cmd_mbox_sw2hw_eq_st + * Event delivery state machine + * 0x0 - FIRED + * 0x1 - ARMED (Request for Notification) + * 0x3 - Always ARMED + * other - reserved + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2); + +/* cmd_mbox_sw2hw_eq_log_eq_size + * Log (base 2) of the EQ size (in entries). + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4); + +/* cmd_mbox_sw2hw_eq_producer_counter + * Producer Counter. The counter is incremented for each EQE that is written + * by the HW to the EQ. + * Maintained by HW (valid for the QUERY_EQ command only) + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16); + +/* cmd_mbox_sw2hw_eq_pa + * Physical Address.
+ */ +MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true); + +/* HW2SW_EQ - Hardware to Software EQ + * ---------------------------------- + * OpMod == 0 (N/A), INMmod == EQ number + * ------------------------------------- + */ + +static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core, + u32 eq_number) +{ + return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ, + 0, eq_number); +} + +/* QUERY_EQ - Query EQ + * ---------------------------------- + * OpMod == 0 (N/A), INMmod == EQ number + * ------------------------------------- + * + * Note: Output mailbox has the same format as SW2HW_EQ. + */ + +static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core, + char *out_mbox, u32 eq_number) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ, + 0, eq_number, false, + out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c new file mode 100644 index 000000000..28c19cc1a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -0,0 +1,1300 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015 Ido Schimmel + * Copyright (c) 2015 Elad Raz + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
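One more item.h flavor appears in these files: MLXSW_ITEM_BUF() (used earlier for the boardinfo VSD/PSID strings and below for the EMAD MAC addresses) describes a byte array rather than a bit field, so item.h generates *_memcpy_from()/*_memcpy_to() helpers instead of get/set accessors. A sketch, assuming those generated names:

	static void example_read_psid(const char *mbox, char *psid_out)
	{
		/* copies MLXSW_CMD_BOARDINFO_PSID_LEN (16) bytes from offset 0xF0 */
		mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, psid_out);
	}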
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "item.h" +#include "cmd.h" +#include "port.h" +#include "trap.h" +#include "emad.h" +#include "reg.h" + +static LIST_HEAD(mlxsw_core_driver_list); +static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock); + +static const char mlxsw_core_driver_name[] = "mlxsw_core"; + +static struct dentry *mlxsw_core_dbg_root; + +struct mlxsw_core_pcpu_stats { + u64 trap_rx_packets[MLXSW_TRAP_ID_MAX]; + u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX]; + u64 port_rx_packets[MLXSW_PORT_MAX_PORTS]; + u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS]; + struct u64_stats_sync syncp; + u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX]; + u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS]; + u32 trap_rx_invalid; + u32 port_rx_invalid; +}; + +struct mlxsw_core { + struct mlxsw_driver *driver; + const struct mlxsw_bus *bus; + void *bus_priv; + const struct mlxsw_bus_info *bus_info; + struct list_head rx_listener_list; + struct list_head event_listener_list; + struct { + struct sk_buff *resp_skb; + u64 tid; + wait_queue_head_t wait; + bool trans_active; + struct mutex lock; /* One EMAD transaction at a time. */ + bool use_emad; + } emad; + struct mlxsw_core_pcpu_stats __percpu *pcpu_stats; + struct dentry *dbg_dir; + struct { + struct debugfs_blob_wrapper vsd_blob; + struct debugfs_blob_wrapper psid_blob; + } dbg; + unsigned long driver_priv[0]; + /* driver_priv has to be always the last item */ +}; + +struct mlxsw_rx_listener_item { + struct list_head list; + struct mlxsw_rx_listener rxl; + void *priv; +}; + +struct mlxsw_event_listener_item { + struct list_head list; + struct mlxsw_event_listener el; + void *priv; +}; + +/****************** + * EMAD processing + ******************/ + +/* emad_eth_hdr_dmac + * Destination MAC in EMAD's Ethernet header. + * Must be set to 01:02:c9:00:00:01 + */ +MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6); + +/* emad_eth_hdr_smac + * Source MAC in EMAD's Ethernet header. + * Must be set to 00:02:c9:01:02:03 + */ +MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6); + +/* emad_eth_hdr_ethertype + * Ethertype in EMAD's Ethernet header. + * Must be set to 0x8932 + */ +MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16); + +/* emad_eth_hdr_mlx_proto + * Mellanox protocol. + * Must be set to 0x0. + */ +MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8); + +/* emad_eth_hdr_ver + * Mellanox protocol version. + * Must be set to 0x0. + */ +MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4); + +/* emad_op_tlv_type + * Type of the TLV. + * Must be set to 0x1 (operation TLV). + */ +MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5); + +/* emad_op_tlv_len + * Length of the operation TLV in u32. + * Must be set to 0x4. + */ +MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11); + +/* emad_op_tlv_dr + * Direct route bit. Setting to 1 indicates the EMAD is a direct route + * EMAD. DR TLV must follow. + * + * Note: Currently not supported and must not be set. + */ +MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1); + +/* emad_op_tlv_status + * Returned status in case of EMAD response. Must be set to 0 in case + * of EMAD request. + * 0x0 - success + * 0x1 - device is busy. Requester should retry + * 0x2 - Mellanox protocol version not supported + * 0x3 - unknown TLV + * 0x4 - register not supported + * 0x5 - operation class not supported + * 0x6 - EMAD method not supported + * 0x7 - bad parameter (e.g. 
port out of range) + * 0x8 - resource not available + * 0x9 - message receipt acknowledgment. Requester should retry + * 0x70 - internal error + */ +MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7); + +/* emad_op_tlv_register_id + * Register ID of register within register TLV. + */ +MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16); + +/* emad_op_tlv_r + * Response bit. Setting to 1 indicates Response, otherwise request. + */ +MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1); + +/* emad_op_tlv_method + * EMAD method type. + * 0x1 - query + * 0x2 - write + * 0x3 - send (currently not supported) + * 0x4 - event + */ +MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7); + +/* emad_op_tlv_class + * EMAD operation class. Must be set to 0x1 (REG_ACCESS). + */ +MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8); + +/* emad_op_tlv_tid + * EMAD transaction ID. Used for pairing request and response EMADs. + */ +MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64); + +/* emad_reg_tlv_type + * Type of the TLV. + * Must be set to 0x3 (register TLV). + */ +MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5); + +/* emad_reg_tlv_len + * Length of the register TLV in u32. + */ +MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11); + +/* emad_end_tlv_type + * Type of the TLV. + * Must be set to 0x0 (end TLV). + */ +MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5); + +/* emad_end_tlv_len + * Length of the end TLV in u32. + * Must be set to 1. + */ +MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11); + +enum mlxsw_core_reg_access_type { + MLXSW_CORE_REG_ACCESS_TYPE_QUERY, + MLXSW_CORE_REG_ACCESS_TYPE_WRITE, +}; + +static inline const char * +mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type) +{ + switch (type) { + case MLXSW_CORE_REG_ACCESS_TYPE_QUERY: + return "query"; + case MLXSW_CORE_REG_ACCESS_TYPE_WRITE: + return "write"; + } + BUG(); +} + +static void mlxsw_emad_pack_end_tlv(char *end_tlv) +{ + mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END); + mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN); +} + +static void mlxsw_emad_pack_reg_tlv(char *reg_tlv, + const struct mlxsw_reg_info *reg, + char *payload) +{ + mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG); + mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1); + memcpy(reg_tlv + sizeof(u32), payload, reg->len); +} + +static void mlxsw_emad_pack_op_tlv(char *op_tlv, + const struct mlxsw_reg_info *reg, + enum mlxsw_core_reg_access_type type, + struct mlxsw_core *mlxsw_core) +{ + mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP); + mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN); + mlxsw_emad_op_tlv_dr_set(op_tlv, 0); + mlxsw_emad_op_tlv_status_set(op_tlv, 0); + mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id); + mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST); + if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type) + mlxsw_emad_op_tlv_method_set(op_tlv, + MLXSW_EMAD_OP_TLV_METHOD_QUERY); + else + mlxsw_emad_op_tlv_method_set(op_tlv, + MLXSW_EMAD_OP_TLV_METHOD_WRITE); + mlxsw_emad_op_tlv_class_set(op_tlv, + MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS); + mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid); +} + +static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb) +{ + char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN); + + mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC); + mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC); + mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE); + mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO); +
mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION); + + skb_reset_mac_header(skb); + + return 0; +} + +static void mlxsw_emad_construct(struct sk_buff *skb, + const struct mlxsw_reg_info *reg, + char *payload, + enum mlxsw_core_reg_access_type type, + struct mlxsw_core *mlxsw_core) +{ + char *buf; + + buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32)); + mlxsw_emad_pack_end_tlv(buf); + + buf = skb_push(skb, reg->len + sizeof(u32)); + mlxsw_emad_pack_reg_tlv(buf, reg, payload); + + buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); + mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core); + + mlxsw_emad_construct_eth_hdr(skb); +} + +static char *mlxsw_emad_op_tlv(const struct sk_buff *skb) +{ + return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN)); +} + +static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb) +{ + return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN + + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32))); +} + +static char *mlxsw_emad_reg_payload(const char *op_tlv) +{ + return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32))); +} + +static u64 mlxsw_emad_get_tid(const struct sk_buff *skb) +{ + char *op_tlv; + + op_tlv = mlxsw_emad_op_tlv(skb); + return mlxsw_emad_op_tlv_tid_get(op_tlv); +} + +static bool mlxsw_emad_is_resp(const struct sk_buff *skb) +{ + char *op_tlv; + + op_tlv = mlxsw_emad_op_tlv(skb); + return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv)); +} + +#define MLXSW_EMAD_TIMEOUT_MS 200 + +static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, + struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + int err; + int ret; + + mlxsw_core->emad.trans_active = true; + + err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); + if (err) { + dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", + mlxsw_core->emad.tid); + dev_kfree_skb(skb); + goto trans_inactive_out; + } + + ret = wait_event_timeout(mlxsw_core->emad.wait, + !(mlxsw_core->emad.trans_active), + msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); + if (!ret) { + dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", + mlxsw_core->emad.tid); + err = -EIO; + goto trans_inactive_out; + } + + return 0; + +trans_inactive_out: + mlxsw_core->emad.trans_active = false; + return err; +} + +static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, + char *op_tlv) +{ + enum mlxsw_emad_op_tlv_status status; + u64 tid; + + status = mlxsw_emad_op_tlv_status_get(op_tlv); + tid = mlxsw_emad_op_tlv_tid_get(op_tlv); + + switch (status) { + case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: + return 0; + case MLXSW_EMAD_OP_TLV_STATUS_BUSY: + case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: + dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n", + tid, status, mlxsw_emad_op_tlv_status_str(status)); + return -EAGAIN; + case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: + case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: + case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED: + case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED: + case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED: + case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER: + case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: + case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: + default: + dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n", + tid, status, mlxsw_emad_op_tlv_status_str(status)); + return -EIO; + } +} + +static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core, + struct sk_buff 
*skb) +{ + return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb)); +} + +static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, + struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + struct sk_buff *trans_skb; + int n_retry; + int err; + + n_retry = 0; +retry: + /* We copy the EMAD to a new skb, since we might need + * to retransmit it in case of failure. + */ + trans_skb = skb_copy(skb, GFP_KERNEL); + if (!trans_skb) { + err = -ENOMEM; + goto out; + } + + err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info); + if (!err) { + struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb; + + err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb); + if (err) + dev_kfree_skb(resp_skb); + if (!err || err != -EAGAIN) + goto out; + } + if (n_retry++ < MLXSW_EMAD_MAX_RETRY) + goto retry; + +out: + dev_kfree_skb(skb); + mlxsw_core->emad.tid++; + return err; +} + +static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_core *mlxsw_core = priv; + + if (mlxsw_emad_is_resp(skb) && + mlxsw_core->emad.trans_active && + mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) { + mlxsw_core->emad.resp_skb = skb; + mlxsw_core->emad.trans_active = false; + wake_up(&mlxsw_core->emad.wait); + } else { + dev_kfree_skb(skb); + } +} + +static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = { + .func = mlxsw_emad_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_ETHEMAD, +}; + +static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int err; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_TRAP_ID_ETHEMAD); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); +} + +static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) +{ + int err; + + /* Set the upper 32 bits of the transaction ID field to a random + * number. This allows us to discard EMADs addressed to other + * devices. 
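Pulled together, the EMAD helpers above implement one synchronous transaction: build the frame, transmit with retries, and let the RX listener park the matching response. A condensed sketch of a register write through this machinery (simplified from the reg-access path later in this file; the CPU port constant, the is_emad flag, and the omitted emad.lock locking are abbreviated assumptions):

	static int example_emad_reg_write(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_reg_info *reg,
					  char *payload)
	{
		struct mlxsw_tx_info tx_info = {
			.local_port = MLXSW_PORT_CPU_PORT,
			.is_emad = true,
		};
		struct sk_buff *skb;
		char *op_tlv;
		int err;

		skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
		if (!skb)
			return -ENOMEM;
		mlxsw_emad_construct(skb, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE, mlxsw_core);
		mlxsw_core->driver->txhdr_construct(skb, &tx_info);	/* bus TX header */
		err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
		if (err)
			return err;
		/* On success the RX listener parked the response skb. */
		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
		memcpy(payload, mlxsw_emad_reg_payload(op_tlv), reg->len);
		dev_kfree_skb(mlxsw_core->emad.resp_skb);
		return 0;
	}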
+ */ + get_random_bytes(&mlxsw_core->emad.tid, 4); + mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32; + + init_waitqueue_head(&mlxsw_core->emad.wait); + mlxsw_core->emad.trans_active = false; + mutex_init(&mlxsw_core->emad.lock); + + err = mlxsw_core_rx_listener_register(mlxsw_core, + &mlxsw_emad_rx_listener, + mlxsw_core); + if (err) + return err; + + err = mlxsw_emad_traps_set(mlxsw_core); + if (err) + goto err_emad_trap_set; + + mlxsw_core->emad.use_emad = true; + + return 0; + +err_emad_trap_set: + mlxsw_core_rx_listener_unregister(mlxsw_core, + &mlxsw_emad_rx_listener, + mlxsw_core); + return err; +} + +static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, + MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_TRAP_ID_ETHEMAD); + mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); + + mlxsw_core_rx_listener_unregister(mlxsw_core, + &mlxsw_emad_rx_listener, + mlxsw_core); +} + +static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, + u16 reg_len) +{ + struct sk_buff *skb; + u16 emad_len; + + emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN + + (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * + sizeof(u32) + mlxsw_core->driver->txhdr_len); + if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN) + return NULL; + + skb = netdev_alloc_skb(NULL, emad_len); + if (!skb) + return NULL; + memset(skb->data, 0, emad_len); + skb_reserve(skb, emad_len); + + return skb; +} + +/***************** + * Core functions + *****************/ + +static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_core *mlxsw_core = file->private; + struct mlxsw_core_pcpu_stats *p; + u64 rx_packets, rx_bytes; + u64 tmp_rx_packets, tmp_rx_bytes; + u32 rx_dropped, rx_invalid; + unsigned int start; + int i; + int j; + static const char hdr[] = + " NUM RX_PACKETS RX_BYTES RX_DROPPED\n"; + + seq_printf(file, hdr); + for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) { + rx_packets = 0; + rx_bytes = 0; + rx_dropped = 0; + for_each_possible_cpu(j) { + p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); + do { + start = u64_stats_fetch_begin(&p->syncp); + tmp_rx_packets = p->trap_rx_packets[i]; + tmp_rx_bytes = p->trap_rx_bytes[i]; + } while (u64_stats_fetch_retry(&p->syncp, start)); + + rx_packets += tmp_rx_packets; + rx_bytes += tmp_rx_bytes; + rx_dropped += p->trap_rx_dropped[i]; + } + seq_printf(file, "trap %3d %12llu %12llu %10u\n", + i, rx_packets, rx_bytes, rx_dropped); + } + rx_invalid = 0; + for_each_possible_cpu(j) { + p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); + rx_invalid += p->trap_rx_invalid; + } + seq_printf(file, "trap INV %10u\n", + rx_invalid); + + for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) { + rx_packets = 0; + rx_bytes = 0; + rx_dropped = 0; + for_each_possible_cpu(j) { + p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); + do { + start = u64_stats_fetch_begin(&p->syncp); + tmp_rx_packets = p->port_rx_packets[i]; + tmp_rx_bytes = p->port_rx_bytes[i]; + } while (u64_stats_fetch_retry(&p->syncp, start)); + + rx_packets += tmp_rx_packets; + rx_bytes += tmp_rx_bytes; + rx_dropped += p->port_rx_dropped[i]; + } + seq_printf(file, "port %3d %12llu %12llu %10u\n", + i, rx_packets, rx_bytes, rx_dropped); + } + rx_invalid = 0; + for_each_possible_cpu(j) { + p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); + rx_invalid += p->port_rx_invalid; + } + seq_printf(file, "port INV %10u\n", + rx_invalid); + return 0; +} + +static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f) +{ + 
struct mlxsw_core *mlxsw_core = inode->i_private; + + return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core); +} + +static const struct file_operations mlxsw_core_rx_stats_dbg_ops = { + .owner = THIS_MODULE, + .open = mlxsw_core_rx_stats_dbg_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, + const char *buf, size_t size) +{ + __be32 *m = (__be32 *) buf; + int i; + int count = size / sizeof(__be32); + + for (i = count - 1; i >= 0; i--) + if (m[i]) + break; + i++; + count = i ? i : 1; + for (i = 0; i < count; i += 4) + dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", + i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), + be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); +} + +int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) +{ + spin_lock(&mlxsw_core_driver_list_lock); + list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list); + spin_unlock(&mlxsw_core_driver_list_lock); + return 0; +} +EXPORT_SYMBOL(mlxsw_core_driver_register); + +void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver) +{ + spin_lock(&mlxsw_core_driver_list_lock); + list_del(&mlxsw_driver->list); + spin_unlock(&mlxsw_core_driver_list_lock); +} +EXPORT_SYMBOL(mlxsw_core_driver_unregister); + +static struct mlxsw_driver *__driver_find(const char *kind) +{ + struct mlxsw_driver *mlxsw_driver; + + list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) { + if (strcmp(mlxsw_driver->kind, kind) == 0) + return mlxsw_driver; + } + return NULL; +} + +static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind) +{ + struct mlxsw_driver *mlxsw_driver; + + spin_lock(&mlxsw_core_driver_list_lock); + mlxsw_driver = __driver_find(kind); + if (!mlxsw_driver) { + spin_unlock(&mlxsw_core_driver_list_lock); + request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind); + spin_lock(&mlxsw_core_driver_list_lock); + mlxsw_driver = __driver_find(kind); + } + if (mlxsw_driver) { + if (!try_module_get(mlxsw_driver->owner)) + mlxsw_driver = NULL; + } + + spin_unlock(&mlxsw_core_driver_list_lock); + return mlxsw_driver; +} + +static void mlxsw_core_driver_put(const char *kind) +{ + struct mlxsw_driver *mlxsw_driver; + + spin_lock(&mlxsw_core_driver_list_lock); + mlxsw_driver = __driver_find(kind); + spin_unlock(&mlxsw_core_driver_list_lock); + if (!mlxsw_driver) + return; + module_put(mlxsw_driver->owner); +} + +static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core) +{ + const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; + + mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name, + mlxsw_core_dbg_root); + if (!mlxsw_core->dbg_dir) + return -ENOMEM; + debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir, + mlxsw_core, &mlxsw_core_rx_stats_dbg_ops); + mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd; + mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd); + debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir, + &mlxsw_core->dbg.vsd_blob); + mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid; + mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid); + debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir, + &mlxsw_core->dbg.psid_blob); + return 0; +} + +static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core) +{ + debugfs_remove_recursive(mlxsw_core->dbg_dir); +} + +int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, + const struct mlxsw_bus *mlxsw_bus, + void *bus_priv) +{ + const char 
*device_kind = mlxsw_bus_info->device_kind; + struct mlxsw_core *mlxsw_core; + struct mlxsw_driver *mlxsw_driver; + size_t alloc_size; + int err; + + mlxsw_driver = mlxsw_core_driver_get(device_kind); + if (!mlxsw_driver) + return -EINVAL; + alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; + mlxsw_core = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_core) { + err = -ENOMEM; + goto err_core_alloc; + } + + INIT_LIST_HEAD(&mlxsw_core->rx_listener_list); + INIT_LIST_HEAD(&mlxsw_core->event_listener_list); + mlxsw_core->driver = mlxsw_driver; + mlxsw_core->bus = mlxsw_bus; + mlxsw_core->bus_priv = bus_priv; + mlxsw_core->bus_info = mlxsw_bus_info; + + mlxsw_core->pcpu_stats = + netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats); + if (!mlxsw_core->pcpu_stats) { + err = -ENOMEM; + goto err_alloc_stats; + } + + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); + if (err) + goto err_bus_init; + + err = mlxsw_emad_init(mlxsw_core); + if (err) + goto err_emad_init; + + err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, + mlxsw_bus_info); + if (err) + goto err_driver_init; + + err = mlxsw_core_debugfs_init(mlxsw_core); + if (err) + goto err_debugfs_init; + + return 0; + +err_debugfs_init: + mlxsw_core->driver->fini(mlxsw_core->driver_priv); +err_driver_init: + mlxsw_emad_fini(mlxsw_core); +err_emad_init: + mlxsw_bus->fini(bus_priv); +err_bus_init: + free_percpu(mlxsw_core->pcpu_stats); +err_alloc_stats: + kfree(mlxsw_core); +err_core_alloc: + mlxsw_core_driver_put(device_kind); + return err; +} +EXPORT_SYMBOL(mlxsw_core_bus_device_register); + +void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) +{ + const char *device_kind = mlxsw_core->bus_info->device_kind; + + mlxsw_core_debugfs_fini(mlxsw_core); + mlxsw_core->driver->fini(mlxsw_core->driver_priv); + mlxsw_emad_fini(mlxsw_core); + mlxsw_core->bus->fini(mlxsw_core->bus_priv); + free_percpu(mlxsw_core->pcpu_stats); + kfree(mlxsw_core); + mlxsw_core_driver_put(device_kind); +} +EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); + +static struct mlxsw_core *__mlxsw_core_get(void *driver_priv) +{ + return container_of(driver_priv, struct mlxsw_core, driver_priv); +} + +bool mlxsw_core_skb_transmit_busy(void *driver_priv, + const struct mlxsw_tx_info *tx_info) +{ + struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv); + + return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv, + tx_info); +} +EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy); + +int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv); + + return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb, + tx_info); +} +EXPORT_SYMBOL(mlxsw_core_skb_transmit); + +static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a, + const struct mlxsw_rx_listener *rxl_b) +{ + return (rxl_a->func == rxl_b->func && + rxl_a->local_port == rxl_b->local_port && + rxl_a->trap_id == rxl_b->trap_id); +} + +static struct mlxsw_rx_listener_item * +__find_rx_listener_item(struct mlxsw_core *mlxsw_core, + const struct mlxsw_rx_listener *rxl, + void *priv) +{ + struct mlxsw_rx_listener_item *rxl_item; + + list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) { + if (__is_rx_listener_equal(&rxl_item->rxl, rxl) && + rxl_item->priv == priv) + return rxl_item; + } + return NULL; +} + +int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core, + const struct mlxsw_rx_listener *rxl, + void *priv) +{ 
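+	/* Listeners are matched on handler, local_port and trap_id together
+	 * with their private data; registering the same tuple twice fails
+	 * with -EEXIST. New items are published with list_add_rcu() so that
+	 * mlxsw_core_skb_receive() can walk the list under rcu_read_lock()
+	 * without taking a lock here.
+	 */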
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (rxl_item)
+		return -EEXIST;
+	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+	if (!rxl_item)
+		return -ENOMEM;
+	rxl_item->rxl = *rxl;
+	rxl_item->priv = priv;
+
+	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (!rxl_item)
+		return;
+	list_del_rcu(&rxl_item->list);
+	synchronize_rcu();
+	kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+					   void *priv)
+{
+	struct mlxsw_event_listener_item *event_listener_item = priv;
+	struct mlxsw_reg_info reg;
+	char *payload;
+	char *op_tlv = mlxsw_emad_op_tlv(skb);
+	char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+	payload = mlxsw_emad_reg_payload(op_tlv);
+	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+	dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+				      const struct mlxsw_event_listener *el_b)
+{
+	return (el_a->func == el_b->func &&
+		el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+			   const struct mlxsw_event_listener *el,
+			   void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+
+	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+		if (__is_event_listener_equal(&el_item->el, el) &&
+		    el_item->priv == priv)
+			return el_item;
+	}
+	return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv)
+{
+	int err;
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (el_item)
+		return -EEXIST;
+	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+	if (!el_item)
+		return -ENOMEM;
+	el_item->el = *el;
+	el_item->priv = priv;
+
+	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+	if (err)
+		goto err_rx_listener_register;
+
+	/* No reason to save item if we did not manage to register an RX
+	 * listener for it.
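+	 * The item thus becomes visible on event_listener_list only after
+	 * the underlying RX listener is in place; unregistration below
+	 * tears the pair down in the reverse order.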
+ */ + list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list); + + return 0; + +err_rx_listener_register: + kfree(el_item); + return err; +} +EXPORT_SYMBOL(mlxsw_core_event_listener_register); + +void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, + const struct mlxsw_event_listener *el, + void *priv) +{ + struct mlxsw_event_listener_item *el_item; + const struct mlxsw_rx_listener rxl = { + .func = mlxsw_core_event_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = el->trap_id, + }; + + el_item = __find_event_listener_item(mlxsw_core, el, priv); + if (!el_item) + return; + mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item); + list_del(&el_item->list); + kfree(el_item); +} +EXPORT_SYMBOL(mlxsw_core_event_listener_unregister); + +static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, + char *payload, + enum mlxsw_core_reg_access_type type) +{ + int err; + char *op_tlv; + struct sk_buff *skb; + struct mlxsw_tx_info tx_info = { + .local_port = MLXSW_PORT_CPU_PORT, + .is_emad = true, + }; + + skb = mlxsw_emad_alloc(mlxsw_core, reg->len); + if (!skb) + return -ENOMEM; + + mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core); + mlxsw_core->driver->txhdr_construct(skb, &tx_info); + + dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n", + mlxsw_core->emad.tid); + mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len); + + err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info); + if (!err) { + op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb); + memcpy(payload, mlxsw_emad_reg_payload(op_tlv), + reg->len); + + dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n", + mlxsw_core->emad.tid - 1); + mlxsw_core_buf_dump_dbg(mlxsw_core, + mlxsw_core->emad.resp_skb->data, + mlxsw_core->emad.resp_skb->len); + + dev_kfree_skb(mlxsw_core->emad.resp_skb); + } + + return err; +} + +static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, + char *payload, + enum mlxsw_core_reg_access_type type) +{ + int err, n_retry; + char *in_mbox, *out_mbox, *tmp; + + in_mbox = mlxsw_cmd_mbox_alloc(); + if (!in_mbox) + return -ENOMEM; + + out_mbox = mlxsw_cmd_mbox_alloc(); + if (!out_mbox) { + err = -ENOMEM; + goto free_in_mbox; + } + + mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core); + tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); + mlxsw_emad_pack_reg_tlv(tmp, reg, payload); + + n_retry = 0; +retry: + err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox); + if (!err) { + err = mlxsw_emad_process_status(mlxsw_core, out_mbox); + if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) + goto retry; + } + + if (!err) + memcpy(payload, mlxsw_emad_reg_payload(out_mbox), + reg->len); + + mlxsw_core->emad.tid++; + mlxsw_cmd_mbox_free(out_mbox); +free_in_mbox: + mlxsw_cmd_mbox_free(in_mbox); + return err; +} + +static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, + char *payload, + enum mlxsw_core_reg_access_type type) +{ + u64 cur_tid; + int err; + + if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) { + dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n", + reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); + return -EINTR; + } + + cur_tid = mlxsw_core->emad.tid; + dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", + cur_tid, reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); + 
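+	/* emad.lock is held across the whole transaction, so at most one
+	 * EMAD (and one transaction ID) is ever in flight.
+	 */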
+ /* During initialization EMAD interface is not available to us, + * so we default to command interface. We switch to EMAD interface + * after setting the appropriate traps. + */ + if (!mlxsw_core->emad.use_emad) + err = mlxsw_core_reg_access_cmd(mlxsw_core, reg, + payload, type); + else + err = mlxsw_core_reg_access_emad(mlxsw_core, reg, + payload, type); + + if (err) + dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n", + cur_tid, reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); + + mutex_unlock(&mlxsw_core->emad.lock); + return err; +} + +int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload) +{ + return mlxsw_core_reg_access(mlxsw_core, reg, payload, + MLXSW_CORE_REG_ACCESS_TYPE_QUERY); +} +EXPORT_SYMBOL(mlxsw_reg_query); + +int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload) +{ + return mlxsw_core_reg_access(mlxsw_core, reg, payload, + MLXSW_CORE_REG_ACCESS_TYPE_WRITE); +} +EXPORT_SYMBOL(mlxsw_reg_write); + +void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, + struct mlxsw_rx_info *rx_info) +{ + struct mlxsw_rx_listener_item *rxl_item; + const struct mlxsw_rx_listener *rxl; + struct mlxsw_core_pcpu_stats *pcpu_stats; + u8 local_port = rx_info->sys_port; + bool found = false; + + dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n", + __func__, rx_info->sys_port, rx_info->trap_id); + + if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || + (local_port >= MLXSW_PORT_MAX_PORTS)) + goto drop; + + rcu_read_lock(); + list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) { + rxl = &rxl_item->rxl; + if ((rxl->local_port == MLXSW_PORT_DONT_CARE || + rxl->local_port == local_port) && + rxl->trap_id == rx_info->trap_id) { + found = true; + break; + } + } + rcu_read_unlock(); + if (!found) + goto drop; + + pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->port_rx_packets[local_port]++; + pcpu_stats->port_rx_bytes[local_port] += skb->len; + pcpu_stats->trap_rx_packets[rx_info->trap_id]++; + pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + rxl->func(skb, local_port, rxl_item->priv); + return; + +drop: + if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX) + this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid); + else + this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]); + if (local_port >= MLXSW_PORT_MAX_PORTS) + this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid); + else + this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]); + dev_kfree_skb(skb); +} +EXPORT_SYMBOL(mlxsw_core_skb_receive); + +int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, + u32 in_mod, bool out_mbox_direct, + char *in_mbox, size_t in_mbox_size, + char *out_mbox, size_t out_mbox_size) +{ + u8 status; + int err; + + BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32)); + if (!mlxsw_core->bus->cmd_exec) + return -EOPNOTSUPP; + + dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n", + opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod); + if (in_mbox) { + dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n"); + mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size); + } + + err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode, + opcode_mod, in_mod, out_mbox_direct, 
+				in_mbox, in_mbox_size,
+				out_mbox, out_mbox_size, &status);
+
+	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+			in_mod, status, mlxsw_cmd_status_str(status));
+	} else if (err == -ETIMEDOUT) {
+		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+			in_mod);
+	}
+
+	if (!err && out_mbox) {
+		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+	}
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+	if (!mlxsw_core_dbg_root)
+		return -ENOMEM;
+	return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+	debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 000000000..165808471
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind)	\
+	MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+				   const struct mlxsw_bus *mlxsw_bus,
+				   void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+	u8 local_port;
+	bool is_emad;
+};
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+	void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+	u8 local_port;
+	u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+	void (*func)(const struct mlxsw_reg_info *reg,
+		     char *payload, void *priv);
+	enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+				    const struct mlxsw_rx_listener *rxl,
+				    void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+					  const struct mlxsw_event_listener *el,
+					  void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+	u16 sys_port;
+	int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+			    struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+	u8	used_type:1,
+		used_properties:1;
+	u8	type;
+	u8	properties;
+};
+
+struct mlxsw_config_profile {
+	u16	used_max_vepa_channels:1,
+		used_max_lag:1,
+		used_max_port_per_lag:1,
+		used_max_mid:1,
+		used_max_pgt:1,
+		used_max_system_port:1,
+		used_max_vlan_groups:1,
+		used_max_regions:1,
+		used_flood_tables:1,
+		used_flood_mode:1,
+		used_max_ib_mc:1,
+		used_max_pkey:1,
+		used_ar_sec:1,
+		used_adaptive_routing_group_cap:1;
+	u8	max_vepa_channels;
+	u16	max_lag;
+	u16	max_port_per_lag;
+	u16	max_mid;
+	u16	max_pgt;
+	u16	max_system_port;
+	u16	max_vlan_groups;
+	u16	max_regions;
+	u8	max_flood_tables;
+	u8	max_vid_flood_tables;
+	u8	flood_mode;
+	u16	max_ib_mc;
+	u16	max_pkey;
+	u8	ar_sec;
+	u16	adaptive_routing_group_cap;
+	u8	arn;
+	struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
+
+struct mlxsw_driver {
+	struct list_head list;
+	const char *kind;
+	struct module *owner;
+	size_t priv_size;
+	int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_bus_info *mlxsw_bus_info);
+	void (*fini)(void *driver_priv);
+	void (*txhdr_construct)(struct sk_buff *skb,
+				const struct mlxsw_tx_info *tx_info);
+	u8 txhdr_len;
+	const struct mlxsw_config_profile *profile;
+};
+
+struct mlxsw_bus {
+	const char *kind;
+	int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_config_profile *profile);
+	void (*fini)(void *bus_priv);
+	bool (*skb_transmit_busy)(void *bus_priv,
+				  const struct mlxsw_tx_info *tx_info);
+	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info);
+	int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+			u32 in_mod, bool out_mbox_direct,
+			char *in_mbox, size_t in_mbox_size,
+			char *out_mbox, size_t out_mbox_size,
+			u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+	const char *device_kind;
+	const char *device_name;
+	struct device *dev;
+	struct {
+		u16 major;
+		u16 minor;
+		u16 subminor;
+	} fw_rev;
+	u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+	u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 000000000..97b6bb5d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef _MLXSW_EMAD_H +#define _MLXSW_EMAD_H + +#define MLXSW_EMAD_MAX_FRAME_LEN 1518 /* Length in u8 */ +#define MLXSW_EMAD_MAX_RETRY 5 + +/* EMAD Ethernet header */ +#define MLXSW_EMAD_ETH_HDR_LEN 0x10 /* Length in u8 */ +#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01" +#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03" +#define MLXSW_EMAD_EH_ETHERTYPE 0x8932 +#define MLXSW_EMAD_EH_MLX_PROTO 0 +#define MLXSW_EMAD_EH_PROTO_VERSION 0 + +/* EMAD TLV Types */ +enum { + MLXSW_EMAD_TLV_TYPE_END, + MLXSW_EMAD_TLV_TYPE_OP, + MLXSW_EMAD_TLV_TYPE_DR, + MLXSW_EMAD_TLV_TYPE_REG, + MLXSW_EMAD_TLV_TYPE_USERDATA, + MLXSW_EMAD_TLV_TYPE_OOBETH, +}; + +/* OP TLV */ +#define MLXSW_EMAD_OP_TLV_LEN 4 /* Length in u32 */ + +enum { + MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1, + MLXSW_EMAD_OP_TLV_CLASS_IPC = 2, +}; + +enum mlxsw_emad_op_tlv_status { + MLXSW_EMAD_OP_TLV_STATUS_SUCCESS, + MLXSW_EMAD_OP_TLV_STATUS_BUSY, + MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED, + MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV, + MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED, + MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED, + MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED, + MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER, + MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE, + MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK, + MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70, +}; + +static inline char *mlxsw_emad_op_tlv_status_str(u8 status) +{ + switch (status) { + case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: + return "operation performed"; + case MLXSW_EMAD_OP_TLV_STATUS_BUSY: + return "device is busy"; + case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: + return "version not supported"; + case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: + return "unknown TLV"; + case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED: + return "register not supported"; + case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED: + return "class not supported"; + case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED: + return "method not supported"; + case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER: + return "bad parameter"; + case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: + return "resource not available"; + case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: + return "acknowledged. retransmit"; + case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: + return "internal error"; + default: + return "*UNKNOWN*"; + } +} + +enum { + MLXSW_EMAD_OP_TLV_REQUEST, + MLXSW_EMAD_OP_TLV_RESPONSE +}; + +enum { + MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1, + MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2, + MLXSW_EMAD_OP_TLV_METHOD_SEND = 3, + MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5, +}; + +/* END TLV */ +#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */ + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h new file mode 100644 index 000000000..36fb1cec5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -0,0 +1,407 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/item.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015 Ido Schimmel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+	unsigned short	offset;		/* bytes in container */
+	unsigned short	step;		/* step in bytes for indexed items */
+	unsigned short	in_step_offset; /* offset within one step */
+	unsigned char	shift;		/* shift in bits */
+	unsigned char	element_size;	/* size of element in bit array */
+	bool		no_real_shift;
+	union {
+		unsigned char bits;
+		unsigned short bytes;
+	} size;
+	const char	*name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+		    size_t typesize)
+{
+	BUG_ON(index && !item->step);
+	if (item->offset % typesize != 0 ||
+	    item->step % typesize != 0 ||
+	    item->in_step_offset % typesize != 0) {
+		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+		       item->name, item->offset, item->step,
+		       item->in_step_offset, typesize);
+		BUG();
+	}
+
+	return ((item->offset + item->step * index + item->in_step_offset) /
+		typesize);
+}
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+	__be16 *b = (__be16 *) buf;
+	u16 tmp;
+
+	tmp = be16_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u16 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index,
+						  sizeof(u16));
+	__be16 *b = (__be16 *) buf;
+	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+	u16 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be16_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+	__be32 *b = (__be32 *) buf;
+	u32 tmp;
+
+	tmp = be32_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &=
GENMASK(item->size.bits - 1, 0); + if (item->no_real_shift) + tmp <<= item->shift; + return tmp; +} + +static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item, + unsigned short index, u32 val) +{ + unsigned int offset = __mlxsw_item_offset(item, index, + sizeof(u32)); + __be32 *b = (__be32 *) buf; + u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift; + u32 tmp; + + if (!item->no_real_shift) + val <<= item->shift; + val &= mask; + tmp = be32_to_cpu(b[offset]); + tmp &= ~mask; + tmp |= val; + b[offset] = cpu_to_be32(tmp); +} + +static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64)); + __be64 *b = (__be64 *) buf; + u64 tmp; + + tmp = be64_to_cpu(b[offset]); + tmp >>= item->shift; + tmp &= GENMASK_ULL(item->size.bits - 1, 0); + if (item->no_real_shift) + tmp <<= item->shift; + return tmp; +} + +static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item, + unsigned short index, u64 val) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64)); + __be64 *b = (__be64 *) buf; + u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift; + u64 tmp; + + if (!item->no_real_shift) + val <<= item->shift; + val &= mask; + tmp = be64_to_cpu(b[offset]); + tmp &= ~mask; + tmp |= val; + b[offset] = cpu_to_be64(tmp); +} + +static inline void __mlxsw_item_memcpy_from(char *buf, char *dst, + struct mlxsw_item *item) +{ + memcpy(dst, &buf[item->offset], item->size.bytes); +} + +static inline void __mlxsw_item_memcpy_to(char *buf, char *src, + struct mlxsw_item *item) +{ + memcpy(&buf[item->offset], src, item->size.bytes); +} + +static inline u16 +__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) +{ + u16 max_index, be_index; + u16 offset; /* byte offset inside the array */ + u8 in_byte_index; + + BUG_ON(index && !item->element_size); + if (item->offset % sizeof(u32) != 0 || + BITS_PER_BYTE % item->element_size != 0) { + pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n", + item->name, item->offset, item->element_size); + BUG(); + } + + max_index = (item->size.bytes << 3) / item->element_size - 1; + be_index = max_index - index; + offset = be_index * item->element_size >> 3; + in_byte_index = index % (BITS_PER_BYTE / item->element_size); + *shift = in_byte_index * item->element_size; + + return item->offset + offset; +} + +static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item, + u16 index) +{ + u8 shift, tmp; + u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift); + + tmp = buf[offset]; + tmp >>= shift; + tmp &= GENMASK(item->element_size - 1, 0); + return tmp; +} + +static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item, + u16 index, u8 val) +{ + u8 shift, tmp; + u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift); + u8 mask = GENMASK(item->element_size - 1, 0) << shift; + + val <<= shift; + val &= mask; + tmp = buf[offset]; + tmp &= ~mask; + tmp |= val; + buf[offset] = tmp; +} + +#define __ITEM_NAME(_type, _cname, _iname) \ + mlxsw_##_type##_##_cname##_##_iname##_item + +/* _type: cmd_mbox, reg, etc. + * _cname: containter name (e.g. 
command name, register name) + * _iname: item name within the container + */ + +#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +{ \ + return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\ +{ \ + __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ + _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u16 \ +mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u16 val) \ +{ \ + __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + +#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +{ \ + return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\ +{ \ + __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ + _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u32 \ +mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u32 val) \ +{ \ + __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + +#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +{ \ + return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\ +{ \ + __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, 
\ + _sizebits, _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u64 \ +mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u64 val) \ +{ \ + __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + +#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .size = {.bytes = _sizebytes,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \ +{ \ + __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \ +{ \ + __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \ +} + +#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \ + _element_size) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .element_size = _element_size, \ + .size = {.bytes = _sizebytes,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u8 \ +mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \ +{ \ + return __mlxsw_item_bit_array_get(buf, \ + &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \ +{ \ + return __mlxsw_item_bit_array_set(buf, \ + &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} \ + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c new file mode 100644 index 000000000..cef866c37 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -0,0 +1,1826 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/pci.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+	{0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+	switch (id->device) {
+	case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+		return MLXSW_DEVICE_KIND_SWITCHX2;
+	default:
+		BUG();
+	}
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+
+enum mlxsw_pci_queue_type {
+	MLXSW_PCI_QUEUE_TYPE_SDQ,
+	MLXSW_PCI_QUEUE_TYPE_RDQ,
+	MLXSW_PCI_QUEUE_TYPE_CQ,
+	MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+	switch (q_type) {
+	case MLXSW_PCI_QUEUE_TYPE_SDQ:
+		return "sdq";
+	case MLXSW_PCI_QUEUE_TYPE_RDQ:
+		return "rdq";
+	case MLXSW_PCI_QUEUE_TYPE_CQ:
+		return "cq";
+	case MLXSW_PCI_QUEUE_TYPE_EQ:
+		return "eq";
+	}
+	BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT	4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+	0, /* unused */
+	0, /* unused */
+	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+	char *buf;
+	dma_addr_t mapaddr;
+	size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+	char *elem; /* pointer to actual dma mapped element mem chunk */
+	union {
+		struct {
+			struct sk_buff *skb;
+		} sdq;
+		struct {
+			struct sk_buff *skb;
+		} rdq;
+	} u;
+};
+
+struct mlxsw_pci_queue {
+	spinlock_t lock; /* for queue accesses */
+	struct mlxsw_pci_mem_item mem_item;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	u16 producer_counter;
+	u16 consumer_counter;
+	u16 count; /* number of elements in queue */
+	u8 num; /* queue number */
+	u8 elem_size; /* size of one element */
+	enum mlxsw_pci_queue_type type;
+	struct tasklet_struct tasklet; /* queue processing tasklet */
+	struct mlxsw_pci *pci;
+	union {
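+		/* Type-specific counters: CQs count handled SDQ/RDQ
+		 * completions, EQs count command, completion and other
+		 * events.
+		 */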
+ struct { + u32 comp_sdq_count; + u32 comp_rdq_count; + } cq; + struct { + u32 ev_cmd_count; + u32 ev_comp_count; + u32 ev_other_count; + } eq; + } u; +}; + +struct mlxsw_pci_queue_type_group { + struct mlxsw_pci_queue *q; + u8 count; /* number of queues in group */ +}; + +struct mlxsw_pci { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT]; + u32 doorbell_offset; + struct msix_entry msix_entry; + struct mlxsw_core *core; + struct { + u16 num_pages; + struct mlxsw_pci_mem_item *items; + } fw_area; + struct { + struct mlxsw_pci_mem_item out_mbox; + struct mlxsw_pci_mem_item in_mbox; + struct mutex lock; /* Lock access to command registers */ + bool nopoll; + wait_queue_head_t wait; + bool wait_done; + struct { + u8 status; + u64 out_param; + } comp; + } cmd; + struct mlxsw_bus_info bus_info; + struct dentry *dbg_dir; +}; + +static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) +{ + tasklet_schedule(&q->tasklet); +} + +static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, + size_t elem_size, int elem_index) +{ + return q->mem_item.buf + (elem_size * elem_index); +} + +static struct mlxsw_pci_queue_elem_info * +mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index) +{ + return &q->elem_info[elem_index]; +} + +static struct mlxsw_pci_queue_elem_info * +mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q) +{ + int index = q->producer_counter & (q->count - 1); + + if ((q->producer_counter - q->consumer_counter) == q->count) + return NULL; + return mlxsw_pci_queue_elem_info_get(q, index); +} + +static struct mlxsw_pci_queue_elem_info * +mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q) +{ + int index = q->consumer_counter & (q->count - 1); + + return mlxsw_pci_queue_elem_info_get(q, index); +} + +static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index) +{ + return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem; +} + +static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit) +{ + return owner_bit != !!(q->consumer_counter & q->count); +} + +static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q, + u32 (*get_elem_owner_func)(char *)) +{ + struct mlxsw_pci_queue_elem_info *elem_info; + char *elem; + bool owner_bit; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + elem = elem_info->elem; + owner_bit = get_elem_owner_func(elem); + if (mlxsw_pci_elem_hw_owned(q, owner_bit)) + return NULL; + q->consumer_counter++; + rmb(); /* make sure we read owned bit before the rest of elem */ + return elem; +} + +static struct mlxsw_pci_queue_type_group * +mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci, + enum mlxsw_pci_queue_type q_type) +{ + return &mlxsw_pci->queues[q_type]; +} + +static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci, + enum mlxsw_pci_queue_type q_type) +{ + struct mlxsw_pci_queue_type_group *queue_group; + + queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type); + return queue_group->count; +} + +static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci) +{ + return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ); +} + +static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci) +{ + return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ); +} + +static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci) +{ + return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ); +} + +static u8 mlxsw_pci_eq_count(struct mlxsw_pci 
*mlxsw_pci) +{ + return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ); +} + +static struct mlxsw_pci_queue * +__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci, + enum mlxsw_pci_queue_type q_type, u8 q_num) +{ + return &mlxsw_pci->queues[q_type].q[q_num]; +} + +static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci, + u8 q_num) +{ + return __mlxsw_pci_queue_get(mlxsw_pci, + MLXSW_PCI_QUEUE_TYPE_SDQ, q_num); +} + +static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci, + u8 q_num) +{ + return __mlxsw_pci_queue_get(mlxsw_pci, + MLXSW_PCI_QUEUE_TYPE_RDQ, q_num); +} + +static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci, + u8 q_num) +{ + return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num); +} + +static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci, + u8 q_num) +{ + return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num); +} + +static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q, + u16 val) +{ + mlxsw_pci_write32(mlxsw_pci, + DOORBELL(mlxsw_pci->doorbell_offset, + mlxsw_pci_doorbell_type_offset[q->type], + q->num), val); +} + +static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q, + u16 val) +{ + mlxsw_pci_write32(mlxsw_pci, + DOORBELL(mlxsw_pci->doorbell_offset, + mlxsw_pci_doorbell_arm_type_offset[q->type], + q->num), val); +} + +static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + wmb(); /* ensure all writes are done before we ring a bell */ + __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter); +} + +static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + wmb(); /* ensure all writes are done before we ring a bell */ + __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, + q->consumer_counter + q->count); +} + +static void +mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + wmb(); /* ensure all writes are done before we ring a bell */ + __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter); +} + +static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q, + int page_index) +{ + return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index; +} + +static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + int i; + int err; + + q->producer_counter = 0; + q->consumer_counter = 0; + + /* Set CQ of same number of this SDQ. 
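+	 * SDQ n therefore completes into CQ n; the CQs above the SDQ
+	 * range are left for the RDQs (see mlxsw_pci_rdq_init()).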
*/ + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); + mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7); + mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr); + } + + err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + return 0; +} + +static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num); +} + +static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); + struct mlxsw_pci_queue *q; + int i; + static const char hdr[] = + "NUM PROD_COUNT CONS_COUNT COUNT\n"; + + seq_printf(file, hdr); + for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) { + q = mlxsw_pci_sdq_get(mlxsw_pci, i); + spin_lock_bh(&q->lock); + seq_printf(file, "%3d %10d %10d %5d\n", + i, q->producer_counter, q->consumer_counter, + q->count); + spin_unlock_bh(&q->lock); + } + return 0; +} + +static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, + int index, char *frag_data, size_t frag_len, + int direction) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + dma_addr_t mapaddr; + + mapaddr = pci_map_single(pdev, frag_data, frag_len, direction); + if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) { + if (net_ratelimit()) + dev_err(&pdev->dev, "failed to dma map tx frag\n"); + return -EIO; + } + mlxsw_pci_wqe_address_set(wqe, index, mapaddr); + mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len); + return 0; +} + +static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe, + int index, int direction) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index); + dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index); + + if (!frag_len) + return; + pci_unmap_single(pdev, mapaddr, frag_len, direction); +} + +static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue_elem_info *elem_info) +{ + size_t buf_len = MLXSW_PORT_MAX_MTU; + char *wqe = elem_info->elem; + struct sk_buff *skb; + int err; + + elem_info->u.rdq.skb = NULL; + skb = netdev_alloc_skb_ip_align(NULL, buf_len); + if (!skb) + return -ENOMEM; + + /* Assume that wqe was previously zeroed. */ + + err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, + buf_len, DMA_FROM_DEVICE); + if (err) + goto err_frag_map; + + elem_info->u.rdq.skb = skb; + return 0; + +err_frag_map: + dev_kfree_skb_any(skb); + return err; +} + +static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue_elem_info *elem_info) +{ + struct sk_buff *skb; + char *wqe; + + skb = elem_info->u.rdq.skb; + wqe = elem_info->elem; + + mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); +} + +static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + struct mlxsw_pci_queue_elem_info *elem_info; + int i; + int err; + + q->producer_counter = 0; + q->consumer_counter = 0; + + /* Set CQ of same number of this RDQ with base + * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs. 
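+	 * The base used below is MLXSW_PCI_SDQS_COUNT, i.e. RDQ n is
+	 * served by CQ (n + MLXSW_PCI_SDQS_COUNT).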
+ */ + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT); + mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr); + } + + err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + + for (i = 0; i < q->count; i++) { + elem_info = mlxsw_pci_queue_elem_info_producer_get(q); + BUG_ON(!elem_info); + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); + if (err) + goto rollback; + /* Everything is set up, ring doorbell to pass elem to HW */ + q->producer_counter++; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + } + + return 0; + +rollback: + for (i--; i >= 0; i--) { + elem_info = mlxsw_pci_queue_elem_info_get(q, i); + mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); + } + mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); + + return err; +} + +static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + struct mlxsw_pci_queue_elem_info *elem_info; + int i; + + mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); + for (i = 0; i < q->count; i++) { + elem_info = mlxsw_pci_queue_elem_info_get(q, i); + mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); + } +} + +static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); + struct mlxsw_pci_queue *q; + int i; + static const char hdr[] = + "NUM PROD_COUNT CONS_COUNT COUNT\n"; + + seq_printf(file, hdr); + for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) { + q = mlxsw_pci_rdq_get(mlxsw_pci, i); + spin_lock_bh(&q->lock); + seq_printf(file, "%3d %10d %10d %5d\n", + i, q->producer_counter, q->consumer_counter, + q->count); + spin_unlock_bh(&q->lock); + } + return 0; +} + +static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + int i; + int err; + + q->consumer_counter = 0; + + for (i = 0; i < q->count; i++) { + char *elem = mlxsw_pci_queue_elem_get(q, i); + + mlxsw_pci_cqe_owner_set(elem, 1); + } + + mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ + mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); + mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0); + mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); + mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr); + } + err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + return 0; +} + +static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); +} + +static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); + + struct mlxsw_pci_queue *q; + int i; + static const char hdr[] = + "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n"; + + seq_printf(file, hdr); + for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) { + q = mlxsw_pci_cq_get(mlxsw_pci, i); + spin_lock_bh(&q->lock); + seq_printf(file, "%3d %10d %10d %10d %5d\n", + i, q->consumer_counter, q->u.cq.comp_sdq_count, + q->u.cq.comp_rdq_count, q->count); + spin_unlock_bh(&q->lock); + } + return 
0; +} + +static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q, + u16 consumer_counter_limit, + char *cqe) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + struct mlxsw_pci_queue_elem_info *elem_info; + char *wqe; + struct sk_buff *skb; + int i; + + spin_lock(&q->lock); + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + skb = elem_info->u.sdq.skb; + wqe = elem_info->elem; + for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++) + mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + elem_info->u.sdq.skb = NULL; + + if (q->consumer_counter++ != consumer_counter_limit) + dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n"); + spin_unlock(&q->lock); +} + +static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q, + u16 consumer_counter_limit, + char *cqe) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + struct mlxsw_pci_queue_elem_info *elem_info; + char *wqe; + struct sk_buff *skb; + struct mlxsw_rx_info rx_info; + u16 byte_count; + int err; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + skb = elem_info->u.sdq.skb; + if (!skb) + return; + wqe = elem_info->elem; + mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); + + if (q->consumer_counter++ != consumer_counter_limit) + dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); + + /* We do not support lag now */ + if (mlxsw_pci_cqe_lag_get(cqe)) + goto drop; + + rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe); + rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); + + byte_count = mlxsw_pci_cqe_byte_count_get(cqe); + if (mlxsw_pci_cqe_crc_get(cqe)) + byte_count -= ETH_FCS_LEN; + skb_put(skb, byte_count); + mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); + +put_new_skb: + memset(wqe, 0, q->elem_size); + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); + if (err && net_ratelimit()) + dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n"); + /* Everything is set up, ring doorbell to pass elem to HW */ + q->producer_counter++; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + return; + +drop: + dev_kfree_skb_any(skb); + goto put_new_skb; +} + +static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) +{ + return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get); +} + +static void mlxsw_pci_cq_tasklet(unsigned long data) +{ + struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data; + struct mlxsw_pci *mlxsw_pci = q->pci; + char *cqe; + int items = 0; + int credits = q->count >> 1; + + while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { + u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); + u8 sendq = mlxsw_pci_cqe_sr_get(cqe); + u8 dqn = mlxsw_pci_cqe_dqn_get(cqe); + + if (sendq) { + struct mlxsw_pci_queue *sdq; + + sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); + mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, + wqe_counter, cqe); + q->u.cq.comp_sdq_count++; + } else { + struct mlxsw_pci_queue *rdq; + + rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); + mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, + wqe_counter, cqe); + q->u.cq.comp_rdq_count++; + } + if (++items == credits) + break; + } + if (items) { + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + } +} + +static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + int i; + int err; + + q->consumer_counter = 0; + + for (i = 0; i < q->count; i++) { + char *elem = mlxsw_pci_queue_elem_get(q, 
i); + + mlxsw_pci_eqe_owner_set(elem, 1); + } + + mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ + mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0); + mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ + mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr); + } + err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + return 0; +} + +static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); +} + +static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); + struct mlxsw_pci_queue *q; + int i; + static const char hdr[] = + "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n"; + + seq_printf(file, hdr); + for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) { + q = mlxsw_pci_eq_get(mlxsw_pci, i); + spin_lock_bh(&q->lock); + seq_printf(file, "%3d %10d %10d %10d %10d %5d\n", + i, q->consumer_counter, q->u.eq.ev_cmd_count, + q->u.eq.ev_comp_count, q->u.eq.ev_other_count, + q->count); + spin_unlock_bh(&q->lock); + } + return 0; +} + +static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe) +{ + mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe); + mlxsw_pci->cmd.comp.out_param = + ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 | + mlxsw_pci_eqe_cmd_out_param_l_get(eqe); + mlxsw_pci->cmd.wait_done = true; + wake_up(&mlxsw_pci->cmd.wait); +} + +static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) +{ + return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get); +} + +static void mlxsw_pci_eq_tasklet(unsigned long data) +{ + struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data; + struct mlxsw_pci *mlxsw_pci = q->pci; + unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)]; + char *eqe; + u8 cqn; + bool cq_handle = false; + int items = 0; + int credits = q->count >> 1; + + memset(&active_cqns, 0, sizeof(active_cqns)); + + while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) { + u8 event_type = mlxsw_pci_eqe_event_type_get(eqe); + + switch (event_type) { + case MLXSW_PCI_EQE_EVENT_TYPE_CMD: + mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe); + q->u.eq.ev_cmd_count++; + break; + case MLXSW_PCI_EQE_EVENT_TYPE_COMP: + cqn = mlxsw_pci_eqe_cqn_get(eqe); + set_bit(cqn, active_cqns); + cq_handle = true; + q->u.eq.ev_comp_count++; + break; + default: + q->u.eq.ev_other_count++; + } + if (++items == credits) + break; + } + if (items) { + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + } + + if (!cq_handle) + return; + for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) { + q = mlxsw_pci_cq_get(mlxsw_pci, cqn); + mlxsw_pci_queue_tasklet_schedule(q); + } +} + +struct mlxsw_pci_queue_ops { + const char *name; + enum mlxsw_pci_queue_type type; + int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q); + void (*fini)(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q); + void (*tasklet)(unsigned long data); + int (*dbg_read)(struct seq_file *s, void *data); + u16 elem_count; + u8 elem_size; +}; + +static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = { + .type = MLXSW_PCI_QUEUE_TYPE_SDQ, + .init = 
mlxsw_pci_sdq_init, + .fini = mlxsw_pci_sdq_fini, + .dbg_read = mlxsw_pci_sdq_dbg_read, + .elem_count = MLXSW_PCI_WQE_COUNT, + .elem_size = MLXSW_PCI_WQE_SIZE, +}; + +static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = { + .type = MLXSW_PCI_QUEUE_TYPE_RDQ, + .init = mlxsw_pci_rdq_init, + .fini = mlxsw_pci_rdq_fini, + .dbg_read = mlxsw_pci_rdq_dbg_read, + .elem_count = MLXSW_PCI_WQE_COUNT, + .elem_size = MLXSW_PCI_WQE_SIZE +}; + +static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { + .type = MLXSW_PCI_QUEUE_TYPE_CQ, + .init = mlxsw_pci_cq_init, + .fini = mlxsw_pci_cq_fini, + .tasklet = mlxsw_pci_cq_tasklet, + .dbg_read = mlxsw_pci_cq_dbg_read, + .elem_count = MLXSW_PCI_CQE_COUNT, + .elem_size = MLXSW_PCI_CQE_SIZE +}; + +static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { + .type = MLXSW_PCI_QUEUE_TYPE_EQ, + .init = mlxsw_pci_eq_init, + .fini = mlxsw_pci_eq_fini, + .tasklet = mlxsw_pci_eq_tasklet, + .dbg_read = mlxsw_pci_eq_dbg_read, + .elem_count = MLXSW_PCI_EQE_COUNT, + .elem_size = MLXSW_PCI_EQE_SIZE +}; + +static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + const struct mlxsw_pci_queue_ops *q_ops, + struct mlxsw_pci_queue *q, u8 q_num) +{ + struct mlxsw_pci_mem_item *mem_item = &q->mem_item; + int i; + int err; + + spin_lock_init(&q->lock); + q->num = q_num; + q->count = q_ops->elem_count; + q->elem_size = q_ops->elem_size; + q->type = q_ops->type; + q->pci = mlxsw_pci; + + if (q_ops->tasklet) + tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q); + + mem_item->size = MLXSW_PCI_AQ_SIZE; + mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev, + mem_item->size, + &mem_item->mapaddr); + if (!mem_item->buf) + return -ENOMEM; + memset(mem_item->buf, 0, mem_item->size); + + q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL); + if (!q->elem_info) { + err = -ENOMEM; + goto err_elem_info_alloc; + } + + /* Initialize dma mapped elements info elem_info for + * future easy access. 
+ */ + for (i = 0; i < q->count; i++) { + struct mlxsw_pci_queue_elem_info *elem_info; + + elem_info = mlxsw_pci_queue_elem_info_get(q, i); + elem_info->elem = + __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i); + } + + mlxsw_cmd_mbox_zero(mbox); + err = q_ops->init(mlxsw_pci, mbox, q); + if (err) + goto err_q_ops_init; + return 0; + +err_q_ops_init: + kfree(q->elem_info); +err_elem_info_alloc: + pci_free_consistent(mlxsw_pci->pdev, mem_item->size, + mem_item->buf, mem_item->mapaddr); + return err; +} + +static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_pci_queue_ops *q_ops, + struct mlxsw_pci_queue *q) +{ + struct mlxsw_pci_mem_item *mem_item = &q->mem_item; + + q_ops->fini(mlxsw_pci, q); + kfree(q->elem_info); + pci_free_consistent(mlxsw_pci->pdev, mem_item->size, + mem_item->buf, mem_item->mapaddr); +} + +static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + const struct mlxsw_pci_queue_ops *q_ops, + u8 num_qs) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + struct mlxsw_pci_queue_type_group *queue_group; + char tmp[16]; + int i; + int err; + + queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type); + queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL); + if (!queue_group->q) + return -ENOMEM; + + for (i = 0; i < num_qs; i++) { + err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops, + &queue_group->q[i], i); + if (err) + goto err_queue_init; + } + queue_group->count = num_qs; + + sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type)); + debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir, + q_ops->dbg_read); + + return 0; + +err_queue_init: + for (i--; i >= 0; i--) + mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]); + kfree(queue_group->q); + return err; +} + +static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_pci_queue_ops *q_ops) +{ + struct mlxsw_pci_queue_type_group *queue_group; + int i; + + queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type); + for (i = 0; i < queue_group->count; i++) + mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]); + kfree(queue_group->q); +} + +static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + u8 num_sdqs; + u8 sdq_log2sz; + u8 num_rdqs; + u8 rdq_log2sz; + u8 num_cqs; + u8 cq_log2sz; + u8 num_eqs; + u8 eq_log2sz; + int err; + + mlxsw_cmd_mbox_zero(mbox); + err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox); + if (err) + return err; + + num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox); + sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox); + num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox); + rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox); + num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox); + cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox); + num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); + eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); + + if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) || + (num_rdqs != MLXSW_PCI_RDQS_COUNT) || + (num_cqs != MLXSW_PCI_CQS_COUNT) || + (num_eqs != MLXSW_PCI_EQS_COUNT)) { + dev_err(&pdev->dev, "Unsupported number of queues\n"); + return -EINVAL; + } + + if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) || + (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) || + (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) || + (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) { + dev_err(&pdev->dev, "Unsupported 
number of async queue descriptors\n"); + return -EINVAL; + } + + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops, + num_eqs); + if (err) { + dev_err(&pdev->dev, "Failed to initialize event queues\n"); + return err; + } + + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops, + num_cqs); + if (err) { + dev_err(&pdev->dev, "Failed to initialize completion queues\n"); + goto err_cqs_init; + } + + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops, + num_sdqs); + if (err) { + dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n"); + goto err_sdqs_init; + } + + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops, + num_rdqs); + if (err) { + dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n"); + goto err_rdqs_init; + } + + /* We have to poll in command interface until queues are initialized */ + mlxsw_pci->cmd.nopoll = true; + return 0; + +err_rdqs_init: + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops); +err_sdqs_init: + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops); +err_cqs_init: + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops); + return err; +} + +static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci) +{ + mlxsw_pci->cmd.nopoll = false; + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops); + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops); + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops); + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops); +} + +static void +mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, + char *mbox, int index, + const struct mlxsw_swid_config *swid) +{ + u8 mask = 0; + + if (swid->used_type) { + mlxsw_cmd_mbox_config_profile_swid_config_type_set( + mbox, index, swid->type); + mask |= 1; + } + if (swid->used_properties) { + mlxsw_cmd_mbox_config_profile_swid_config_properties_set( + mbox, index, swid->properties); + mask |= 2; + } + mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); +} + +static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, + const struct mlxsw_config_profile *profile) +{ + int i; + + mlxsw_cmd_mbox_zero(mbox); + + if (profile->used_max_vepa_channels) { + mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_vepa_channels_set( + mbox, profile->max_vepa_channels); + } + if (profile->used_max_lag) { + mlxsw_cmd_mbox_config_profile_set_max_lag_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_lag_set( + mbox, profile->max_lag); + } + if (profile->used_max_port_per_lag) { + mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_port_per_lag_set( + mbox, profile->max_port_per_lag); + } + if (profile->used_max_mid) { + mlxsw_cmd_mbox_config_profile_set_max_mid_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_mid_set( + mbox, profile->max_mid); + } + if (profile->used_max_pgt) { + mlxsw_cmd_mbox_config_profile_set_max_pgt_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_pgt_set( + mbox, profile->max_pgt); + } + if (profile->used_max_system_port) { + mlxsw_cmd_mbox_config_profile_set_max_system_port_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_system_port_set( + mbox, profile->max_system_port); + } + if (profile->used_max_vlan_groups) { + mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_vlan_groups_set( + mbox, profile->max_vlan_groups); + } + if 
(profile->used_max_regions) { + mlxsw_cmd_mbox_config_profile_set_max_regions_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_regions_set( + mbox, profile->max_regions); + } + if (profile->used_flood_tables) { + mlxsw_cmd_mbox_config_profile_set_flood_tables_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_flood_tables_set( + mbox, profile->max_flood_tables); + mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set( + mbox, profile->max_vid_flood_tables); + } + if (profile->used_flood_mode) { + mlxsw_cmd_mbox_config_profile_set_flood_mode_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_flood_mode_set( + mbox, profile->flood_mode); + } + if (profile->used_max_ib_mc) { + mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_ib_mc_set( + mbox, profile->max_ib_mc); + } + if (profile->used_max_pkey) { + mlxsw_cmd_mbox_config_profile_set_max_pkey_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_pkey_set( + mbox, profile->max_pkey); + } + if (profile->used_ar_sec) { + mlxsw_cmd_mbox_config_profile_set_ar_sec_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_ar_sec_set( + mbox, profile->ar_sec); + } + if (profile->used_adaptive_routing_group_cap) { + mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set( + mbox, profile->adaptive_routing_group_cap); + } + + for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++) + mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, + &profile->swid_config[i]); + + return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); +} + +static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox) +{ + struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info; + int err; + + mlxsw_cmd_mbox_zero(mbox); + err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox); + if (err) + return err; + mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd); + mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid); + return 0; +} + +static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + u16 num_pages) +{ + struct mlxsw_pci_mem_item *mem_item; + int i; + int err; + + mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item), + GFP_KERNEL); + if (!mlxsw_pci->fw_area.items) + return -ENOMEM; + mlxsw_pci->fw_area.num_pages = num_pages; + + mlxsw_cmd_mbox_zero(mbox); + for (i = 0; i < num_pages; i++) { + mem_item = &mlxsw_pci->fw_area.items[i]; + + mem_item->size = MLXSW_PCI_PAGE_SIZE; + mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev, + mem_item->size, + &mem_item->mapaddr); + if (!mem_item->buf) { + err = -ENOMEM; + goto err_alloc; + } + mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr); + mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */ + } + + err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages); + if (err) + goto err_cmd_map_fa; + + return 0; + +err_cmd_map_fa: +err_alloc: + for (i--; i >= 0; i--) { + mem_item = &mlxsw_pci->fw_area.items[i]; + + pci_free_consistent(mlxsw_pci->pdev, mem_item->size, + mem_item->buf, mem_item->mapaddr); + } + kfree(mlxsw_pci->fw_area.items); + return err; +} + +static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci) +{ + struct mlxsw_pci_mem_item *mem_item; + int i; + + mlxsw_cmd_unmap_fa(mlxsw_pci->core); + + for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) { + mem_item = &mlxsw_pci->fw_area.items[i]; + + pci_free_consistent(mlxsw_pci->pdev, mem_item->size, + mem_item->buf, mem_item->mapaddr); + } + kfree(mlxsw_pci->fw_area.items); +} 
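+
+/* Summary of the completion path below: the single MSI-X vector fires
+ * mlxsw_pci_eq_irq_handler(), which schedules the EQ tasklets;
+ * mlxsw_pci_eq_tasklet() collects the completion queue numbers carried
+ * by the EQEs into a bitmap and schedules the matching CQ tasklets;
+ * mlxsw_pci_cq_tasklet() then frees the transmitted skb (SDQ
+ * completion) or hands the received skb to the core (RDQ completion).
+ *
+ * The helper below is only an illustrative sketch, not used by the
+ * driver: it shows how a doorbell register address is composed from
+ * the pci.h constants. Arming CQ 5, for instance, resolves to
+ * doorbell_offset + MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET + 5 * 4.
+ */
+static inline u32 mlxsw_pci_example_arm_cq_doorbell(struct mlxsw_pci *mlxsw_pci,
+ u8 cqn)
+{
+ return MLXSW_PCI_DOORBELL(mlxsw_pci->doorbell_offset,
+ MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, cqn);
+}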
+ +static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id) +{ + struct mlxsw_pci *mlxsw_pci = dev_id; + struct mlxsw_pci_queue *q; + int i; + + for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) { + q = mlxsw_pci_eq_get(mlxsw_pci, i); + mlxsw_pci_queue_tasklet_schedule(q); + } + return IRQ_HANDLED; +} + +static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_mem_item *mbox) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + int err = 0; + + mbox->size = MLXSW_CMD_MBOX_SIZE; + mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE, + &mbox->mapaddr); + if (!mbox->buf) { + dev_err(&pdev->dev, "Failed allocating memory for mailbox\n"); + err = -ENOMEM; + } + + return err; +} + +static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_mem_item *mbox) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + + pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf, + mbox->mapaddr); +} + +static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_config_profile *profile) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + struct pci_dev *pdev = mlxsw_pci->pdev; + char *mbox; + u16 num_pages; + int err; + + mutex_init(&mlxsw_pci->cmd.lock); + init_waitqueue_head(&mlxsw_pci->cmd.wait); + + mlxsw_pci->core = mlxsw_core; + + mbox = mlxsw_cmd_mbox_alloc(); + if (!mbox) + return -ENOMEM; + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + if (err) + goto mbox_put; + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + if (err) + goto err_out_mbox_alloc; + + err = mlxsw_cmd_query_fw(mlxsw_core, mbox); + if (err) + goto err_query_fw; + + mlxsw_pci->bus_info.fw_rev.major = + mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox); + mlxsw_pci->bus_info.fw_rev.minor = + mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox); + mlxsw_pci->bus_info.fw_rev.subminor = + mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox); + + if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) { + dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n"); + err = -EINVAL; + goto err_iface_rev; + } + if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) { + dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n"); + err = -EINVAL; + goto err_doorbell_page_bar; + } + + mlxsw_pci->doorbell_offset = + mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox); + + num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox); + err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages); + if (err) + goto err_fw_area_init; + + err = mlxsw_pci_boardinfo(mlxsw_pci, mbox); + if (err) + goto err_boardinfo; + + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile); + if (err) + goto err_config_profile; + + err = mlxsw_pci_aqs_init(mlxsw_pci, mbox); + if (err) + goto err_aqs_init; + + err = request_irq(mlxsw_pci->msix_entry.vector, + mlxsw_pci_eq_irq_handler, 0, + mlxsw_pci_driver_name, mlxsw_pci); + if (err) { + dev_err(&pdev->dev, "IRQ request failed\n"); + goto err_request_eq_irq; + } + + goto mbox_put; + +err_request_eq_irq: + mlxsw_pci_aqs_fini(mlxsw_pci); +err_aqs_init: +err_config_profile: +err_boardinfo: + mlxsw_pci_fw_area_fini(mlxsw_pci); +err_fw_area_init: +err_doorbell_page_bar: +err_iface_rev: +err_query_fw: + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); +err_out_mbox_alloc: + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); +mbox_put: + mlxsw_cmd_mbox_free(mbox); + return err; +} + +static void mlxsw_pci_fini(void *bus_priv) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + + 
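/* Teardown runs strictly in reverse of mlxsw_pci_init(): the IRQ is
+ * freed first so the interrupt can no longer schedule tasklets against
+ * queues that are about to be destroyed.
+ */
+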
free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci); + mlxsw_pci_aqs_fini(mlxsw_pci); + mlxsw_pci_fw_area_fini(mlxsw_pci); + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); +} + +static struct mlxsw_pci_queue * +mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_tx_info *tx_info) +{ + u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci); + + return mlxsw_pci_sdq_get(mlxsw_pci, sdqn); +} + +static bool mlxsw_pci_skb_transmit_busy(void *bus_priv, + const struct mlxsw_tx_info *tx_info) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info); + + return !mlxsw_pci_queue_elem_info_producer_get(q); +} + +static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + struct mlxsw_pci_queue *q; + struct mlxsw_pci_queue_elem_info *elem_info; + char *wqe; + int i; + int err; + + if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) { + err = skb_linearize(skb); + if (err) + return err; + } + + q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info); + spin_lock_bh(&q->lock); + elem_info = mlxsw_pci_queue_elem_info_producer_get(q); + if (!elem_info) { + /* queue is full */ + err = -EAGAIN; + goto unlock; + } + elem_info->u.sdq.skb = skb; + + wqe = elem_info->elem; + mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */ + mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad); + mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET); + + err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (err) + goto unlock; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1, + skb_frag_address(frag), + skb_frag_size(frag), + DMA_TO_DEVICE); + if (err) + goto unmap_frags; + } + + /* Set unused sq entries byte count to zero. 
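+ * A zero byte count marks a scatter/gather entry as unused;
+ * mlxsw_pci_wqe_frag_unmap() keys on this and skips such entries.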
*/ + for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++) + mlxsw_pci_wqe_byte_count_set(wqe, i, 0); + + /* Everything is set up, ring producer doorbell to get HW going */ + q->producer_counter++; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + + goto unlock; + +unmap_frags: + for (; i >= 0; i--) + mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE); +unlock: + spin_unlock_bh(&q->lock); + return err; +} + +static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, + u32 in_mod, bool out_mbox_direct, + char *in_mbox, size_t in_mbox_size, + char *out_mbox, size_t out_mbox_size, + u8 *p_status) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr; + dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr; + bool evreq = mlxsw_pci->cmd.nopoll; + unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS); + bool *p_wait_done = &mlxsw_pci->cmd.wait_done; + int err; + + *p_status = MLXSW_CMD_STATUS_OK; + + err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock); + if (err) + return err; + + if (in_mbox) + memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size); + mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr)); + mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr)); + + mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr)); + mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr)); + + mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod); + mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0); + + *p_wait_done = false; + + wmb(); /* all needs to be written before we write control register */ + mlxsw_pci_write32(mlxsw_pci, CIR_CTRL, + MLXSW_PCI_CIR_CTRL_GO_BIT | + (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) | + (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) | + opcode); + + if (!evreq) { + unsigned long end; + + end = jiffies + timeout; + do { + u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL); + + if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) { + *p_wait_done = true; + *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT; + break; + } + cond_resched(); + } while (time_before(jiffies, end)); + } else { + wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout); + *p_status = mlxsw_pci->cmd.comp.status; + } + + err = 0; + if (*p_wait_done) { + if (*p_status) + err = -EIO; + } else { + err = -ETIMEDOUT; + } + + if (!err && out_mbox && out_mbox_direct) { + /* Some commands don't use output param as address to mailbox + * but they store output directly into registers. In that case, + * copy registers into mbox buffer. + */ + __be32 tmp; + + if (!evreq) { + tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, + CIR_OUT_PARAM_HI)); + memcpy(out_mbox, &tmp, sizeof(tmp)); + tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, + CIR_OUT_PARAM_LO)); + memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp)); + } + } else if (!err && out_mbox) + memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size); + + mutex_unlock(&mlxsw_pci->cmd.lock); + + return err; +} + +static const struct mlxsw_bus mlxsw_pci_bus = { + .kind = "pci", + .init = mlxsw_pci_init, + .fini = mlxsw_pci_fini, + .skb_transmit_busy = mlxsw_pci_skb_transmit_busy, + .skb_transmit = mlxsw_pci_skb_transmit, + .cmd_exec = mlxsw_pci_cmd_exec, +}; + +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci) +{ + mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); + /* Current firware does not let us know when the reset is done. 
+ * So we just wait here for constant time and hope for the best. + */ + msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + return 0; +} + +static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mlxsw_pci *mlxsw_pci; + int err; + + mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL); + if (!mlxsw_pci) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, mlxsw_pci_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } + + if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) { + dev_err(&pdev->dev, "invalid PCI region size\n"); + err = -EINVAL; + goto err_pci_resource_len_check; + } + + mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!mlxsw_pci->hw_addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto err_ioremap; + } + pci_set_master(pdev); + + mlxsw_pci->pdev = pdev; + pci_set_drvdata(pdev, mlxsw_pci); + + err = mlxsw_pci_sw_reset(mlxsw_pci); + if (err) { + dev_err(&pdev->dev, "Software reset failed\n"); + goto err_sw_reset; + } + + err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1); + if (err) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_msix_init; + } + + mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id); + mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); + mlxsw_pci->bus_info.dev = &pdev->dev; + + mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name, + mlxsw_pci_dbg_root); + if (!mlxsw_pci->dbg_dir) { + dev_err(&pdev->dev, "Failed to create debugfs dir\n"); + err = -ENOMEM; + goto err_dbg_create_dir; + } + + err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, + &mlxsw_pci_bus, mlxsw_pci); + if (err) { + dev_err(&pdev->dev, "cannot register bus device\n"); + goto err_bus_device_register; + } + + return 0; + +err_bus_device_register: + debugfs_remove_recursive(mlxsw_pci->dbg_dir); +err_dbg_create_dir: + pci_disable_msix(mlxsw_pci->pdev); +err_msix_init: +err_sw_reset: + iounmap(mlxsw_pci->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(mlxsw_pci); + return err; +} + +static void mlxsw_pci_remove(struct pci_dev *pdev) +{ + struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); + + mlxsw_core_bus_device_unregister(mlxsw_pci->core); + debugfs_remove_recursive(mlxsw_pci->dbg_dir); + pci_disable_msix(mlxsw_pci->pdev); + iounmap(mlxsw_pci->hw_addr); + pci_release_regions(mlxsw_pci->pdev); + pci_disable_device(mlxsw_pci->pdev); + kfree(mlxsw_pci); +} + +static struct pci_driver mlxsw_pci_driver = { + .name = mlxsw_pci_driver_name, + .id_table = mlxsw_pci_id_table, + .probe = mlxsw_pci_probe, + .remove = mlxsw_pci_remove, +}; + +static int __init mlxsw_pci_module_init(void) +{ + int err; + + mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL); + if (!mlxsw_pci_dbg_root) + return -ENOMEM; 
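+
+ /* The debugfs root must exist before the driver is registered:
+ * mlxsw_pci_probe() creates each device's debugfs directory under
+ * mlxsw_pci_dbg_root as soon as a device is bound.
+ */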
+ err = pci_register_driver(&mlxsw_pci_driver);
+ if (err)
+ goto err_register_driver;
+ return 0;
+
+err_register_driver:
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+ return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+ pci_unregister_driver(&mlxsw_pci_driver);
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);

diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 000000000..1ef9664b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,227 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef _MLXSW_PCI_H +#define _MLXSW_PCI_H + +#include + +#include "item.h" + +#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 +#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */ +#define MLXSW_PCI_PAGE_SIZE 4096 + +#define MLXSW_PCI_CIR_BASE 0x71000 +#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE +#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04) +#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08) +#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C) +#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10) +#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14) +#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18) +#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23) +#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22) +#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12 +#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24 +#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000 + +#define MLXSW_PCI_SW_RESET 0xF0010 +#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 + +#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000 +#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200 +#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400 +#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600 +#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800 +#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00 + +#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \ + ((offset) + (type_offset) + (num) * 4) + +#define MLXSW_PCI_RDQS_COUNT 24 +#define MLXSW_PCI_SDQS_COUNT 24 +#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT) +#define MLXSW_PCI_EQS_COUNT 2 +#define MLXSW_PCI_EQ_ASYNC_NUM 0 +#define MLXSW_PCI_EQ_COMP_NUM 1 + +#define MLXSW_PCI_AQ_PAGES 8 +#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES) +#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ +#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) +#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE) +#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE) +#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80 + +#define MLXSW_PCI_WQE_SG_ENTRIES 3 +#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA + +/* pci_wqe_c + * If set it indicates that a completion should be reported upon + * execution of this descriptor. + */ +MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1); + +/* pci_wqe_lp + * Local Processing, set if packet should be processed by the local + * switch hardware: + * For Ethernet EMAD (Direct Route and non Direct Route) - + * must be set if packet destination is local device + * For InfiniBand CTL - must be set if packet destination is local device + * Otherwise it must be clear + * Local Process packets must not exceed the size of 2K (including payload + * and headers). + */ +MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1); + +/* pci_wqe_type + * Packet type. + */ +MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4); + +/* pci_wqe_byte_count + * Size of i-th scatter/gather entry, 0 if entry is unused. + */ +MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false); + +/* pci_wqe_address + * Physical address of i-th scatter/gather entry. + * Gather Entries must be 2Byte aligned. 
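+ * With MLXSW_PCI_WQE_SG_ENTRIES == 3, a WQE carries the linear skb head
+ * plus at most two page fragments, which is why mlxsw_pci_skb_transmit()
+ * linearizes any skb with more fragments than that.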
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets including additional two
+ * Reserved Bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif

diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 000000000..726f5435b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3.
Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include
+
+#define MLXSW_PORT_MAX_MTU 10000
+
+#define MLXSW_PORT_DEFAULT_VID 1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT 255
+#define MLXSW_PORT_SWID_ALL_SWIDS 254
+#define MLXSW_PORT_SWID_TYPE_ETH 2
+
+#define MLXSW_PORT_MID 0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS 0x40
+#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET 10
+#define MLXSW_PORT_PHY_BITS_OFFSET 4
+#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT 0x0
+
+#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+ MLXSW_PORT_ADMIN_STATUS_UP = 1,
+ MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+ MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+ MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+ MLXSW_PORT_OPER_STATUS_UP = 1,
+ MLXSW_PORT_OPER_STATUS_DOWN = 2,
+ MLXSW_PORT_OPER_STATUS_FAILURE = 4, /* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */

diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 000000000..096e1c121
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,1349 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include
+#include
+#include
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+ u16 id;
+ u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+ .id = MLXSW_REG_SGCR_ID,
+ .len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and ignore the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+ MLXSW_REG_ZERO(sgcr, payload);
+ mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+ .id = MLXSW_REG_SPAD_ID,
+ .len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In multi-chip configuration, each device should maintain a mapping from
+ * Multicast ID (MID) into a list of local ports. This mapping is used in all
+ * the devices other than the ingress device, and is implemented as part of the
+ * FDB. The MID record maps from a MID, which is a unique identifier of the
+ * multicast group within the stacking domain, into a list of local ports into
+ * which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+ .id = MLXSW_REG_SMID_ID,
+ .len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); + +/* reg_smid_port_mask + * Local port mask (1 bit per port). + * Access: W + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); + +static inline void mlxsw_reg_smid_pack(char *payload, u16 mid) +{ + MLXSW_REG_ZERO(smid, payload); + mlxsw_reg_smid_swid_set(payload, 0); + mlxsw_reg_smid_mid_set(payload, mid); + mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1); + mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1); +} + +/* SSPR - Switch System Port Record Register + * ----------------------------------------- + * Configures the system port to local port mapping. + */ +#define MLXSW_REG_SSPR_ID 0x2008 +#define MLXSW_REG_SSPR_LEN 0x8 + +static const struct mlxsw_reg_info mlxsw_reg_sspr = { + .id = MLXSW_REG_SSPR_ID, + .len = MLXSW_REG_SSPR_LEN, +}; + +/* reg_sspr_m + * Master - if set, then the record describes the master system port. + * This is needed in case a local port is mapped into several system ports + * (for multipathing). That number will be reported as the source system + * port when packets are forwarded to the CPU. Only one master port is allowed + * per local port. + * + * Note: Must be set for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1); + +/* reg_sspr_local_port + * Local port number. + * + * Access: RW + */ +MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8); + +/* reg_sspr_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * + * Access: RW + */ +MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8); + +/* reg_sspr_system_port + * Unique identifier within the stacking domain that represents all the ports + * that are available in the system (external ports). + * + * Currently, only single-ASIC configurations are supported, so we default to + * 1:1 mapping between system ports and local ports. + * Access: Index + */ +MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16); + +static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(sspr, payload); + mlxsw_reg_sspr_m_set(payload, 1); + mlxsw_reg_sspr_local_port_set(payload, local_port); + mlxsw_reg_sspr_sub_port_set(payload, 0); + mlxsw_reg_sspr_system_port_set(payload, local_port); +} + +/* SPMS - Switch Port MSTP/RSTP State Register + * ------------------------------------------- + * Configures the spanning tree state of a physical port. + */ +#define MLXSW_REG_SPMS_ID 0x200d +#define MLXSW_REG_SPMS_LEN 0x404 + +static const struct mlxsw_reg_info mlxsw_reg_spms = { + .id = MLXSW_REG_SPMS_ID, + .len = MLXSW_REG_SPMS_LEN, +}; + +/* reg_spms_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8); + +enum mlxsw_reg_spms_state { + MLXSW_REG_SPMS_STATE_NO_CHANGE, + MLXSW_REG_SPMS_STATE_DISCARDING, + MLXSW_REG_SPMS_STATE_LEARNING, + MLXSW_REG_SPMS_STATE_FORWARDING, +}; + +/* reg_spms_state + * Spanning tree state of each VLAN ID (VID) of the local port. + * 0 - Do not change spanning tree state (used only when writing). + * 1 - Discarding. No learning or forwarding to/from this port (default). + * 2 - Learning. Port is learning, but not forwarding. + * 3 - Forwarding. Port is learning and forwarding. 
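+ * For example, moving VID 1 on local port 2 to the forwarding state is
+ * mlxsw_reg_spms_pack(payload, 2, 1, MLXSW_REG_SPMS_STATE_FORWARDING)
+ * followed by a write of the payload; see the pack helper below.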
+ * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2); + +static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid, + enum mlxsw_reg_spms_state state) +{ + MLXSW_REG_ZERO(spms, payload); + mlxsw_reg_spms_local_port_set(payload, local_port); + mlxsw_reg_spms_state_set(payload, vid, state); +} + +/* SFGC - Switch Flooding Group Configuration + * ------------------------------------------ + * The following register controls the association of flooding tables and MIDs + * to packet types used for flooding. + */ +#define MLXSW_REG_SFGC_ID 0x2011 +#define MLXSW_REG_SFGC_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_sfgc = { + .id = MLXSW_REG_SFGC_ID, + .len = MLXSW_REG_SFGC_LEN, +}; + +enum mlxsw_reg_sfgc_type { + MLXSW_REG_SFGC_TYPE_BROADCAST = 0, + MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5, + MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6, + MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7, +}; + +/* reg_sfgc_type + * The traffic type to reach the flooding table. + * Access: Index + */ +MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4); + +enum mlxsw_reg_sfgc_bridge_type { + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0, + MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1, +}; + +/* reg_sfgc_bridge_type + * Access: Index + * + * Note: SwitchX-2 only supports 802.1Q mode. + */ +MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3); + +enum mlxsw_flood_table_type { + MLXSW_REG_SFGC_TABLE_TYPE_VID = 1, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2, + MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0, + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3, + MLXSW_REG_SFGC_TABLE_TYPE_FID = 4, +}; + +/* reg_sfgc_table_type + * See mlxsw_flood_table_type + * Access: RW + * + * Note: FID offset and FID types are not supported in SwitchX-2. + */ +MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3); + +/* reg_sfgc_flood_table + * Flooding table index to associate with the specific type on the specific + * switch partition. + * Access: RW + */ +MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6); + +/* reg_sfgc_mid + * The multicast ID for the swid. Not supported for Spectrum + * Access: RW + */ +MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16); + +/* reg_sfgc_counter_set_type + * Counter Set Type for flow counters. + * Access: RW + */ +MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8); + +/* reg_sfgc_counter_index + * Counter Index for flow counters. + * Access: RW + */ +MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24); + +static inline void +mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type, + enum mlxsw_reg_sfgc_bridge_type bridge_type, + enum mlxsw_flood_table_type table_type, + unsigned int flood_table) +{ + MLXSW_REG_ZERO(sfgc, payload); + mlxsw_reg_sfgc_type_set(payload, type); + mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type); + mlxsw_reg_sfgc_table_type_set(payload, table_type); + mlxsw_reg_sfgc_flood_table_set(payload, flood_table); + mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID); +} + +/* SFTR - Switch Flooding Table Register + * ------------------------------------- + * The switch flooding table is used for flooding packet replication. The table + * defines a bit mask of ports for packet replication. 
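+ * As with SMID, the accompanying port_mask array selects which port
+ * bits a write affects; ports outside the mask keep their membership.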
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+	.id = MLXSW_REG_SPMLR_ID,
+	.len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+	MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+	MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+	MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+					enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	MLXSW_REG_ZERO(spmlr, payload);
+	mlxsw_reg_spmlr_local_port_set(payload, local_port);
+	mlxsw_reg_spmlr_sub_port_set(payload, 0);
+	mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
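A quick usage sketch (hypothetical, not part of the patch): disabling learning on a port is a single pack-and-write against SPMLR, which is exactly what mlxsw_sx_port_mac_learning_mode_set() does in switchx2.c below.

static int example_learning_off(struct mlxsw_core *core, u8 local_port)
{
	char spmlr_pl[MLXSW_REG_SPMLR_LEN];

	mlxsw_reg_spmlr_pack(spmlr_pl, local_port,
			     MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	return mlxsw_reg_write(core, MLXSW_REG(spmlr), spmlr_pl);
}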
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+	.id = MLXSW_REG_PMLP_ID,
+	.len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(pmlp, payload);
+	mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+	.id = MLXSW_REG_PMTU_ID,
+	.len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported, otherwise the minimum between the max_mtu of the different
+ * types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set port to. Must be smaller than or equal to max_mtu.
+ * Note: If the port type is Infiniband, then the port must be disabled
+ * when its MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+				       u16 new_mtu)
+{
+	MLXSW_REG_ZERO(pmtu, payload);
+	mlxsw_reg_pmtu_local_port_set(payload, local_port);
+	mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+	mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+	mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
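Because admin_mtu must not exceed max_mtu, changing the MTU is naturally a query followed by a write. A hypothetical sketch of that two-step flow, mirroring mlxsw_sx_port_mtu_set() in switchx2.c below:

static int example_set_mtu(struct mlxsw_core *core, u8 local_port, u16 mtu)
{
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	/* Query max_mtu first; an admin_mtu of 0 is used when only reading. */
	mlxsw_reg_pmtu_pack(pmtu_pl, local_port, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	if (mtu > mlxsw_reg_pmtu_max_mtu_get(pmtu_pl))
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, local_port, mtu);
	return mlxsw_reg_write(core, MLXSW_REG(pmtu), pmtu_pl);
}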
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+	.id = MLXSW_REG_PTYS_ID,
+	.len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH	BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII			BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX		BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4		BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4		BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR		BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2		BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4		BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4		BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4		BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR		BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR		BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR		BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4		BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4	BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4		BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4		BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4		BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4		BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4	BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX		BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T		BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T		BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR		BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR		BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR		BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2		BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2		BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+				       u32 proto_admin)
+{
+	MLXSW_REG_ZERO(ptys, payload);
+	mlxsw_reg_ptys_local_port_set(payload, local_port);
+	mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+	mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+					 u32 *p_eth_proto_adm,
+					 u32 *p_eth_proto_oper)
+{
+	if (p_eth_proto_cap)
+		*p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+	if (p_eth_proto_adm)
+		*p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+	if (p_eth_proto_oper)
+		*p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+	.id = MLXSW_REG_PPAD_ID,
+	.len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0 - base_mac: local port should be 0 and mac[7:0] is reserved; HW will
+ *     assign incremental MAC addresses.
+ * 1 - single_mac: the MAC address of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * Port number; if single_base_mac = 0, then local_port is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - the base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+				       u8 local_port)
+{
+	MLXSW_REG_ZERO(ppad, payload);
+	mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+	mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+	.id = MLXSW_REG_PAOS_ID,
+	.len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+				       enum mlxsw_port_admin_status status)
+{
+	MLXSW_REG_ZERO(paos, payload);
+	mlxsw_reg_paos_swid_set(payload, 0);
+	mlxsw_reg_paos_local_port_set(payload, local_port);
+	mlxsw_reg_paos_admin_status_set(payload, status);
+	mlxsw_reg_paos_oper_status_set(payload, 0);
+	mlxsw_reg_paos_ase_set(payload, 1);
+	mlxsw_reg_paos_ee_set(payload, 1);
+	mlxsw_reg_paos_e_set(payload, 1);
+}
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+	.id = MLXSW_REG_PPCNT_ID,
+	.len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7 inclusively.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counting;
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class counting;
+ * valid values: 0 to cap_max_tclass - 1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+	     0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+	     0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+	     0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+	     0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+	     0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+	     0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+	     0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+	     0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+	     0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+	     0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+	     0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+	     0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+	     0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+	     0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+	     0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+	     0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+	     0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+	     0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+	     0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(ppcnt, payload);
+	mlxsw_reg_ppcnt_swid_set(payload, 0);
+	mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+	mlxsw_reg_ppcnt_pnat_set(payload, 0);
+	mlxsw_reg_ppcnt_grp_set(payload, 0);
+	mlxsw_reg_ppcnt_clr_set(payload, 0);
+	mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
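Reading counters is a query on the packed register followed by the per-field getters; the clr bit is the one twist, since it resets every counter in the selected group on access. A hypothetical sketch (the driver's mlxsw_sx_port_get_stats() below uses the same pattern, without clearing):

static int example_read_and_clear(struct mlxsw_core *core, u8 local_port,
				  u64 *p_rx_frames)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	/* Group 0 (IEEE 802.3); clr=1 resets the group after this read. */
	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port);
	mlxsw_reg_ppcnt_clr_set(ppcnt_pl, 1);
	err = mlxsw_reg_query(core, MLXSW_REG(ppcnt), ppcnt_pl);
	if (err)
		return err;
	*p_rx_frames = mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	return 0;
}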
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+	.id = MLXSW_REG_PSPA_ID,
+	.len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+	MLXSW_REG_ZERO(pspa, payload);
+	mlxsw_reg_pspa_swid_set(payload, swid);
+	mlxsw_reg_pspa_local_port_set(payload, local_port);
+	mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+	.id = MLXSW_REG_HTGT_ID,
+	.len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0	/* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD	0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX	0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+	MLXSW_REG_HTGT_POLICER_DISABLE,
+	MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD	0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX	0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+	u8 swid, rdq;
+
+	MLXSW_REG_ZERO(htgt, payload);
+	if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+		swid = MLXSW_PORT_SWID_ALL_SWIDS;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+	} else {
+		swid = 0;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+	}
+	mlxsw_reg_htgt_swid_set(payload, swid);
+	mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+	mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+	mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+	mlxsw_reg_htgt_pid_set(payload, 0);
+	mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+	mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+	mlxsw_reg_htgt_priority_set(payload, 0);
+	mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+	mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+	.id = MLXSW_REG_HPKT_ID,
+	.len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+	MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+	MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+	MLXSW_REG_HPKT_ACTION_FORWARD,
+	MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+	MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+	MLXSW_REG_HPKT_ACTION_DISCARD,
+	MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+	MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+	MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+	MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+	MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+				       u8 trap_group, u16 trap_id)
+{
+	MLXSW_REG_ZERO(hpkt, payload);
+	mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+	mlxsw_reg_hpkt_action_set(payload, action);
+	mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+	mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+	mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
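Trap setup is two writes: HTGT configures the group (its RDQ, traffic class and policer), and HPKT binds a trap ID to that group with an action. A hypothetical sketch of trapping STP packets to the CPU; the driver below performs the same sequence when it registers its RX traps:

static int example_trap_stp_to_cpu(struct mlxsw_core *core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_REG_HTGT_TRAP_GROUP_RX, MLXSW_TRAP_ID_STP);
	return mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
}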
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+	switch (reg_id) {
+	case MLXSW_REG_SGCR_ID:
+		return "SGCR";
+	case MLXSW_REG_SPAD_ID:
+		return "SPAD";
+	case MLXSW_REG_SMID_ID:
+		return "SMID";
+	case MLXSW_REG_SSPR_ID:
+		return "SSPR";
+	case MLXSW_REG_SPMS_ID:
+		return "SPMS";
+	case MLXSW_REG_SFGC_ID:
+		return "SFGC";
+	case MLXSW_REG_SFTR_ID:
+		return "SFTR";
+	case MLXSW_REG_SPMLR_ID:
+		return "SPMLR";
+	case MLXSW_REG_PMLP_ID:
+		return "PMLP";
+	case MLXSW_REG_PMTU_ID:
+		return "PMTU";
+	case MLXSW_REG_PTYS_ID:
+		return "PTYS";
+	case MLXSW_REG_PPAD_ID:
+		return "PPAD";
+	case MLXSW_REG_PAOS_ID:
+		return "PAOS";
+	case MLXSW_REG_PPCNT_ID:
+		return "PPCNT";
+	case MLXSW_REG_PSPA_ID:
+		return "PSPA";
+	case MLXSW_REG_HTGT_ID:
+		return "HTGT";
+	case MLXSW_REG_HPKT_ID:
+		return "HPKT";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644
index 000000000..62cbbd1ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1568 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+	struct mlxsw_sx_port **ports;
+	struct mlxsw_core *core;
+	const struct mlxsw_bus_info *bus_info;
+	u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
+	u32			tx_dropped;
+};
+
+struct mlxsw_sx_port {
+	struct net_device *dev;
+	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+	struct mlxsw_sx *mlxsw_sx;
+	u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+				     const struct mlxsw_tx_info *tx_info)
+{
+	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+	bool is_emad = tx_info->is_emad;
+
+	memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+	/* We currently set default values for the egress tclass (QoS). */
+	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+						  MLXSW_TXHDR_ETCLASS_5);
+	mlxsw_tx_hdr_swid_set(txhdr, 0);
+	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+					      MLXSW_TXHDR_RDQ_OTHER);
+	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+					       MLXSW_TXHDR_NOT_EMAD);
+	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+					  bool is_up)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+				    MLXSW_PORT_ADMIN_STATUS_DOWN);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+					 bool *p_is_up)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+	u8 oper_status;
+	int err;
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+	if (err)
+		return err;
+	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ?
true : false; + return 0; +} + +static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char pmtu_pl[MLXSW_REG_PMTU_LEN]; + int max_mtu; + int err; + + mtu += MLXSW_TXHDR_LEN + ETH_HLEN; + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl); + if (err) + return err; + max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); + + if (mtu > max_mtu) + return -EINVAL; + + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl); +} + +static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char pspa_pl[MLXSW_REG_PSPA_LEN]; + + mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl); +} + +static int +mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char sspr_pl[MLXSW_REG_SSPR_LEN]; + + mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl); +} + +static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port, + bool *p_usable) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false; + return 0; +} + +static int mlxsw_sx_port_open(struct net_device *dev) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + int err; + + err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true); + if (err) + return err; + netif_start_queue(dev); + return 0; +} + +static int mlxsw_sx_port_stop(struct net_device *dev) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + + netif_stop_queue(dev); + return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false); +} + +static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + struct mlxsw_sx_port_pcpu_stats *pcpu_stats; + const struct mlxsw_tx_info tx_info = { + .local_port = mlxsw_sx_port->local_port, + .is_emad = false, + }; + u64 len; + int err; + + if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info)) + return NETDEV_TX_BUSY; + + if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { + struct sk_buff *skb_orig = skb; + + skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); + if (!skb) { + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb_orig); + return NETDEV_TX_OK; + } + } + mlxsw_sx_txhdr_construct(skb, &tx_info); + len = skb->len; + /* Due to a race we might fail here because of a full queue. In that + * unlikely case we simply drop the packet. 
+ */ + err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info); + + if (!err) { + pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->tx_packets++; + pcpu_stats->tx_bytes += len; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; +} + +static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + int err; + + err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu); + if (err) + return err; + dev->mtu = mtu; + return 0; +} + +static struct rtnl_link_stats64 * +mlxsw_sx_port_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx_port_pcpu_stats *p; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + u32 tx_dropped = 0; + unsigned int start; + int i; + + for_each_possible_cpu(i) { + p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i); + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + rx_packets = p->rx_packets; + rx_bytes = p->rx_bytes; + tx_packets = p->tx_packets; + tx_bytes = p->tx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + /* tx_dropped is u32, updated without syncp protection. */ + tx_dropped += p->tx_dropped; + } + stats->tx_dropped = tx_dropped; + return stats; +} + +static const struct net_device_ops mlxsw_sx_port_netdev_ops = { + .ndo_open = mlxsw_sx_port_open, + .ndo_stop = mlxsw_sx_port_stop, + .ndo_start_xmit = mlxsw_sx_port_xmit, + .ndo_change_mtu = mlxsw_sx_port_change_mtu, + .ndo_get_stats64 = mlxsw_sx_port_get_stats64, +}; + +static void mlxsw_sx_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + + strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, mlxsw_sx_driver_version, + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + mlxsw_sx->bus_info->fw_rev.major, + mlxsw_sx->bus_info->fw_rev.minor, + mlxsw_sx->bus_info->fw_rev.subminor); + strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name, + sizeof(drvinfo->bus_info)); +} + +struct mlxsw_sx_port_hw_stats { + char str[ETH_GSTRING_LEN]; + u64 (*getter)(char *payload); +}; + +static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = { + { + .str = "a_frames_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, + }, + { + .str = "a_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, + }, + { + .str = "a_frame_check_sequence_errors", + .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, + }, + { + .str = "a_alignment_errors", + .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, + }, + { + .str = "a_octets_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, + }, + { + .str = "a_octets_received_ok", + .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, + }, + { + .str = "a_multicast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, + }, + { + .str = "a_broadcast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, + }, + { + .str = "a_multicast_frames_received_ok", 
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, + }, + { + .str = "a_broadcast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, + }, + { + .str = "a_in_range_length_errors", + .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, + }, + { + .str = "a_out_of_range_length_field", + .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, + }, + { + .str = "a_frame_too_long_errors", + .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, + }, + { + .str = "a_symbol_error_during_carrier", + .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, + }, + { + .str = "a_mac_control_frames_transmitted", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, + }, + { + .str = "a_mac_control_frames_received", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, + }, + { + .str = "a_unsupported_opcodes_received", + .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_received", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_xmitted", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, + }, +}; + +#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats) + +static void mlxsw_sx_port_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) { + memcpy(p, mlxsw_sx_port_hw_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void mlxsw_sx_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int i; + int err; + + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl); + for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) + data[i] = !err ? 
mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0; +} + +static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return MLXSW_SX_PORT_HW_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +struct mlxsw_sx_port_link_mode { + u32 mask; + u32 supported; + u32 advertised; + u32 speed; +}; + +static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = { + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, + .supported = SUPPORTED_100baseT_Full, + .advertised = ADVERTISED_100baseT_Full, + .speed = 100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX, + .speed = 100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, + .supported = SUPPORTED_10000baseT_Full, + .advertised = ADVERTISED_10000baseT_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, + .supported = SUPPORTED_20000baseKR2_Full, + .advertised = ADVERTISED_20000baseKR2_Full, + .speed = 20000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, + .supported = SUPPORTED_40000baseSR4_Full, + .advertised = ADVERTISED_40000baseSR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, + .supported = SUPPORTED_40000baseLR4_Full, + .advertised = ADVERTISED_40000baseLR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, + .speed = 25000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 | + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, + .speed = 50000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, + .supported = SUPPORTED_56000baseKR4_Full, + .advertised = ADVERTISED_56000baseKR4_Full, + .speed = 56000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, + .speed = 100000, + }, +}; + +#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode) + +static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto) +{ + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) + return SUPPORTED_FIBRE; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | + 
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+		return SUPPORTED_Backplane;
+	return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+			modes |= mlxsw_sx_port_link_mode[i].supported;
+	}
+	return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+			modes |= mlxsw_sx_port_link_mode[i].advertised;
+	}
+	return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+					    struct ethtool_cmd *cmd)
+{
+	u32 speed = SPEED_UNKNOWN;
+	u8 duplex = DUPLEX_UNKNOWN;
+	int i;
+
+	if (!carrier_ok)
+		goto out;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+			speed = mlxsw_sx_port_link_mode[i].speed;
+			duplex = DUPLEX_FULL;
+			break;
+		}
+	}
+out:
+	ethtool_cmd_speed_set(cmd, speed);
+	cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return PORT_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+		return PORT_DA;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+		return PORT_NONE;
+
+	return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	u32 eth_proto_oper;
+	int err;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+			      &eth_proto_admin, &eth_proto_oper);
+
+	cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+					eth_proto_oper, cmd);
+
+	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+	cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+	cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (speed == mlxsw_sx_port_link_mode[i].speed)
+			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 speed;
+	u32 eth_proto_new;
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	bool is_up;
+	int err;
+
+	speed = ethtool_cmd_speed(cmd);
+
+	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+		mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+		mlxsw_sx_to_ptys_speed(speed);
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+	eth_proto_new = eth_proto_new & eth_proto_cap;
+	if (!eth_proto_new) {
+		netdev_err(dev, "Not supported proto admin requested");
+		return -EINVAL;
+	}
+	if (eth_proto_new == eth_proto_admin)
+		return 0;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to set proto admin");
+		return err;
+	}
+
+	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+	if (err) {
+		netdev_err(dev, "Failed to get oper status");
+		return err;
+	}
+	if (!is_up)
+		return 0;
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= mlxsw_sx_port_get_strings,
+	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
+	.get_sset_count		= mlxsw_sx_port_get_sset_count,
+	.get_settings		= mlxsw_sx_port_get_settings,
+	.set_settings		= mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+				  struct switchdev_attr *attr)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+	char spad_pl[MLXSW_REG_SPAD_LEN];
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+	return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	struct net_device *dev = mlxsw_sx_port->dev;
+	char ppad_pl[MLXSW_REG_PPAD_LEN];
+	int err;
+
+	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+	/* The last byte value in the base MAC address is guaranteed to be
+	 * such that it does not overflow when the local_port value is added
+	 * to it.
+	 */
+	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+	return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				       u16 vid, enum mlxsw_reg_spms_state state)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char *spms_pl;
+	int err;
+
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				   u32 speed)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				    enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port;
+	struct net_device *dev;
+	bool usable;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+	if (!dev)
+		return -ENOMEM;
+	mlxsw_sx_port = netdev_priv(dev);
+	mlxsw_sx_port->dev = dev;
+	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+	mlxsw_sx_port->local_port = local_port;
+
+	mlxsw_sx_port->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+	if (!mlxsw_sx_port->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+			mlxsw_sx_port->local_port);
+		goto err_dev_addr_get;
+	}
+
+	netif_carrier_off(dev);
+
+	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+			 NETIF_F_VLAN_CHALLENGED;
+
+	/* Each packet needs to have a Tx header (metadata) on top of all
+	 * other headers.
+ */ + dev->hard_header_len += MLXSW_TXHDR_LEN; + + err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n", + mlxsw_sx_port->local_port); + goto err_port_module_check; + } + + if (!usable) { + dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n", + mlxsw_sx_port->local_port); + goto port_not_usable; + } + + err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", + mlxsw_sx_port->local_port); + goto err_port_system_port_mapping_set; + } + + err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n", + mlxsw_sx_port->local_port); + goto err_port_swid_set; + } + + err = mlxsw_sx_port_speed_set(mlxsw_sx_port, + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n", + mlxsw_sx_port->local_port); + goto err_port_speed_set; + } + + err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n", + mlxsw_sx_port->local_port); + goto err_port_mtu_set; + } + + err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false); + if (err) + goto err_port_admin_status_set; + + err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port, + MLXSW_PORT_DEFAULT_VID, + MLXSW_REG_SPMS_STATE_FORWARDING); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n", + mlxsw_sx_port->local_port); + goto err_port_stp_state_set; + } + + err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port, + MLXSW_REG_SPMLR_LEARN_MODE_DISABLE); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n", + mlxsw_sx_port->local_port); + goto err_port_mac_learning_mode_set; + } + + err = register_netdev(dev); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n", + mlxsw_sx_port->local_port); + goto err_register_netdev; + } + + mlxsw_sx->ports[local_port] = mlxsw_sx_port; + return 0; + +err_register_netdev: +err_port_mac_learning_mode_set: +err_port_stp_state_set: +err_port_admin_status_set: +err_port_mtu_set: +err_port_speed_set: +err_port_swid_set: +err_port_system_port_mapping_set: +port_not_usable: +err_port_module_check: +err_dev_addr_get: + free_percpu(mlxsw_sx_port->pcpu_stats); +err_alloc_stats: + free_netdev(dev); + return err; +} + +static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) +{ + struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; + + if (!mlxsw_sx_port) + return; + unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ + mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); + free_percpu(mlxsw_sx_port->pcpu_stats); + free_netdev(mlxsw_sx_port->dev); +} + +static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx) +{ + int i; + + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + mlxsw_sx_port_remove(mlxsw_sx, i); + kfree(mlxsw_sx->ports); +} + +static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) +{ + size_t alloc_size; + int i; + int err; + + alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS; + mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_sx->ports) + return -ENOMEM; + + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { + err = mlxsw_sx_port_create(mlxsw_sx, i); + if (err) + goto err_port_create; + } + return 0; + 
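The error labels that follow implement the kernel's usual create/unwind idiom: when creating port i fails, ports 1..i-1 that were already created are torn down in reverse order before the ports array itself is freed (the loop starts at 1 because local port 0 is the switch's CPU port). A minimal, self-contained userspace sketch of the same shape; res_create(), res_remove(), all_create() and NUM_RES are invented stand-ins for the driver's per-port create/remove pair:

#include <stdio.h>
#include <stdlib.h>

#define NUM_RES 4

/* Invented stand-ins for the per-port create/remove pair. */
static int res_create(int i)
{
	printf("create %d\n", i);
	return i == 3 ? -1 : 0;	/* simulate a failure on the third item */
}

static void res_remove(int i)
{
	printf("remove %d\n", i);
}

static int all_create(void)
{
	int i;
	int err;

	for (i = 1; i < NUM_RES; i++) {
		err = res_create(i);
		if (err)
			goto err_create;
	}
	return 0;

err_create:
	/* Unwind only what was created successfully, newest first. */
	for (i--; i >= 1; i--)
		res_remove(i);
	return err;
}

int main(void)
{
	return all_create() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Running the sketch prints create 1..3 followed by remove 2, remove 1, which is exactly the ordering the err_port_create path below produces.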
+err_port_create: + for (i--; i >= 1; i--) + mlxsw_sx_port_remove(mlxsw_sx, i); + kfree(mlxsw_sx->ports); + return err; +} + +static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg, + char *pude_pl, void *priv) +{ + struct mlxsw_sx *mlxsw_sx = priv; + struct mlxsw_sx_port *mlxsw_sx_port; + enum mlxsw_reg_pude_oper_status status; + u8 local_port; + + local_port = mlxsw_reg_pude_local_port_get(pude_pl); + mlxsw_sx_port = mlxsw_sx->ports[local_port]; + if (!mlxsw_sx_port) { + dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n", + local_port); + return; + } + + status = mlxsw_reg_pude_oper_status_get(pude_pl); + if (MLXSW_PORT_OPER_STATUS_UP == status) { + netdev_info(mlxsw_sx_port->dev, "link up\n"); + netif_carrier_on(mlxsw_sx_port->dev); + } else { + netdev_info(mlxsw_sx_port->dev, "link down\n"); + netif_carrier_off(mlxsw_sx_port->dev); + } +} + +static struct mlxsw_event_listener mlxsw_sx_pude_event = { + .func = mlxsw_sx_pude_event_func, + .trap_id = MLXSW_TRAP_ID_PUDE, +}; + +static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx, + enum mlxsw_event_trap_id trap_id) +{ + struct mlxsw_event_listener *el; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int err; + + switch (trap_id) { + case MLXSW_TRAP_ID_PUDE: + el = &mlxsw_sx_pude_event; + break; + } + err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx); + if (err) + return err; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_event_trap_set; + + return 0; + +err_event_trap_set: + mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx); + return err; +} + +static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx, + enum mlxsw_event_trap_id trap_id) +{ + struct mlxsw_event_listener *el; + + switch (trap_id) { + case MLXSW_TRAP_ID_PUDE: + el = &mlxsw_sx_pude_event; + break; + } + mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx); +} + +static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_sx *mlxsw_sx = priv; + struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; + struct mlxsw_sx_port_pcpu_stats *pcpu_stats; + + if (unlikely(!mlxsw_sx_port)) { + if (net_ratelimit()) + dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n", + local_port); + return; + } + + skb->dev = mlxsw_sx_port->dev; + + pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + skb->protocol = eth_type_trans(skb, skb->dev); + netif_receive_skb(skb); +} + +static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = { + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_FDB_MC, + }, + /* Traps for specific L2 packet types, not trapped as FDB MC */ + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_STP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LACP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_EAPOL, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LLDP, + }, + { + .func = 
mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MMRP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MVRP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_RPVST, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_DHCP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, + }, +}; + +static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int i; + int err; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { + err = mlxsw_core_rx_listener_register(mlxsw_sx->core, + &mlxsw_sx_rx_listener[i], + mlxsw_sx); + if (err) + goto err_rx_listener_register; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + MLXSW_REG_HTGT_TRAP_GROUP_RX, + mlxsw_sx_rx_listener[i].trap_id); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_rx_trap_set; + } + return 0; + +err_rx_trap_set: + mlxsw_core_rx_listener_unregister(mlxsw_sx->core, + &mlxsw_sx_rx_listener[i], + mlxsw_sx); +err_rx_listener_register: + for (i--; i >= 0; i--) { + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + MLXSW_REG_HTGT_TRAP_GROUP_RX, + mlxsw_sx_rx_listener[i].trap_id); + mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); + + mlxsw_core_rx_listener_unregister(mlxsw_sx->core, + &mlxsw_sx_rx_listener[i], + mlxsw_sx); + } + return err; +} + +static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + MLXSW_REG_HTGT_TRAP_GROUP_RX, + mlxsw_sx_rx_listener[i].trap_id); + mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); + + mlxsw_core_rx_listener_unregister(mlxsw_sx->core, + &mlxsw_sx_rx_listener[i], + mlxsw_sx); + } +} + +static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) +{ + char sfgc_pl[MLXSW_REG_SFGC_LEN]; + char sgcr_pl[MLXSW_REG_SGCR_LEN]; + char *smid_pl; + char *sftr_pl; + int err; + + /* Due to FW bug, we must configure SMID. */ + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + if (err) + return err; + + /* Configure a flooding table, which includes only CPU port. 
*/ + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); + if (!sftr_pl) + return -ENOMEM; + mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl); + kfree(sftr_pl); + if (err) + return err; + + /* Flood different packet types using the flooding table. */ + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_BROADCAST, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sgcr_pack(sgcr_pl, true); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl); +} + +static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sx *mlxsw_sx = priv; + int err; + + mlxsw_sx->core = mlxsw_core; + mlxsw_sx->bus_info = mlxsw_bus_info; + + err = mlxsw_sx_hw_id_get(mlxsw_sx); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n"); + return err; + } + + err = mlxsw_sx_ports_create(mlxsw_sx); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n"); + return err; + } + + err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n"); + goto err_event_register; + } + + err = mlxsw_sx_traps_init(mlxsw_sx); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n"); + goto err_rx_listener_register; + } + + err = mlxsw_sx_flood_init(mlxsw_sx); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n"); + goto err_flood_init; + } + + return 0; + +err_flood_init: + mlxsw_sx_traps_fini(mlxsw_sx); +err_rx_listener_register: + mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); +err_event_register: + mlxsw_sx_ports_remove(mlxsw_sx); + return err; +} + +static void mlxsw_sx_fini(void *priv) +{ + struct mlxsw_sx *mlxsw_sx = priv; + + mlxsw_sx_traps_fini(mlxsw_sx); + mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); + mlxsw_sx_ports_remove(mlxsw_sx); +} + +static struct mlxsw_config_profile mlxsw_sx_config_profile = { + .used_max_vepa_channels = 1, + .max_vepa_channels = 0, + .used_max_lag = 1, + .max_lag = 64, + .used_max_port_per_lag = 1, + .max_port_per_lag = 16, + .used_max_mid = 1, + .max_mid = 7000, + .used_max_pgt = 1, + .max_pgt = 0, + .used_max_system_port = 1, + .max_system_port = 48000, + .used_max_vlan_groups = 1, + .max_vlan_groups = 
127, + .used_max_regions = 1, + .max_regions = 400, + .used_flood_tables = 1, + .max_flood_tables = 2, + .max_vid_flood_tables = 1, + .used_flood_mode = 1, + .flood_mode = 3, + .used_max_ib_mc = 1, + .max_ib_mc = 0, + .used_max_pkey = 1, + .max_pkey = 0, + .swid_config = { + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_ETH, + } + }, +}; + +static struct mlxsw_driver mlxsw_sx_driver = { + .kind = MLXSW_DEVICE_KIND_SWITCHX2, + .owner = THIS_MODULE, + .priv_size = sizeof(struct mlxsw_sx), + .init = mlxsw_sx_init, + .fini = mlxsw_sx_fini, + .txhdr_construct = mlxsw_sx_txhdr_construct, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sx_config_profile, +}; + +static int __init mlxsw_sx_module_init(void) +{ + return mlxsw_core_driver_register(&mlxsw_sx_driver); +} + +static void __exit mlxsw_sx_module_exit(void) +{ + mlxsw_core_driver_unregister(&mlxsw_sx_driver); +} + +module_init(mlxsw_sx_module_init); +module_exit(mlxsw_sx_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko "); +MODULE_DESCRIPTION("Mellanox SwitchX-2 driver"); +MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2); diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h new file mode 100644 index 000000000..53a9550be --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -0,0 +1,66 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/trap.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Elad Raz + * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015 Ido Schimmel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef _MLXSW_TRAP_H +#define _MLXSW_TRAP_H + +enum { + /* Ethernet EMAD and FDB miss */ + MLXSW_TRAP_ID_FDB_MC = 0x01, + MLXSW_TRAP_ID_ETHEMAD = 0x05, + /* L2 traps for specific packet types */ + MLXSW_TRAP_ID_STP = 0x10, + MLXSW_TRAP_ID_LACP = 0x11, + MLXSW_TRAP_ID_EAPOL = 0x12, + MLXSW_TRAP_ID_LLDP = 0x13, + MLXSW_TRAP_ID_MMRP = 0x14, + MLXSW_TRAP_ID_MVRP = 0x15, + MLXSW_TRAP_ID_RPVST = 0x16, + MLXSW_TRAP_ID_DHCP = 0x19, + MLXSW_TRAP_ID_IGMP_QUERY = 0x30, + MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31, + MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32, + MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33, + MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, + + MLXSW_TRAP_ID_MAX = 0x1FF +}; + +enum mlxsw_event_trap_id { + /* Port Up/Down event generated by hardware */ + MLXSW_TRAP_ID_PUDE = 0x8, +}; + +#endif /* _MLXSW_TRAP_H */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h new file mode 100644 index 000000000..06fc46c78 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h @@ -0,0 +1,80 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/txheader.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Ido Schimmel + * Copyright (c) 2015 Jiri Pirko + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_TXHEADER_H +#define _MLXSW_TXHEADER_H + +#define MLXSW_TXHDR_LEN 0x10 +#define MLXSW_TXHDR_VERSION_0 0 + +enum { + MLXSW_TXHDR_ETH_CTL, + MLXSW_TXHDR_ETH_DATA, +}; + +#define MLXSW_TXHDR_PROTO_ETH 1 + +enum { + MLXSW_TXHDR_ETCLASS_0, + MLXSW_TXHDR_ETCLASS_1, + MLXSW_TXHDR_ETCLASS_2, + MLXSW_TXHDR_ETCLASS_3, + MLXSW_TXHDR_ETCLASS_4, + MLXSW_TXHDR_ETCLASS_5, + MLXSW_TXHDR_ETCLASS_6, + MLXSW_TXHDR_ETCLASS_7, +}; + +enum { + MLXSW_TXHDR_RDQ_OTHER, + MLXSW_TXHDR_RDQ_EMAD = 0x1f, +}; + +#define MLXSW_TXHDR_CTCLASS3 0 +#define MLXSW_TXHDR_CPU_SIG 0 +#define MLXSW_TXHDR_SIG 0xE0E0 +#define MLXSW_TXHDR_STCLASS_NONE 0 + +enum { + MLXSW_TXHDR_NOT_EMAD, + MLXSW_TXHDR_EMAD, +}; + +enum { + MLXSW_TXHDR_TYPE_DATA, + MLXSW_TXHDR_TYPE_CONTROL = 6, +}; + +#endif diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 66d4ab703..60f43ec22 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -1601,6 +1601,7 @@ static const struct of_device_id ks8851_match_table[] = { { .compatible = "micrel,ks8851" }, { } }; +MODULE_DEVICE_TABLE(of, ks8851_match_table); static struct spi_driver ks8851_driver = { .driver = { diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index becbb5f1f..a10c928bb 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -552,6 +552,7 @@ static const struct of_device_id moxart_mac_match[] = { { .compatible = "moxa,moxart-mac" }, { } }; +MODULE_DEVICE_TABLE(of, moxart_mac_match); static struct platform_driver moxart_mac_driver = { .probe = moxart_mac_probe, diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index c28111749..2d1b94274 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8226,31 +8226,7 @@ static void s2io_rem_nic(struct pci_dev *pdev) pci_disable_device(pdev); } -/** - * s2io_starter - Entry point for the driver - * Description: This function is the entry point for the driver. It verifies - * the module loadable parameters and initializes PCI configuration space. - */ - -static int __init s2io_starter(void) -{ - return pci_register_driver(&s2io_driver); -} - -/** - * s2io_closer - Cleanup routine for the driver - * Description: This function is the cleanup routine for the driver. It - * unregisters the driver. 
- */ - -static __exit void s2io_closer(void) -{ - pci_unregister_driver(&s2io_driver); - DBG_PRINT(INIT_DBG, "cleanup done\n"); -} - -module_init(s2io_starter); -module_exit(s2io_closer); +module_pci_driver(s2io_driver); static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, struct tcphdr **tcp, struct RxD_t *rxdp, diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index d89b6ed82..6c5997dc8 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -1085,8 +1085,6 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp); static void tx_intr_handler(struct fifo_info *fifo_data); static void s2io_handle_errors(void * dev_id); -static int s2io_starter(void); -static void s2io_closer(void); static void s2io_tx_watchdog(struct net_device *dev); static void s2io_set_multicast(struct net_device *dev); static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index a41bb5e6b..75e88f4c1 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -4076,6 +4076,8 @@ static void nv_do_nic_poll(unsigned long data) struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 mask = 0; + unsigned long flags; + unsigned int irq = 0; /* * First disable irq(s) and then @@ -4085,25 +4087,27 @@ static void nv_do_nic_poll(unsigned long data) if (!using_multi_irqs(dev)) { if (np->msi_flags & NV_MSI_X_ENABLED) - disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; else - disable_irq_lockdep(np->pci_dev->irq); + irq = np->pci_dev->irq; mask = np->irqmask; } else { if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { - disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; mask |= NVREG_IRQ_RX_ALL; } if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { - disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; mask |= NVREG_IRQ_TX_ALL; } if (np->nic_poll_irq & NVREG_IRQ_OTHER) { - disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; mask |= NVREG_IRQ_OTHER; } } - /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ + + disable_irq_nosync_lockdep_irqsave(irq, &flags); + synchronize_irq(irq); if (np->recover_error) { np->recover_error = 0; @@ -4156,28 +4160,22 @@ static void nv_do_nic_poll(unsigned long data) nv_nic_irq_optimized(0, dev); else nv_nic_irq(0, dev); - if (np->msi_flags & NV_MSI_X_ENABLED) - enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); - else - enable_irq_lockdep(np->pci_dev->irq); } else { if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; nv_nic_irq_rx(0, dev); - enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); } if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; nv_nic_irq_tx(0, dev); - enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); } if (np->nic_poll_irq & NVREG_IRQ_OTHER) { np->nic_poll_irq &= ~NVREG_IRQ_OTHER; nv_nic_irq_other(0, dev); - enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); } } + enable_irq_lockdep_irqrestore(irq, &flags); } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c 
index 66fd86815..b159ef830 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -476,13 +476,12 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) mac[5] = tmp >> 8; } -static void __lpc_eth_clock_enable(struct netdata_local *pldat, - bool enable) +static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable) { if (enable) - clk_enable(pldat->clk); + clk_prepare_enable(pldat->clk); else - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); } static void __lpc_params_setup(struct netdata_local *pldat) @@ -1494,7 +1493,7 @@ err_out_free_irq: err_out_iounmap: iounmap(pldat->net_base); err_out_disable_clocks: - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); err_out_free_dev: free_netdev(ndev); @@ -1519,7 +1518,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) iounmap(pldat->net_base); mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); free_netdev(ndev); @@ -1540,7 +1539,7 @@ static int lpc_eth_drv_suspend(struct platform_device *pdev, if (netif_running(ndev)) { netif_device_detach(ndev); __lpc_eth_shutdown(pldat); - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); /* * Reset again now clock is disable to be sure diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 32707fe0c..85cdbbc86 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -24,9 +24,7 @@ #include #include #include - #include - #include #include #include @@ -39,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 62 -#define QLCNIC_LINUX_VERSIONID "5.3.62" +#define _QLCNIC_LINUX_SUBVERSION 63 +#define QLCNIC_LINUX_VERSIONID "5.3.63" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -538,6 +536,7 @@ struct qlcnic_hardware_context { u8 extend_lb_time; u8 phys_port_id[ETH_ALEN]; u8 lb_mode; + u8 vxlan_port_count; u16 vxlan_port; struct device *hwmon_dev; u32 post_mode; @@ -926,6 +925,7 @@ struct qlcnic_mac_vlan_list { #define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5 #define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9 +#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP BIT_13 #define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0 #define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1 @@ -2291,8 +2291,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops; #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 #define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 -#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 +#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830 +#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30 0x8C30 #define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040 #define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440 @@ -2319,7 +2320,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) (device == PCI_DEVICE_ID_QLOGIC_QLE8830) || (device == PCI_DEVICE_ID_QLOGIC_QLE844X) || (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || - (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? 
true : false; return status; } @@ -2335,7 +2337,8 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) bool status; status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) || - (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false; + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false; return status; } @@ -2351,7 +2354,8 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter) { unsigned short device = adapter->pdev->device; - return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; + return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false; } static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 840bf36b5..9f0bdd993 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -5,14 +5,15 @@ * See LICENSE.qlcnic for copyright and licensing details. */ -#include "qlcnic.h" -#include "qlcnic_sriov.h" #include #include #include #include #include +#include "qlcnic.h" +#include "qlcnic_sriov.h" + static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *); static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8); static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8, @@ -118,6 +119,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50}, {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1}, + {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1}, }; const u32 qlcnic_83xx_ext_reg_tbl[] = { @@ -916,8 +918,6 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, mbx->req.arg = NULL; return -ENOMEM; } - memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); - memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); temp = adapter->ahw->fw_hal_version << 29; mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); mbx->cmd_op = type; @@ -3513,6 +3513,31 @@ out: qlcnic_free_mbx_args(&cmd); } +#define QLCNIC_83XX_ADD_PORT0 BIT_0 +#define QLCNIC_83XX_ADD_PORT1 BIT_1 +#define QLCNIC_83XX_EXTENDED_MEM_SIZE 13 /* In MB */ +int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP); + if (err) + return err; + + cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1); + cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE; + cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "failed to issue extend iSCSI minidump capability\n"); + + return err; +} + int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter) { u32 major, minor, sub; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index db899e2cd..def3f7efd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -10,6 +10,7 @@ #include #include + #include "qlcnic_hw.h" #define QLCNIC_83XX_BAR0_LENGTH 0x4000 @@ -626,6 +627,7 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); 
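For context on qlcnic_83xx_extend_md_capab() above: per the mailbox table entry {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1}, the new command carries four request words, where word 0 is the command header filled in by qlcnic_alloc_mbx_args(), word 1 selects the ports via BIT_0/BIT_1, and words 2 and 3 carry the extra dump memory size in MB per port. A standalone sketch of just that argument layout; struct mbx_req and pack_extend_dump_req() are simplified stand-ins for the driver's qlcnic_cmd_args machinery, while the bit and size values are taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define BIT_0 0x1u
#define BIT_1 0x2u

#define QLCNIC_83XX_ADD_PORT0         BIT_0
#define QLCNIC_83XX_ADD_PORT1         BIT_1
#define QLCNIC_83XX_EXTENDED_MEM_SIZE 13	/* in MB, per the patch */

/* Simplified stand-in for the request half of qlcnic_cmd_args. */
struct mbx_req {
	uint32_t arg[4];
};

static void pack_extend_dump_req(struct mbx_req *req)
{
	/* arg[0] (command/type header) is owned by the mailbox layer. */
	req->arg[1] = QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1;
	req->arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
	req->arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
}

int main(void)
{
	struct mbx_req req = { { 0 } };
	int i;

	pack_extend_dump_req(&req);
	for (i = 1; i < 4; i++)
		printf("arg[%d] = 0x%x\n", i, (unsigned)req.arg[i]);
	return 0;
}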
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *); int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index c316746d5..13dc6646a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1384,7 +1384,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) size_t size; u64 addr; - temp = kzalloc(fw->size, GFP_KERNEL); + temp = vzalloc(fw->size); if (!temp) { release_firmware(fw); fw_info->fw = NULL; @@ -1430,7 +1430,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) exit: release_firmware(fw); fw_info->fw = NULL; - kfree(temp); + vfree(temp); return ret; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 6e6f18fc5..a5f422f26 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -73,8 +73,6 @@ int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, mbx->req.arg = NULL; return -ENOMEM; } - memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); - memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); mbx->req.arg[0] = type; break; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 75ee9e4ce..509b596cf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -5,13 +5,13 @@ * See LICENSE.qlcnic for copyright and licensing details. 
*/ -#include "qlcnic.h" -#include "qlcnic_hdr.h" - #include #include #include +#include "qlcnic.h" +#include "qlcnic_hdr.h" + #define MASK(n) ((1ULL<<(n))-1) #define OCM_WIN_P3P(addr) (addr & 0xffc0000) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index cbe2399c3..4bb33af8e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -109,6 +109,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_GET_LED_CONFIG 0x6A #define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F #define QLCNIC_CMD_ADD_RCV_RINGS 0x0B +#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP 0x37 #define QLCNIC_INTRPT_INTX 1 #define QLCNIC_INTRPT_MSIX 3 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index dc7501265..6bb5211f9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -7,11 +7,6 @@ #include #include - -#include "qlcnic.h" -#include "qlcnic_sriov.h" -#include "qlcnic_hw.h" - #include #include #include @@ -25,6 +20,10 @@ #include #endif +#include "qlcnic.h" +#include "qlcnic_sriov.h" +#include "qlcnic_hw.h" + MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(QLCNIC_LINUX_VERSIONID); @@ -111,8 +110,9 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) static const struct pci_device_id qlcnic_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), - ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830), + ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X), {0,} @@ -483,11 +483,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev, /* Adapter supports only one VXLAN port. 
Use very first port * for enabling offload */ - if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port) + if (!qlcnic_encap_rx_offload(adapter)) + return; + if (!ahw->vxlan_port_count) { + ahw->vxlan_port_count = 1; + ahw->vxlan_port = ntohs(port); + adapter->flags |= QLCNIC_ADD_VXLAN_PORT; return; + } + if (ahw->vxlan_port == ntohs(port)) + ahw->vxlan_port_count++; - ahw->vxlan_port = ntohs(port); - adapter->flags |= QLCNIC_ADD_VXLAN_PORT; } static void qlcnic_del_vxlan_port(struct net_device *netdev, @@ -496,11 +502,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; - if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port || + if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count || (ahw->vxlan_port != ntohs(port))) return; - adapter->flags |= QLCNIC_DEL_VXLAN_PORT; + ahw->vxlan_port_count--; + if (!ahw->vxlan_port_count) + adapter->flags |= QLCNIC_DEL_VXLAN_PORT; } static netdev_features_t qlcnic_features_check(struct sk_buff *skb, @@ -1149,6 +1157,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) case PCI_DEVICE_ID_QLOGIC_QLE844X: case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30: *bar = QLCNIC_83XX_BAR0_LENGTH; break; default: @@ -2403,7 +2412,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, qlcnic_free_tx_rings(adapter); return -ENOMEM; } - memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); tx_ring->cmd_buf_arr = cmd_buf_arr; spin_lock_init(&tx_ring->tx_clean_lock); } @@ -2492,6 +2500,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) qlcnic_83xx_register_map(ahw); break; case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30: case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: qlcnic_sriov_vf_register_map(ahw); break; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 332bb8a3f..cda9e604a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -5,13 +5,13 @@ * See LICENSE.qlcnic for copyright and licensing details. 
*/ +#include + +#include "qlcnic.h" +#include "qlcnic_hdr.h" +#include "qlcnic_83xx_hw.h" +#include "qlcnic_hw.h" + -#include - #define QLC_83XX_MINIDUMP_FLASH 0x520000 #define QLC_83XX_OCM_INDEX 3 #define QLC_83XX_PCI_INDEX 0 @@ -1388,27 +1388,60 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) fw_dump->clr = 1; snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); netdev_info(adapter->netdev, - "Dump data %d bytes captured, template header size %d bytes\n", - fw_dump->size, fw_dump->tmpl_hdr_size); + "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n", + fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size, + fw_dump->tmpl_hdr); /* Send a udev event to notify availability of FW dump */ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); return 0; } +static inline bool +qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter) +{ + /* For special adapters (with 0x8830 device ID), where iSCSI firmware + * dump needs to be captured as part of regular firmware dump + * collection process, firmware exports its capability through + * capability registers + */ + return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) && + (adapter->ahw->extra_capability[0] & + QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP)); +} + void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) { u32 prev_version, current_version; struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; struct pci_dev *pdev = adapter->pdev; + bool extended = false; prev_version = adapter->fw_version; current_version = qlcnic_83xx_get_fw_version(adapter); if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { vfree(fw_dump->tmpl_hdr); + + if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) + extended = !qlcnic_83xx_extend_md_capab(adapter); + if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) dev_info(&pdev->dev, "Supports FW dump capability\n"); + + /* Once we have minidump template with extended iSCSI dump + * capability, update the minidump capture mask to 0x1f as + * per FW requirement + */ + if (extended) { + struct qlcnic_83xx_dump_template_hdr *hdr; + + hdr = fw_dump->tmpl_hdr; + hdr->drv_cap_mask = 0x1f; + fw_dump->cap_mask = 0x1f; + dev_info(&pdev->dev, + "Extended iSCSI dump capability and updated capture mask to 0x1f\n"); + } } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 4677b2edc..017d8c2c8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -8,10 +8,11 @@ #ifndef _QLCNIC_83XX_SRIOV_H_ #define _QLCNIC_83XX_SRIOV_H_ -#include "qlcnic.h" #include #include +#include "qlcnic.h" + extern const u32 qlcnic_83xx_reg_tbl[]; extern const u32 qlcnic_83xx_ext_reg_tbl[]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index e6312465f..7327b729b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -5,10 +5,11 @@ * See LICENSE.qlcnic for copyright and licensing details.
*/ +#include + #include "qlcnic_sriov.h" #include "qlcnic.h" #include "qlcnic_83xx_hw.h" -#include #define QLC_BC_COMMAND 0 #define QLC_BC_RESPONSE 1 @@ -728,8 +729,6 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type) mbx->req.arg = NULL; return -ENOMEM; } - memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); - memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); mbx->req.arg[0] = (type | (mbx->req.num << 16) | (3 << 29)); mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index a29538b86..afd687e5e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -5,9 +5,10 @@ * See LICENSE.qlcnic for copyright and licensing details. */ +#include + #include "qlcnic_sriov.h" #include "qlcnic.h" -#include #define QLCNIC_SRIOV_VF_MAX_MAC 7 #define QLC_VF_MIN_TX_RATE 100 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 05c28f2c6..ccbb04503 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -7,10 +7,6 @@ #include #include - -#include "qlcnic.h" -#include "qlcnic_hw.h" - #include #include #include @@ -24,6 +20,9 @@ #include #endif +#include "qlcnic.h" +#include "qlcnic_hw.h" + int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index d79e33b3c..686334f45 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -157,6 +157,7 @@ enum { NWayAdvert = 0x66, /* MII ADVERTISE */ NWayLPAR = 0x68, /* MII LPA */ NWayExpansion = 0x6A, /* MII Expansion */ + TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */ Config5 = 0xD8, /* Config5 */ TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ @@ -341,6 +342,7 @@ struct cp_private { unsigned tx_tail; struct cp_desc *tx_ring; struct sk_buff *tx_skb[CP_TX_RING_SIZE]; + u32 tx_opts[CP_TX_RING_SIZE]; unsigned rx_buf_sz; unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? 
*/ @@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp) BUG_ON(!skb); dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), - le32_to_cpu(txd->opts1) & 0xffff, + cp->tx_opts[tx_tail] & 0xffff, PCI_DMA_TODEVICE); if (status & LastFrag) { @@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, { struct cp_private *cp = netdev_priv(dev); unsigned entry; - u32 eor, flags; + u32 eor, opts1; unsigned long intr_flags; __le32 opts2; int mss = 0; @@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, mss = skb_shinfo(skb)->gso_size; opts2 = cpu_to_le32(cp_tx_vlan_tag(skb)); + opts1 = DescOwn; + if (mss) + opts1 |= LargeSend | ((mss & MSSMask) << MSSShift); + else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + if (ip->protocol == IPPROTO_TCP) + opts1 |= IPCS | TCPCS; + else if (ip->protocol == IPPROTO_UDP) + opts1 |= IPCS | UDPCS; + else { + WARN_ONCE(1, + "Net bug: asked to checksum invalid Legacy IP packet\n"); + goto out_dma_error; + } + } if (skb_shinfo(skb)->nr_frags == 0) { struct cp_desc *txd = &cp->tx_ring[entry]; @@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, txd->addr = cpu_to_le64(mapping); wmb(); - flags = eor | len | DescOwn | FirstFrag | LastFrag; - - if (mss) - flags |= LargeSend | ((mss & MSSMask) << MSSShift); - else if (skb->ip_summed == CHECKSUM_PARTIAL) { - const struct iphdr *ip = ip_hdr(skb); - if (ip->protocol == IPPROTO_TCP) - flags |= IPCS | TCPCS; - else if (ip->protocol == IPPROTO_UDP) - flags |= IPCS | UDPCS; - else - WARN_ON(1); /* we need a WARN() */ - } + opts1 |= eor | len | FirstFrag | LastFrag; - txd->opts1 = cpu_to_le32(flags); + txd->opts1 = cpu_to_le32(opts1); wmb(); cp->tx_skb[entry] = skb; - entry = NEXT_TX(entry); + cp->tx_opts[entry] = opts1; + netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", + entry, skb->len); } else { struct cp_desc *txd; - u32 first_len, first_eor; + u32 first_len, first_eor, ctrl; dma_addr_t first_mapping; int frag, first_entry = entry; - const struct iphdr *ip = ip_hdr(skb); /* We must give this initial chunk to the device last. * Otherwise we could race with the device. @@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, goto out_dma_error; cp->tx_skb[entry] = skb; - entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; u32 len; - u32 ctrl; dma_addr_t mapping; + entry = NEXT_TX(entry); + len = skb_frag_size(this_frag); mapping = dma_map_single(&cp->pdev->dev, skb_frag_address(this_frag), @@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, eor = (entry == (CP_TX_RING_SIZE - 1)) ? 
RingEnd : 0; - ctrl = eor | len | DescOwn; - - if (mss) - ctrl |= LargeSend | - ((mss & MSSMask) << MSSShift); - else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (ip->protocol == IPPROTO_TCP) - ctrl |= IPCS | TCPCS; - else if (ip->protocol == IPPROTO_UDP) - ctrl |= IPCS | UDPCS; - else - BUG(); - } + ctrl = opts1 | eor | len; if (frag == skb_shinfo(skb)->nr_frags - 1) ctrl |= LastFrag; @@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, txd->opts1 = cpu_to_le32(ctrl); wmb(); + cp->tx_opts[entry] = ctrl; cp->tx_skb[entry] = skb; - entry = NEXT_TX(entry); } txd = &cp->tx_ring[first_entry]; @@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, txd->addr = cpu_to_le64(first_mapping); wmb(); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (ip->protocol == IPPROTO_TCP) - txd->opts1 = cpu_to_le32(first_eor | first_len | - FirstFrag | DescOwn | - IPCS | TCPCS); - else if (ip->protocol == IPPROTO_UDP) - txd->opts1 = cpu_to_le32(first_eor | first_len | - FirstFrag | DescOwn | - IPCS | UDPCS); - else - BUG(); - } else - txd->opts1 = cpu_to_le32(first_eor | first_len | - FirstFrag | DescOwn); + ctrl = opts1 | first_eor | first_len | FirstFrag; + txd->opts1 = cpu_to_le32(ctrl); wmb(); + + cp->tx_opts[first_entry] = ctrl; + netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n", + first_entry, entry, skb->len); } - cp->tx_head = entry; + cp->tx_head = NEXT_TX(entry); netdev_sent_queue(dev, skb->len); - netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", - entry, skb->len); if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) netif_stop_queue(dev); @@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp) { memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); + memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); cp_init_rings_index(cp); @@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp) desc = cp->rx_ring + i; dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), cp->rx_buf_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb(cp->rx_skb[i]); + dev_kfree_skb_any(cp->rx_skb[i]); } } @@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp) le32_to_cpu(desc->opts1) & 0xffff, PCI_DMA_TODEVICE); if (le32_to_cpu(desc->opts1) & LastFrag) - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); cp->dev->stats.tx_dropped++; } } @@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp) memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); + memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); @@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev) { struct cp_private *cp = netdev_priv(dev); unsigned long flags; - int rc; + int rc, i; netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n", cpr8(Cmd), cpr16(CpCmd), @@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev) spin_lock_irqsave(&cp->lock, flags); + netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n", + cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc)); + for (i = 0; i < CP_TX_RING_SIZE; i++) { + netif_dbg(cp, tx_err, cp->dev, + "TX slot %d @%p: %08x (%08x) %08x %llx %p\n", + i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1), + cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2), + 
le64_to_cpu(cp->tx_ring[i].addr), + cp->tx_skb[i]); + } + cp_stop_hw(cp); cp_clean_rings(cp); rc = cp_init_rings(cp); cp_start_hw(cp); - cp_enable_irq(cp); + __cp_set_rx_mode(dev); + cpw16_f(IntrMask, cp_norx_intr_mask); netif_wake_queue(dev); + napi_schedule_irqoff(&cp->napi); spin_unlock_irqrestore(&cp->lock, flags); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 70335f229..426a411a7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -637,6 +637,9 @@ enum rtl_register_content { /* _TBICSRBit */ TBILinkOK = 0x02000000, + /* ResetCounterCommand */ + CounterReset = 0x1, + /* DumpCounterCommand */ CounterDump = 0x8, @@ -747,6 +750,13 @@ struct rtl8169_counters { __le16 tx_underun; }; +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; +}; + enum rtl_flag { RTL_FLAG_TASK_ENABLED, RTL_FLAG_TASK_SLOW_PENDING, @@ -823,7 +833,9 @@ struct rtl8169_private { unsigned features; struct mii_if_info mii; - struct rtl8169_counters counters; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; u32 saved_wolopts; u32 opts1_mask; @@ -2165,65 +2177,121 @@ DECLARE_RTL_COND(rtl_counters_cond) { void __iomem *ioaddr = tp->mmio_addr; - return RTL_R32(CounterAddrLow) & CounterDump; + return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump); } -static void rtl8169_update_counters(struct net_device *dev) +static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; - struct device *d = &tp->pci_dev->dev; - struct rtl8169_counters *counters; - dma_addr_t paddr; + dma_addr_t paddr = tp->counters_phys_addr; u32 cmd; + bool ret; + + RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(CounterAddrLow, cmd); + RTL_W32(CounterAddrLow, cmd | counter_cmd); + + ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); + + RTL_W32(CounterAddrLow, 0); + RTL_W32(CounterAddrHigh, 0); + + return ret; +} + +static bool rtl8169_reset_counters(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + /* + * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the + * tally counters. + */ + if (tp->mac_version < RTL_GIGA_MAC_VER_19) + return true; + + return rtl8169_do_counters(dev, CounterReset); +} + +static bool rtl8169_update_counters(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; /* * Some chips are unable to dump tally counters when the receiver * is disabled. */ if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) - return; + return true; - counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL); - if (!counters) - return; + return rtl8169_do_counters(dev, CounterDump); +} - RTL_W32(CounterAddrHigh, (u64)paddr >> 32); - cmd = (u64)paddr & DMA_BIT_MASK(32); - RTL_W32(CounterAddrLow, cmd); - RTL_W32(CounterAddrLow, cmd | CounterDump); +static bool rtl8169_init_counter_offsets(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters = tp->counters; + bool ret = false; - if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000)) - memcpy(&tp->counters, counters, sizeof(*counters)); + /* + * rtl8169_init_counter_offsets is called from rtl_open. 
On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. + */ - RTL_W32(CounterAddrLow, 0); - RTL_W32(CounterAddrHigh, 0); + if (tp->tc_offset.inited) + return true; + + /* If both, reset and update fail, propagate to caller. */ + if (rtl8169_reset_counters(dev)) + ret = true; + + if (rtl8169_update_counters(dev)) + ret = true; + + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.inited = true; - dma_free_coherent(d, sizeof(*counters), counters, paddr); + return ret; } static void rtl8169_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters = tp->counters; ASSERT_RTNL(); rtl8169_update_counters(dev); - data[0] = le64_to_cpu(tp->counters.tx_packets); - data[1] = le64_to_cpu(tp->counters.rx_packets); - data[2] = le64_to_cpu(tp->counters.tx_errors); - data[3] = le32_to_cpu(tp->counters.rx_errors); - data[4] = le16_to_cpu(tp->counters.rx_missed); - data[5] = le16_to_cpu(tp->counters.align_errors); - data[6] = le32_to_cpu(tp->counters.tx_one_collision); - data[7] = le32_to_cpu(tp->counters.tx_multi_collision); - data[8] = le64_to_cpu(tp->counters.rx_unicast); - data[9] = le64_to_cpu(tp->counters.rx_broadcast); - data[10] = le32_to_cpu(tp->counters.rx_multicast); - data[11] = le16_to_cpu(tp->counters.tx_aborted); - data[12] = le16_to_cpu(tp->counters.tx_underun); + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); } static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -5995,7 +6063,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; - u16 rg_saw_cnt; + int rg_saw_cnt; u32 data; static const struct ephy_info e_info_8168h_1[] = { { 0x1e, 0x0800, 0x0001 }, @@ -7349,6 +7417,9 @@ process_pkt: tp->rx_stats.packets++; tp->rx_stats.bytes += pkt_size; u64_stats_update_end(&tp->rx_stats.syncp); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; } release_descriptor: desc->opts2 = 0; @@ -7613,6 +7684,9 @@ static int rtl_open(struct net_device *dev) rtl_hw_start(dev); + if (!rtl8169_init_counter_offsets(dev)) + netif_warn(tp, hw, dev, "counter reset/update failed\n"); + netif_start_queue(dev); rtl_unlock_work(tp); @@ -7645,6 +7719,7 
@@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; + struct rtl8169_counters *counters = tp->counters; unsigned int start; if (netif_running(dev)) @@ -7656,7 +7731,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_bytes = tp->rx_stats.bytes; } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); - do { start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); stats->tx_packets = tp->tx_stats.packets; @@ -7670,6 +7744,24 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_crc_errors = dev->stats.rx_crc_errors; stats->rx_fifo_errors = dev->stats.rx_fifo_errors; stats->rx_missed_errors = dev->stats.rx_missed_errors; + stats->multicast = dev->stats.multicast; + + /* + * Fetch additional counter values missing in stats collected by driver + * from tally counters. + */ + rtl8169_update_counters(dev); + + /* + * Subtract values fetched during initialization. + * See rtl8169_init_counter_offsets for a description of why we do that. + */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); return stats; } @@ -7870,6 +7962,9 @@ static void rtl_remove_one(struct pci_dev *pdev) unregister_netdev(dev); + + dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters), + tp->counters, tp->counters_phys_addr); + rtl_release_firmware(tp); if (pci_dev_run_wake(pdev)) @@ -8295,9 +8390,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + tp->counters = dma_alloc_coherent(&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, GFP_KERNEL); + if (!tp->counters) { + rc = -ENOMEM; + goto err_out_msi_4; + } + rc = register_netdev(dev); if (rc < 0) - goto err_out_msi_4; + goto err_out_cnt_5; pci_set_drvdata(pdev, dev); @@ -8331,6 +8433,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) out: return rc; +err_out_cnt_5: + dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters, + tp->counters_phys_addr); err_out_msi_4: netif_napi_del(&tp->napi); rtl_disable_msi(pdev, tp); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 8aa50ac4e..a157aaaaf 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -658,6 +658,8 @@ struct ravb_desc { __le32 dptr; /* Descriptor pointer */ }; +#define DPTR_ALIGN 4 /* Required descriptor pointer alignment */ + enum DIE_DT { /* Frame data */ DT_FMID = 0x40, @@ -739,6 +741,7 @@ enum RAVB_QUEUE { #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 +#define NUM_TX_DESC 2 /* TX descriptors per packet */ struct ravb_tstamp_skb { struct list_head list; @@ -777,9 +780,9 @@ struct ravb_private { dma_addr_t tx_desc_dma[NUM_TX_QUEUE]; struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE]; struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE]; + void *tx_align[NUM_TX_QUEUE]; struct sk_buff **rx_skb[NUM_RX_QUEUE]; struct sk_buff **tx_skb[NUM_TX_QUEUE]; - void **tx_buffers[NUM_TX_QUEUE]; u32 rx_over_errors; u32 rx_fifo_errors; struct net_device_stats stats[NUM_RX_QUEUE]; diff --git a/drivers/net/ethernet/renesas/ravb_main.c
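The rtl_init_one/rtl_remove_one hunks above place the counter page in a DMA-coherent buffer allocated before register_netdev() and freed both in remove and on the new err_out_cnt_5 unwind label. A standalone sketch of that acquire-in-order, release-in-reverse pattern, with invented names standing in for the kernel calls:

    #include <stdlib.h>

    static int register_dev(void) { return 0; }    /* register_netdev() stand-in */

    static int probe(void)
    {
            void *counters = calloc(1, 64);        /* dma_alloc_coherent() stand-in */
            int rc;

            if (!counters)
                    return -1;

            rc = register_dev();
            if (rc < 0)
                    goto err_free_counters;        /* the patch's err_out_cnt_5 */
            return 0;          /* on success, the buffer lives until remove() */

    err_free_counters:
            free(counters);    /* the same free is mirrored on the remove path */
            return rc;
    }

    int main(void)
    {
            return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
    }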
b/drivers/net/ethernet/renesas/ravb_main.c index 78849dd4e..450899e9c 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -195,12 +195,8 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_skb[q] = NULL; /* Free aligned TX buffers */ - if (priv->tx_buffers[q]) { - for (i = 0; i < priv->num_tx_ring[q]; i++) - kfree(priv->tx_buffers[q][i]); - } - kfree(priv->tx_buffers[q]); - priv->tx_buffers[q] = NULL; + kfree(priv->tx_align[q]); + priv->tx_align[q] = NULL; if (priv->rx_ring[q]) { ring_size = sizeof(struct ravb_ex_rx_desc) * @@ -212,7 +208,7 @@ static void ravb_ring_free(struct net_device *ndev, int q) if (priv->tx_ring[q]) { ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] + 1); + (priv->num_tx_ring[q] * NUM_TX_DESC + 1); dma_free_coherent(NULL, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; @@ -223,11 +219,12 @@ static void ravb_ring_free(struct net_device *ndev, int q) static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - struct ravb_ex_rx_desc *rx_desc = NULL; - struct ravb_tx_desc *tx_desc = NULL; - struct ravb_desc *desc = NULL; + struct ravb_ex_rx_desc *rx_desc; + struct ravb_tx_desc *tx_desc; + struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; - int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; + int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * + NUM_TX_DESC; dma_addr_t dma_addr; int i; @@ -260,11 +257,12 @@ static void ravb_ring_format(struct net_device *ndev, int q) memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ - for (i = 0; i < priv->num_tx_ring[q]; i++) { - tx_desc = &priv->tx_ring[q][i]; + for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; + i++, tx_desc++) { + tx_desc->die_dt = DT_EEMPTY; + tx_desc++; tx_desc->die_dt = DT_EEMPTY; } - tx_desc = &priv->tx_ring[q][i]; tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); tx_desc->die_dt = DT_LINKFIX; /* type */ @@ -285,7 +283,6 @@ static int ravb_ring_init(struct net_device *ndev, int q) struct ravb_private *priv = netdev_priv(ndev); struct sk_buff *skb; int ring_size; - void *buffer; int i; /* Allocate RX and TX skb rings */ @@ -305,19 +302,11 @@ static int ravb_ring_init(struct net_device *ndev, int q) } /* Allocate rings for the aligned buffers */ - priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], - sizeof(*priv->tx_buffers[q]), GFP_KERNEL); - if (!priv->tx_buffers[q]) + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) goto error; - for (i = 0; i < priv->num_tx_ring[q]; i++) { - buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); - if (!buffer) - goto error; - /* Aligned TX buffer */ - priv->tx_buffers[q][i] = buffer; - } - /* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, @@ -329,7 +318,8 @@ static int ravb_ring_init(struct net_device *ndev, int q) priv->dirty_rx[q] = 0; /* Allocate all TX descriptors. 
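With NUM_TX_DESC = 2, every transmit consumes two data descriptors, so the ring sizing above becomes num_tx_ring * NUM_TX_DESC + 1; the extra slot is the trailing DT_LINKFIX descriptor that points back to the ring head. A standalone check of that arithmetic, with an invented ring size:

    #include <assert.h>
    #include <stddef.h>

    #define NUM_TX_DESC 2   /* data descriptors per packet, as in the patch */

    /* Slots needed for n packets: two per packet plus one link descriptor. */
    static size_t tx_ring_entries(size_t num_tx_ring)
    {
            return num_tx_ring * NUM_TX_DESC + 1;
    }

    int main(void)
    {
            assert(tx_ring_entries(64) == 129);    /* a 64-packet ring */
            return 0;
    }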
*/ - ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1); + ring_size = sizeof(struct ravb_tx_desc) * + (priv->num_tx_ring[q] * NUM_TX_DESC + 1); priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size, &priv->tx_desc_dma[q], GFP_KERNEL); @@ -439,11 +429,12 @@ static int ravb_tx_free(struct net_device *ndev, int q) struct net_device_stats *stats = &priv->stats[q]; struct ravb_tx_desc *desc; int free_num = 0; - int entry = 0; + int entry; u32 size; for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { - entry = priv->dirty_tx[q] % priv->num_tx_ring[q]; + entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * + NUM_TX_DESC); desc = &priv->tx_ring[q][entry]; if (desc->die_dt != DT_FEMPTY) break; @@ -451,14 +442,18 @@ static int ravb_tx_free(struct net_device *ndev, int q) dma_rmb(); size = le16_to_cpu(desc->ds_tagl) & TX_DS; /* Free the original skb. */ - if (priv->tx_skb[q][entry]) { + if (priv->tx_skb[q][entry / NUM_TX_DESC]) { dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), size, DMA_TO_DEVICE); - dev_kfree_skb_any(priv->tx_skb[q][entry]); - priv->tx_skb[q][entry] = NULL; + /* Last packet descriptor? */ + if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { + entry /= NUM_TX_DESC; + dev_kfree_skb_any(priv->tx_skb[q][entry]); + priv->tx_skb[q][entry] = NULL; + stats->tx_packets++; + } free_num++; } - stats->tx_packets++; stats->tx_bytes += size; desc->die_dt = DT_EEMPTY; } @@ -512,8 +507,8 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) struct sk_buff *skb; dma_addr_t dma_addr; struct timespec64 ts; - u16 pkt_len = 0; u8 desc_status; + u16 pkt_len; int limit; boguscnt = min(boguscnt, *quota); @@ -1277,44 +1272,60 @@ static void ravb_tx_timeout_work(struct work_struct *work) static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - struct ravb_tstamp_skb *ts_skb = NULL; u16 q = skb_get_queue_mapping(skb); + struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; unsigned long flags; u32 dma_addr; void *buffer; u32 entry; + u32 len; spin_lock_irqsave(&priv->lock, flags); - if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { + if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * + NUM_TX_DESC) { netif_err(priv, tx_queued, ndev, "still transmitting with the full ring!\n"); netif_stop_subqueue(ndev, q); spin_unlock_irqrestore(&priv->lock, flags); return NETDEV_TX_BUSY; } - entry = priv->cur_tx[q] % priv->num_tx_ring[q]; - priv->tx_skb[q][entry] = skb; + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); + priv->tx_skb[q][entry / NUM_TX_DESC] = skb; if (skb_put_padto(skb, ETH_ZLEN)) goto drop; - buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN); - memcpy(buffer, skb->data, skb->len); - desc = &priv->tx_ring[q][entry]; - desc->ds_tagl = cpu_to_le16(skb->len); - dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE); + buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + + entry / NUM_TX_DESC * DPTR_ALIGN; + len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE); if (dma_mapping_error(&ndev->dev, dma_addr)) goto drop; + + desc = &priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr); + + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) + goto unmap; + + 
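The transmit path above satisfies the 4-byte descriptor-pointer alignment by splitting each frame in two: the unaligned head (at most DPTR_ALIGN - 1 bytes) is copied into the per-queue tx_align scratch area, and the aligned remainder is DMA-mapped in place. A standalone sketch of the head-length computation, equivalent to the patch's PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data:

    #include <assert.h>
    #include <stdint.h>

    #define DPTR_ALIGN 4

    /* Bytes (0..DPTR_ALIGN-1) needed to reach the next aligned address. */
    static uintptr_t head_len(const void *data)
    {
            uintptr_t a = (uintptr_t)data;

            return (DPTR_ALIGN - (a % DPTR_ALIGN)) % DPTR_ALIGN;
    }

    int main(void)
    {
            uint32_t word[4];                  /* guaranteed 4-byte aligned */
            char *p = (char *)word;

            assert(head_len(p) == 0);          /* aligned: nothing to copy */
            assert(head_len(p + 1) == 3);      /* copy 3 bytes, map the rest */
            return 0;
    }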
desc++; + desc->ds_tagl = cpu_to_le16(len); desc->dptr = cpu_to_le32(dma_addr); /* TX timestamp required */ if (q == RAVB_NC) { ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); if (!ts_skb) { - dma_unmap_single(&ndev->dev, dma_addr, skb->len, + desc--; + dma_unmap_single(&ndev->dev, dma_addr, len, DMA_TO_DEVICE); - goto drop; + goto unmap; } ts_skb->skb = skb; ts_skb->tag = priv->ts_skb_tag++; @@ -1330,13 +1341,15 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Descriptor type must be set after all the above writes */ dma_wmb(); - desc->die_dt = DT_FSINGLE; + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); - priv->cur_tx[q]++; - if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && - !ravb_tx_free(ndev, q)) + priv->cur_tx[q] += NUM_TX_DESC; + if (priv->cur_tx[q] - priv->dirty_tx[q] > + (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) netif_stop_subqueue(ndev, q); exit: @@ -1344,9 +1357,12 @@ exit: spin_unlock_irqrestore(&priv->lock, flags); return NETDEV_TX_OK; +unmap: + dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), + le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); drop: dev_kfree_skb_any(skb); - priv->tx_skb[q][entry] = NULL; + priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; goto exit; } @@ -1643,7 +1659,7 @@ static int ravb_probe(struct platform_device *pdev) ndev->dma = -1; irq = platform_get_irq(pdev, 0); if (irq < 0) { - error = -ENODEV; + error = irq; goto out_release; } ndev->irq = irq; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 7fb244f56..a484d8beb 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1127,7 +1127,7 @@ static void sh_eth_ring_format(struct net_device *ndev) struct sh_eth_txdesc *txdesc = NULL; int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; - int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; dma_addr_t dma_addr; mdp->cur_rx = 0; @@ -1148,8 +1148,8 @@ static void sh_eth_ring_format(struct net_device *ndev) /* RX descriptor */ rxdesc = &mdp->rx_ring[i]; - /* The size of the buffer is a multiple of 16 bytes. */ - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); + /* The size of the buffer is a multiple of 32 bytes. */ + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); dma_addr = dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, DMA_FROM_DEVICE); @@ -1450,7 +1450,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) struct sk_buff *skb; u16 pkt_len = 0; u32 desc_status; - int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; dma_addr_t dma_addr; boguscnt = min(boguscnt, *quota); @@ -1506,7 +1506,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (mdp->cd->rpadir) skb_reserve(skb, NET_IP_ALIGN); dma_unmap_single(&ndev->dev, rxdesc->addr, - ALIGN(mdp->rx_buf_sz, 16), + ALIGN(mdp->rx_buf_sz, 32), DMA_FROM_DEVICE); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); @@ -1524,8 +1524,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { entry = mdp->dirty_rx % mdp->num_rx_ring; rxdesc = &mdp->rx_ring[entry]; - /* The size of the buffer is 16 byte boundary. 
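The sh_eth hunks widen the RX buffer alignment from a 16- to a 32-byte boundary; the descriptor length is rounded up with ALIGN() and the skb allocation gains 32 - 1 bytes of slack so the rounded length always fits. A standalone check of that invariant, with an example buffer size invented for illustration:

    #include <assert.h>
    #include <stddef.h>

    /* Same rounding as the kernel's ALIGN() for a power-of-two boundary. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            size_t rx_buf_sz = 1538;                  /* example size */
            size_t skbuff_size = rx_buf_sz + 32 - 1;  /* the patch's slack */

            assert(ALIGN_UP(rx_buf_sz, 32) == 1568);
            /* rounding up adds at most 31 bytes, which the slack covers */
            assert(ALIGN_UP(rx_buf_sz, 32) <= skbuff_size);
            return 0;
    }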
*/ - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); + /* The size of the buffer is 32 byte boundary. */ + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); if (mdp->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb(ndev, skbuff_size); @@ -3089,10 +3089,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) ndev->dma = -1; ret = platform_get_irq(pdev, 0); - if (ret < 0) { - ret = -ENODEV; + if (ret < 0) goto out_release; - } ndev->irq = ret; SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 2e7f9a283..34ac41ac9 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -202,6 +202,7 @@ enum { ROCKER_CTRL_IPV4_MCAST, ROCKER_CTRL_IPV6_MCAST, ROCKER_CTRL_DFLT_BRIDGING, + ROCKER_CTRL_DFLT_OVS, ROCKER_CTRL_MAX, }; @@ -323,7 +324,14 @@ static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port, static bool rocker_port_is_bridged(const struct rocker_port *rocker_port) { - return !!rocker_port->bridge_dev; + return rocker_port->bridge_dev && + netif_is_bridge_master(rocker_port->bridge_dev); +} + +static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) +{ + return rocker_port->bridge_dev && + netif_is_ovs_master(rocker_port->bridge_dev); } #define ROCKER_OP_FLAG_REMOVE BIT(0) @@ -1817,6 +1825,30 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, return 0; } +static int +rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + int mtu = *(int *)priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, + mtu)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + static int rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, @@ -1874,6 +1906,14 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, macaddr, NULL, NULL); } +static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, + int mtu) +{ + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0, + rocker_cmd_set_port_settings_mtu_prep, + &mtu, NULL, NULL); +} + static int rocker_port_set_learning(struct rocker_port *rocker_port, enum switchdev_trans trans) { @@ -3243,6 +3283,12 @@ static struct rocker_ctrl { .bridge = true, .copy_to_cpu = true, }, + [ROCKER_CTRL_DFLT_OVS] = { + /* pass all pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .acl = true, + }, }; static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, @@ -3755,11 +3801,14 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, break; case BR_STATE_LEARNING: case BR_STATE_FORWARDING: - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + if (!rocker_port_is_ovsed(rocker_port)) + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; want[ROCKER_CTRL_IPV4_MCAST] = true; want[ROCKER_CTRL_IPV6_MCAST] = true; if (rocker_port_is_bridged(rocker_port)) want[ROCKER_CTRL_DFLT_BRIDGING] = true; + else if (rocker_port_is_ovsed(rocker_port)) + want[ROCKER_CTRL_DFLT_OVS] = true; else 
want[ROCKER_CTRL_LOCAL_ARP] = true; break; @@ -3983,7 +4032,8 @@ static int rocker_port_open(struct net_device *dev) napi_enable(&rocker_port->napi_tx); napi_enable(&rocker_port->napi_rx); - rocker_port_set_enable(rocker_port, true); + if (!dev->proto_down) + rocker_port_set_enable(rocker_port, true); netif_start_queue(dev); return 0; @@ -4102,8 +4152,11 @@ static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) skb->data, skb_headlen(skb)); if (err) goto nest_cancel; - if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) - goto nest_cancel; + if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { + err = skb_linearize(skb); + if (err) + goto unmap_frags; + } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -4152,6 +4205,34 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p) return 0; } +static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int running = netif_running(dev); + int err; + +#define ROCKER_PORT_MIN_MTU 68 +#define ROCKER_PORT_MAX_MTU 9000 + + if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) + return -EINVAL; + + if (running) + rocker_port_stop(dev); + + netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); + dev->mtu = new_mtu; + + err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); + if (err) + return err; + + if (running) + err = rocker_port_open(dev); + + return err; +} + static int rocker_port_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { @@ -4167,11 +4248,33 @@ static int rocker_port_get_phys_port_name(struct net_device *dev, return err ? -EOPNOTSUPP : 0; } +static int rocker_port_change_proto_down(struct net_device *dev, + bool proto_down) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_port->dev->flags & IFF_UP) + rocker_port_set_enable(rocker_port, !proto_down); + rocker_port->dev->proto_down = proto_down; + return 0; +} + +static void rocker_port_neigh_destroy(struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(n->dev); + int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *)n->primary_key; + + rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE, + flags, ip_addr, n->ha); +} + static const struct net_device_ops rocker_port_netdev_ops = { .ndo_open = rocker_port_open, .ndo_stop = rocker_port_stop, .ndo_start_xmit = rocker_port_xmit, .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_change_mtu = rocker_port_change_mtu, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, @@ -4179,6 +4282,8 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, .ndo_get_phys_port_name = rocker_port_get_phys_port_name, + .ndo_change_proto_down = rocker_port_change_proto_down, + .ndo_neigh_destroy = rocker_port_neigh_destroy, }; /******************** @@ -4445,6 +4550,7 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port, if (found->key.pport != rocker_port->pport) continue; fdb->addr = found->key.addr; + fdb->ndm_state = NUD_REACHABLE; fdb->vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id); err = obj->cb(rocker_port->dev, obj); @@ -4726,6 +4832,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker, const struct rocker_tlv 
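Two of the rocker additions above share one idea: the netdev can stay administratively up while the switch port itself is held down. rocker_port_open() now skips enabling the port when proto_down is set, and ndo_change_proto_down toggles the same enable at runtime. A standalone model of that gating, with the port state reduced to three flags:

    #include <stdbool.h>
    #include <stdio.h>

    struct port { bool if_up; bool proto_down; bool hw_enabled; };

    /* The single rule both ndo_open and ndo_change_proto_down apply. */
    static void port_sync_enable(struct port *p)
    {
            p->hw_enabled = p->if_up && !p->proto_down;
    }

    int main(void)
    {
            struct port p = { .if_up = true, .proto_down = false };

            port_sync_enable(&p);
            printf("enabled=%d\n", p.hw_enabled);  /* 1 */

            p.proto_down = true;          /* protocol layer holds it down */
            port_sync_enable(&p);
            printf("enabled=%d\n", p.hw_enabled);  /* 0: up, HW disabled */
            return 0;
    }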
*attrs[ROCKER_TLV_RX_MAX + 1]; struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); size_t rx_len; + u16 rx_flags = 0; if (!skb) return -ENOENT; @@ -4733,6 +4840,8 @@ static int rocker_port_rx_proc(const struct rocker *rocker, rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) return -EINVAL; + if (attrs[ROCKER_TLV_RX_FLAGS]) + rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); rocker_dma_rx_ring_skb_unmap(rocker, attrs); @@ -4740,6 +4849,9 @@ static int rocker_port_rx_proc(const struct rocker *rocker, skb_put(skb, rx_len); skb->protocol = eth_type_trans(skb, rocker_port->dev); + if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) + skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; + rocker_port->dev->stats.rx_packets++; rocker_port->dev->stats.rx_bytes += skb->len; @@ -4869,7 +4981,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) NAPI_POLL_WEIGHT); rocker_carrier_init(rocker_port); - dev->features |= NETIF_F_NETNS_LOCAL; + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; err = register_netdev(dev); if (err) { @@ -4878,11 +4990,13 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) } rocker->ports[port_number] = rocker_port; + switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); + rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE); err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0); if (err) { - dev_err(&pdev->dev, "install ig port table failed\n"); + netdev_err(rocker_port->dev, "install ig port table failed\n"); goto err_port_ig_tbl; } @@ -4902,6 +5016,7 @@ err_untagged_vlan: rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, ROCKER_OP_FLAG_REMOVE); err_port_ig_tbl: + rocker->ports[port_number] = NULL; unregister_netdev(dev); err_register_netdev: free_netdev(dev); @@ -5074,7 +5189,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_probe_ports; } - dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id); + dev_info(&pdev->dev, "Rocker switch with id %*phN\n", + (int)sizeof(rocker->hw.id), &rocker->hw.id); return 0; @@ -5157,6 +5273,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port, rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); rocker_port->bridge_dev = bridge; + switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true); return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, untagged_vid, 0); @@ -5177,6 +5294,8 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) rocker_port_internal_vlan_id_get(rocker_port, rocker_port->dev->ifindex); + switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev, + false); rocker_port->bridge_dev = NULL; err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, @@ -5191,46 +5310,77 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) return err; } -static int rocker_port_master_changed(struct net_device *dev) + +static int rocker_port_ovs_changed(struct rocker_port *rocker_port, + struct net_device *master) +{ + int err; + + rocker_port->bridge_dev = master; + + err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0); + if (err) + return err; + err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0); + + return err; +} + +static int rocker_port_master_linked(struct rocker_port *rocker_port, + struct net_device *master) { - struct rocker_port *rocker_port = netdev_priv(dev); - struct net_device *master = 
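The RX hunk above is where the new ROCKER_RX_FLAGS_FWD_OFFLOAD bit lands on the packet: when the ASIC has already forwarded the frame, skb->offload_fwd_mark is set so the software bridge can tell hardware-forwarded traffic apart and avoid flooding it a second time. A standalone model of the flag plumbing, with the types deliberately simplified:

    #include <stdbool.h>
    #include <stdint.h>

    #define RX_FLAGS_FWD_OFFLOAD (1u << 8)  /* as ROCKER_RX_FLAGS_FWD_OFFLOAD */

    struct pkt { bool hw_forwarded; };

    /* RX completion: copy the descriptor flag onto the packet. */
    static void rx_mark(struct pkt *p, uint16_t rx_flags)
    {
            p->hw_forwarded = !!(rx_flags & RX_FLAGS_FWD_OFFLOAD);
    }

    /* Bridge side: only software-forward what the HW did not handle. */
    static bool needs_sw_flood(const struct pkt *p)
    {
            return !p->hw_forwarded;
    }

    int main(void)
    {
            struct pkt p;

            rx_mark(&p, RX_FLAGS_FWD_OFFLOAD);
            return needs_sw_flood(&p);     /* 0: HW already forwarded it */
    }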
netdev_master_upper_dev_get(dev); int err = 0; - /* There are currently three cases handled here: - * 1. Joining a bridge - * 2. Leaving a previously joined bridge - * 3. Other, e.g. being added to or removed from a bond or openvswitch, - * in which case nothing is done - */ - if (master && master->rtnl_link_ops && - !strcmp(master->rtnl_link_ops->kind, "bridge")) + if (netif_is_bridge_master(master)) err = rocker_port_bridge_join(rocker_port, master); - else if (rocker_port_is_bridged(rocker_port)) - err = rocker_port_bridge_leave(rocker_port); + else if (netif_is_ovs_master(master)) + err = rocker_port_ovs_changed(rocker_port, master); + return err; +} +static int rocker_port_master_unlinked(struct rocker_port *rocker_port) +{ + int err = 0; + + if (rocker_port_is_bridged(rocker_port)) + err = rocker_port_bridge_leave(rocker_port); + else if (rocker_port_is_ovsed(rocker_port)) + err = rocker_port_ovs_changed(rocker_port, NULL); return err; } static int rocker_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + struct rocker_port *rocker_port; int err; + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + switch (event) { case NETDEV_CHANGEUPPER: - dev = netdev_notifier_info_to_dev(ptr); - if (!rocker_port_dev_check(dev)) - return NOTIFY_DONE; - err = rocker_port_master_changed(dev); - if (err) - netdev_warn(dev, - "failed to reflect master change (err %d)\n", - err); + info = ptr; + if (!info->master) + goto out; + rocker_port = netdev_priv(dev); + if (info->linking) { + err = rocker_port_master_linked(rocker_port, + info->upper_dev); + if (err) + netdev_warn(dev, "failed to reflect master linked (err %d)\n", + err); + } else { + err = rocker_port_master_unlinked(rocker_port); + if (err) + netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", + err); + } break; } - +out: return NOTIFY_DONE; } diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index c61fbf968..12490b2f6 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -159,6 +159,7 @@ enum { ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, ROCKER_TLV_CMD_PORT_SETTINGS_MAX = @@ -245,6 +246,7 @@ enum { #define ROCKER_RX_FLAGS_TCP BIT(5) #define ROCKER_RX_FLAGS_UDP BIT(6) #define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) +#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) enum { ROCKER_TLV_TX_UNSPEC, diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b1a4ea21c..286cc6b69 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -49,6 +49,12 @@ enum { */ #define HUNT_FILTER_TBL_ROWS 8192 +#define EFX_EF10_FILTER_ID_INVALID 0xffff +struct efx_ef10_dev_addr { + u8 addr[ETH_ALEN]; + u16 id; +}; + struct efx_ef10_filter_table { /* The RX match field masks supported by this fw & hw, in order of priority */ enum efx_filter_match_flags rx_match_flags[ @@ -69,13 +75,14 @@ struct efx_ef10_filter_table { /* Shadow of net_device address lists, guarded by mac_lock */ #define EFX_EF10_FILTER_DEV_UC_MAX 32 #define EFX_EF10_FILTER_DEV_MC_MAX 256 - struct { - u8 addr[ETH_ALEN]; - u16 id; - } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX], - 
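The notifier rewrite above stops guessing the master type from rtnl_link_ops->kind strings and instead consumes the NETDEV_CHANGEUPPER payload, which names the upper device and says whether it is being linked or unlinked. A standalone model of the new dispatch, with the notifier structures simplified:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified netdev_notifier_changeupper_info. */
    struct changeupper_info { bool master; bool linking; };

    static void on_changeupper(const struct changeupper_info *info)
    {
            if (!info->master)
                    return;    /* only bridge/ovs masters matter here */
            if (info->linking)
                    puts("master linked: join bridge or ovs");
            else
                    puts("master unlinked: leave whichever we were in");
    }

    int main(void)
    {
            struct changeupper_info ev = { .master = true, .linking = true };

            on_changeupper(&ev);
            return 0;
    }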
dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; - int dev_uc_count; /* negative for PROMISC */ - int dev_mc_count; /* negative for PROMISC/ALLMULTI */ + struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; + struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; + int dev_uc_count; + int dev_mc_count; +/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */ + u16 ucdef_id; + u16 bcast_id; + u16 mcdef_id; }; /* An arbitrary search limit for the software hash table */ @@ -288,11 +295,11 @@ static int efx_ef10_probe(struct efx_nic *efx) /* We can have one VI for each 8K region. However, until we * use TX option descriptors we need two TX queues per channel. */ - efx->max_channels = - min_t(unsigned int, - EFX_MAX_CHANNELS, - efx_ef10_mem_map_size(efx) / - (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); + efx->max_channels = min_t(unsigned int, + EFX_MAX_CHANNELS, + efx_ef10_mem_map_size(efx) / + (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); + efx->max_tx_channels = efx->max_channels; if (WARN_ON(efx->max_channels == 0)) return -EIO; @@ -387,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx) * First try to enable it, then if we get EPERM, just * ask if it's already enabled */ - rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); + rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL); if (rc == 0) { nic_data->workaround_35388 = true; } else if (rc == -EPERM) { @@ -817,11 +824,13 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; unsigned int uc_mem_map_size, wc_mem_map_size; - unsigned int min_vis, pio_write_vi_base, max_vis; + unsigned int min_vis = max(EFX_TXQ_TYPES, + efx_separate_tx_channels ? 2 : 1); + unsigned int channel_vis, pio_write_vi_base, max_vis; void __iomem *membase; int rc; - min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); #ifdef EFX_USE_PIO /* Try to allocate PIO buffers if wanted and if the full @@ -855,11 +864,11 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) * page size is >4K). So we may allocate some extra VIs just * for writing PIO buffers through. * - * The UC mapping contains (min_vis - 1) complete VIs and the + * The UC mapping contains (channel_vis - 1) complete VIs and the * first half of the next VI. Then the WC mapping begins with * the second half of this last VI. */ - uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + + uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF); if (nic_data->n_piobufs) { /* pio_write_vi_base rounds down to give the number of complete @@ -874,7 +883,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) } else { pio_write_vi_base = 0; wc_mem_map_size = 0; - max_vis = min_vis; + max_vis = channel_vis; } /* In case the last attached driver failed to free VIs, do it now */ @@ -886,6 +895,23 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) if (rc != 0) return rc; + if (nic_data->n_allocated_vis < channel_vis) { + netif_info(efx, drv, efx->net_dev, + "Could not allocate enough VIs to satisfy RSS" + " requirements. Performance may not be optimal.\n"); + /* We didn't get the VIs to populate our channels. + * We could keep what we got but then we'd have more + * interrupts than we need. 
+ * Instead calculate new max_channels and restart + */ + efx->max_channels = nic_data->n_allocated_vis; + efx->max_tx_channels = + nic_data->n_allocated_vis / EFX_TXQ_TYPES; + + efx_ef10_free_vis(efx); + return -EAGAIN; + } + /* If we didn't get enough VIs to map all the PIO buffers, free the * PIO buffers */ @@ -984,12 +1010,24 @@ static int efx_ef10_init_nic(struct efx_nic *efx) static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; +#ifdef CONFIG_SFC_SRIOV + unsigned int i; +#endif /* All our allocations have been reset */ nic_data->must_realloc_vis = true; nic_data->must_restore_filters = true; nic_data->must_restore_piobufs = true; nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + + /* Driver-created vswitches and vports must be re-created */ + nic_data->must_probe_vswitching = true; + nic_data->vport_id = EVB_PORT_ID_ASSIGNED; +#ifdef CONFIG_SFC_SRIOV + if (nic_data->vf) + for (i = 0; i < efx->vf_count; i++) + nic_data->vf[i].vport_id = 0; +#endif } static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) @@ -1034,6 +1072,12 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) { int rc = efx_mcdi_reset(efx, reset_type); + /* Unprivileged functions return -EPERM, but need to return success + * here so that the datapath is brought back up. + */ + if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) + rc = 0; + /* If it was a port reset, trigger reallocation of MC resources. * Note that on an MC reset nothing needs to be done now because we'll * detect the MC reset later and handle it then. @@ -1583,10 +1627,6 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) /* All our allocations have been reset */ efx_ef10_reset_mc_allocations(efx); - /* Driver-created vswitches and vports must be re-created */ - nic_data->must_probe_vswitching = true; - nic_data->vport_id = EVB_PORT_ID_ASSIGNED; - /* The datapath firmware might have been changed */ nic_data->must_check_datapath_caps = true; @@ -1809,7 +1849,9 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) unsigned int write_ptr; efx_qword_t *txd; - BUG_ON(tx_queue->write_count == tx_queue->insert_count); + tx_queue->xmit_more_available = false; + if (unlikely(tx_queue->write_count == tx_queue->insert_count)) + return; do { write_ptr = tx_queue->write_count & tx_queue->ptr_mask; @@ -2222,6 +2264,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel) GFP_KERNEL); } +static void efx_ef10_ev_fini(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + struct efx_nic *efx = channel->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, + outbuf, outlen, rc); +} + static int efx_ef10_ev_init(struct efx_channel *channel) { MCDI_DECLARE_BUF(inbuf, @@ -2233,6 +2298,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel) struct efx_ef10_nic_data *nic_data; bool supports_rx_merge; size_t inlen, outlen; + unsigned int enabled, implemented; dma_addr_t dma_addr; int rc; int i; @@ -2273,30 +2339,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel) rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, outbuf, sizeof(outbuf), &outlen); /* IRQ return is ignored 
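efx_ef10_tx_write() above (and efx_farch_tx_write() in a later hunk) replaces a BUG_ON with a quiet early return: under xmit_more batching, being asked to push descriptors when none are pending is now a normal case rather than a driver bug. A standalone model of the guard, with the queue reduced to two counters:

    #include <stdbool.h>
    #include <stdio.h>

    struct txq {
            unsigned int insert_count;    /* queued by the stack */
            unsigned int write_count;     /* pushed to hardware */
            bool xmit_more_available;
    };

    static void tx_write(struct txq *q)
    {
            q->xmit_more_available = false;
            if (q->write_count == q->insert_count)
                    return;               /* nothing pending: fine now */
            while (q->write_count != q->insert_count)
                    q->write_count++;     /* "write" one descriptor */
            /* ...ring the doorbell once, here... */
    }

    int main(void)
    {
            struct txq q = { .insert_count = 3 };

            tx_write(&q);    /* pushes 3 descriptors */
            tx_write(&q);    /* no-op instead of a BUG_ON */
            printf("written=%u\n", q.write_count);
            return 0;
    }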
*/ - return rc; -} - -static void efx_ef10_ev_fini(struct efx_channel *channel) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); - MCDI_DECLARE_BUF_ERR(outbuf); - struct efx_nic *efx = channel->efx; - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); - - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + if (channel->channel || rc) + return rc; - if (rc && rc != -EALREADY) + /* Successfully created event queue on channel 0 */ + rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); + if (rc == -ENOSYS) { + /* GET_WORKAROUNDS was implemented before the bug26807 + * workaround, thus the latter must be unavailable in this fw + */ + nic_data->workaround_26807 = false; + rc = 0; + } else if (rc) { goto fail; + } else { + nic_data->workaround_26807 = + !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); + + if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && + !nic_data->workaround_26807) { + unsigned int flags; + + rc = efx_mcdi_set_workaround(efx, + MC_CMD_WORKAROUND_BUG26807, + true, &flags); + + if (!rc) { + if (flags & + 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { + netif_info(efx, drv, efx->net_dev, + "other functions on NIC have been reset\n"); + /* MC's boot count has incremented */ + ++nic_data->warm_boot_count; + } + nic_data->workaround_26807 = true; + } else if (rc == -EPERM) { + rc = 0; + } + } + } - return; + if (!rc) + return 0; fail: - efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, - outbuf, outlen, rc); + efx_ef10_ev_fini(channel); + return rc; } static void efx_ef10_ev_remove(struct efx_channel *channel) @@ -3250,6 +3338,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, filter_id, false); } +static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) +{ + return filter_id % HUNT_FILTER_TBL_ROWS; +} + +static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx_ef10_filter_remove_internal(efx, 1U << priority, + filter_id, true); +} + static int efx_ef10_filter_get_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *spec) @@ -3623,6 +3724,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) goto fail; } + table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; + table->bcast_id = EFX_EF10_FILTER_ID_INVALID; + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + efx->filter_state = table; init_waitqueue_head(&table->waitq); return 0; @@ -3725,145 +3830,233 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) kfree(table); } -/* Caller must hold efx->filter_sem for read if race against - * efx_ef10_filter_table_remove() is possible - */ -static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +#define EFX_EF10_FILTER_DO_MARK_OLD(id) \ + if (id != EFX_EF10_FILTER_ID_INVALID) { \ + filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ + WARN_ON(!table->entry[filter_idx].spec); \ + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \ + } +static void efx_ef10_filter_mark_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - struct net_device *net_dev = efx->net_dev; - struct efx_filter_spec spec; - bool remove_failed = false; - struct netdev_hw_addr *uc; - struct netdev_hw_addr *mc; - unsigned int filter_idx; - int i, n, rc; - - if (!efx_dev_registered(efx)) - return; + unsigned int filter_idx, i; if (!table) return; /* Mark old filters 
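The ev_init hunk above treats -ENOSYS from GET_WORKAROUNDS as information, not failure: firmware too old to implement the query cannot have the bug26807 workaround either. A standalone sketch of that probe-and-default pattern; the command layer and flag value here are invented:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define WORKAROUND_FLAG 0x2u    /* illustrative capability bit */

    /* MCDI stand-in; ancient firmware would return -ENOSYS here. */
    static int get_workarounds(unsigned int *enabled)
    {
            *enabled = WORKAROUND_FLAG;
            return 0;
    }

    static int probe_workaround(bool *active)
    {
            unsigned int enabled;
            int rc = get_workarounds(&enabled);

            if (rc == -ENOSYS) {
                    *active = false;    /* query unknown => feature absent */
                    return 0;
            }
            if (rc)
                    return rc;          /* a real error still propagates */
            *active = !!(enabled & WORKAROUND_FLAG);
            return 0;
    }

    int main(void)
    {
            bool active = false;

            if (probe_workaround(&active))
                    return 1;
            printf("workaround active: %d\n", active);
            return 0;
    }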
that may need to be removed */ spin_lock_bh(&efx->filter_lock); - n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; - for (i = 0; i < n; i++) { - filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; - } - n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count; - for (i = 0; i < n; i++) { - filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; - } + for (i = 0; i < table->dev_uc_count; i++) + EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id); + for (i = 0; i < table->dev_mc_count; i++) + EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id); + EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id); + EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id); + EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id); spin_unlock_bh(&efx->filter_lock); +} +#undef EFX_EF10_FILTER_DO_MARK_OLD - /* Copy/convert the address lists; add the primary station - * address and broadcast address - */ - netif_addr_lock_bh(net_dev); - if (net_dev->flags & IFF_PROMISC || - netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) { - table->dev_uc_count = -1; - } else { - table->dev_uc_count = 1 + netdev_uc_count(net_dev); - ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); - i = 1; - netdev_for_each_uc_addr(uc, net_dev) { - ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); - i++; +static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *uc; + int addr_count; + unsigned int i; + + table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; + addr_count = netdev_uc_count(net_dev); + if (net_dev->flags & IFF_PROMISC) + *promisc = true; + table->dev_uc_count = 1 + addr_count; + ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); + i = 1; + netdev_for_each_uc_addr(uc, net_dev) { + if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { + *promisc = true; + break; } + ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); + table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID; + i++; } - if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || - netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) { - table->dev_mc_count = -1; - } else { - table->dev_mc_count = 1 + netdev_mc_count(net_dev); - eth_broadcast_addr(table->dev_mc_list[0].addr); - i = 1; - netdev_for_each_mc_addr(mc, net_dev) { - ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); - i++; +} + +static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *mc; + unsigned int i, addr_count; + + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + table->bcast_id = EFX_EF10_FILTER_ID_INVALID; + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) + *promisc = true; + + addr_count = netdev_mc_count(net_dev); + i = 0; + netdev_for_each_mc_addr(mc, net_dev) { + if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { + *promisc = true; + break; } + ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); + table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID; + i++; } - netif_addr_unlock_bh(net_dev); - /* Insert/renew unicast filters */ - if (table->dev_uc_count >= 0) { - for (i = 0; i < table->dev_uc_count; i++) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - 
table->dev_uc_list[i].addr); - rc = efx_ef10_filter_insert(efx, &spec, true); - if (rc < 0) { - /* Fall back to unicast-promisc */ - while (i--) - efx_ef10_filter_remove_safe( + table->dev_mc_count = i; +} + +static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, + bool multicast, bool rollback) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_dev_addr *addr_list; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + unsigned int i, j; + int addr_count; + int rc; + + if (multicast) { + addr_list = table->dev_mc_list; + addr_count = table->dev_mc_count; + } else { + addr_list = table->dev_uc_list; + addr_count = table->dev_uc_count; + } + + /* Insert/renew filters */ + for (i = 0; i < addr_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + addr_list[i].addr); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + if (rollback) { + netif_info(efx, drv, efx->net_dev, + "efx_ef10_filter_insert failed rc=%d\n", + rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) + continue; + efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - table->dev_uc_list[i].id); - table->dev_uc_count = -1; - break; + addr_list[j].id); + addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + } + return rc; + } else { + /* mark as not inserted, and carry on */ + rc = EFX_EF10_FILTER_ID_INVALID; } - table->dev_uc_list[i].id = rc; } + addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc); } - if (table->dev_uc_count < 0) { + + if (multicast && rollback) { + /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, EFX_FILTER_FLAG_RX_RSS, 0); - efx_filter_set_uc_def(&spec); + eth_broadcast_addr(baddr); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - WARN_ON(1); - table->dev_uc_count = 0; + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) + continue; + efx_ef10_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + addr_list[j].id); + addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + } + return rc; } else { - table->dev_uc_list[0].id = rc; + table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); } } - /* Insert/renew multicast filters */ - if (table->dev_mc_count >= 0) { - for (i = 0; i < table->dev_mc_count; i++) { + return 0; +} + +static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, + bool rollback) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + int rc; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + + if (multicast) + efx_filter_set_mc_def(&spec); + else + efx_filter_set_uc_def(&spec); + + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + netif_warn(efx, drv, efx->net_dev, + "%scast mismatch filter insert failed rc=%d\n", + multicast ? 
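When called with rollback set, efx_ef10_filter_insert_addr_list() above is all-or-nothing: a failed insertion removes every filter inserted so far and returns, letting the caller fall back to a promiscuous default filter instead. The skeleton of that pattern, standalone, with an invented resource layer in place of the real filter table:

    #include <stddef.h>

    #define INVALID_ID 0xffffu     /* as EFX_EF10_FILTER_ID_INVALID */

    static int insert_one(size_t i) { return i == 3 ? -1 : (int)i; }
    static void remove_one(unsigned int id) { (void)id; }

    /* Insert n filters; on any failure remove those already inserted. */
    static int insert_all_or_nothing(unsigned int *ids, size_t n)
    {
            size_t i, j;

            for (i = 0; i < n; i++) {
                    int rc = insert_one(i);

                    if (rc < 0) {
                            for (j = 0; j < i; j++) {
                                    if (ids[j] == INVALID_ID)
                                            continue;
                                    remove_one(ids[j]);
                                    ids[j] = INVALID_ID;
                            }
                            return rc;
                    }
                    ids[i] = (unsigned int)rc;
            }
            return 0;
    }

    int main(void)
    {
            unsigned int ids[5] = { INVALID_ID, INVALID_ID, INVALID_ID,
                                    INVALID_ID, INVALID_ID };

            /* fails at the 4th filter; the first three are removed again */
            (void)insert_all_or_nothing(ids, 5);
            return 0;
    }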
"Multi" : "Uni", rc); + } else if (multicast) { + table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); + if (!nic_data->workaround_26807) { + /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, EFX_FILTER_FLAG_RX_RSS, 0); + eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->dev_mc_list[i].addr); + baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - /* Fall back to multicast-promisc */ - while (i--) - efx_ef10_filter_remove_safe( - efx, EFX_FILTER_PRI_AUTO, - table->dev_mc_list[i].id); - table->dev_mc_count = -1; - break; + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", + rc); + if (rollback) { + /* Roll back the mc_def filter */ + efx_ef10_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + table->mcdef_id); + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + return rc; + } + } else { + table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); } - table->dev_mc_list[i].id = rc; - } - } - if (table->dev_mc_count < 0) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_mc_def(&spec); - rc = efx_ef10_filter_insert(efx, &spec, true); - if (rc < 0) { - WARN_ON(1); - table->dev_mc_count = 0; - } else { - table->dev_mc_list[0].id = rc; } + rc = 0; + } else { + table->ucdef_id = rc; + rc = 0; } + return rc; +} + +/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD + * flag or removes these filters, we don't need to hold the filter_lock while + * scanning for these filters. + */ +static void efx_ef10_filter_remove_old(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + bool remove_failed = false; + int i; - /* Remove filters that weren't renewed. Since nothing else - * changes the AUTO_OLD flag or removes these filters, we - * don't need to hold the filter_lock while scanning for - * these filters. - */ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { if (ACCESS_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { @@ -3942,6 +4135,87 @@ reset_nic: return rc ? rc : rc2; } +/* Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ +static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + bool uc_promisc = false, mc_promisc = false; + + if (!efx_dev_registered(efx)) + return; + + if (!table) + return; + + efx_ef10_filter_mark_old(efx); + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + netif_addr_lock_bh(net_dev); + efx_ef10_filter_uc_addr_list(efx, &uc_promisc); + efx_ef10_filter_mc_addr_list(efx, &mc_promisc); + netif_addr_unlock_bh(net_dev); + + /* Insert/renew unicast filters */ + if (uc_promisc) { + efx_ef10_filter_insert_def(efx, false, false); + efx_ef10_filter_insert_addr_list(efx, false, false); + } else { + /* If any of the filters failed to insert, fall back to + * promiscuous mode - add in the uc_def filter. But keep + * our individual unicast filters. 
+ */ + if (efx_ef10_filter_insert_addr_list(efx, false, false)) + efx_ef10_filter_insert_def(efx, false, false); + } + + /* Insert/renew multicast filters */ + /* If changing promiscuous state with cascaded multicast filters, remove + * old filters first, so that packets are dropped rather than duplicated + */ + if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc) + efx_ef10_filter_remove_old(efx); + if (mc_promisc) { + if (nic_data->workaround_26807) { + /* If we failed to insert promiscuous filters, rollback + * and fall back to individual multicast filters + */ + if (efx_ef10_filter_insert_def(efx, true, true)) { + /* Changing promisc state, so remove old filters */ + efx_ef10_filter_remove_old(efx); + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } else { + /* If we failed to insert promiscuous filters, don't + * rollback. Regardless, also insert the mc_list + */ + efx_ef10_filter_insert_def(efx, true, false); + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } else { + /* If any filters failed to insert, rollback and fall back to + * promiscuous mode - mc_def filter and maybe broadcast. If + * that fails, roll back again and insert as many of our + * individual multicast filters as we can. + */ + if (efx_ef10_filter_insert_addr_list(efx, true, true)) { + /* Changing promisc state, so remove old filters */ + if (nic_data->workaround_26807) + efx_ef10_filter_remove_old(efx); + if (efx_ef10_filter_insert_def(efx, true, true)) + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } + + efx_ef10_filter_remove_old(efx); + efx->mc_promisc = mc_promisc; +} + static int efx_ef10_set_mac_address(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); @@ -4110,6 +4384,8 @@ efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); out: + if (rc == -EPERM) + rc = 0; rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); return rc ? 
rc : rc2; } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 03bc03b67..974637d3a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -115,9 +115,9 @@ static struct workqueue_struct *reset_workqueue; * * This is only used in MSI-X interrupt mode */ -static bool separate_tx_channels; -module_param(separate_tx_channels, bool, 0444); -MODULE_PARM_DESC(separate_tx_channels, +bool efx_separate_tx_channels; +module_param(efx_separate_tx_channels, bool, 0444); +MODULE_PARM_DESC(efx_separate_tx_channels, "Use separate channels for TX and RX"); /* This is the weight assigned to each of the (per-channel) virtual @@ -1391,7 +1391,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) unsigned int n_channels; n_channels = efx_wanted_parallelism(efx); - if (separate_tx_channels) + if (efx_separate_tx_channels) n_channels *= 2; n_channels += extra_channels; n_channels = min(n_channels, efx->max_channels); @@ -1418,13 +1418,16 @@ static int efx_probe_interrupts(struct efx_nic *efx) efx->n_channels = n_channels; if (n_channels > extra_channels) n_channels -= extra_channels; - if (separate_tx_channels) { - efx->n_tx_channels = max(n_channels / 2, 1U); + if (efx_separate_tx_channels) { + efx->n_tx_channels = min(max(n_channels / 2, + 1U), + efx->max_tx_channels); efx->n_rx_channels = max(n_channels - efx->n_tx_channels, 1U); } else { - efx->n_tx_channels = n_channels; + efx->n_tx_channels = min(n_channels, + efx->max_tx_channels); efx->n_rx_channels = n_channels; } for (i = 0; i < efx->n_channels; i++) @@ -1450,7 +1453,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) /* Assume legacy interrupts */ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { - efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); + efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); efx->n_rx_channels = 1; efx->n_tx_channels = 1; efx->legacy_irq = efx->pci_dev->irq; @@ -1624,7 +1627,8 @@ static void efx_set_channels(struct efx_nic *efx) struct efx_tx_queue *tx_queue; efx->tx_channel_offset = - separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; + efx_separate_tx_channels ? + efx->n_channels - efx->n_tx_channels : 0; /* We need to mark which channels really have RX and TX * queues, and adjust the TX queue numbers if we have separate @@ -1653,17 +1657,34 @@ static int efx_probe_nic(struct efx_nic *efx) if (rc) return rc; - /* Determine the number of channels and queues by trying to hook - * in MSI-X interrupts. */ - rc = efx_probe_interrupts(efx); - if (rc) - goto fail1; + do { + if (!efx->max_channels || !efx->max_tx_channels) { + netif_err(efx, drv, efx->net_dev, + "Insufficient resources to allocate" + " any channels\n"); + rc = -ENOSPC; + goto fail1; + } - efx_set_channels(efx); + /* Determine the number of channels and queues by trying + * to hook in MSI-X interrupts. 
+ */ + rc = efx_probe_interrupts(efx); + if (rc) + goto fail1; - rc = efx->type->dimension_resources(efx); - if (rc) - goto fail2; + efx_set_channels(efx); + + /* dimension_resources can fail with EAGAIN */ + rc = efx->type->dimension_resources(efx); + if (rc != 0 && rc != -EAGAIN) + goto fail2; + + if (rc == -EAGAIN) + /* try again with new max_channels */ + efx_remove_interrupts(efx); + + } while (rc == -EAGAIN); if (efx->n_channels > 1) netdev_rss_key_fill(&efx->rx_hash_key, diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index acb1e0718..1aaf76c1a 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -35,6 +35,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); int efx_setup_tc(struct net_device *net_dev, u8 num_tc); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; +extern bool efx_separate_tx_channels; /* RX */ void efx_set_default_rx_indir_table(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 80e69af21..d790cb8d9 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2371,6 +2371,7 @@ static int falcon_probe_nic(struct efx_nic *efx) efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 : EFX_MAX_CHANNELS); + efx->max_tx_channels = efx->max_channels; efx->timer_quantum_ns = 4968; /* 621 cycles */ /* Initialise I2C adapter */ diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index f08266f0e..5a1c5a8f2 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -321,7 +321,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue) unsigned write_ptr; unsigned old_write_count = tx_queue->write_count; - BUG_ON(tx_queue->write_count == tx_queue->insert_count); + tx_queue->xmit_more_available = false; + if (unlikely(tx_queue->write_count == tx_queue->insert_count)) + return; do { write_ptr = tx_queue->write_count & tx_queue->ptr_mask; diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 81640f8bb..98d172b04 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1779,15 +1779,31 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx) return rc; } -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN); + size_t outlen; + int rc; BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); - return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (!flags) + return 0; + + if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN) + *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS); + else + *flags = 0; + + return 0; } int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, @@ -1816,7 +1832,11 @@ int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, return 0; fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + /* Older firmware lacks GET_WORKAROUNDS and this isn't especially + * terrifying. 
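The efx_probe_nic() loop above turns resource sizing into a negotiation: dimension_resources() may find fewer VIs than the channel plan assumed, shrink max_channels/max_tx_channels, and return -EAGAIN so interrupt probing is redone against the smaller budget; an exhausted budget ends in -ENOSPC. A standalone model of that loop, with the numbers invented:

    #include <errno.h>
    #include <stdio.h>

    static int hw_budget = 5;   /* what the "hardware" will actually grant */

    /* dimension_resources() stand-in: trims the plan via -EAGAIN. */
    static int dimension(int *max_channels)
    {
            if (*max_channels > hw_budget) {
                    *max_channels = hw_budget;
                    return -EAGAIN;      /* redo channel setup, then retry */
            }
            return 0;
    }

    int main(void)
    {
            int max_channels = 32;
            int rc;

            do {
                    if (max_channels == 0)
                            return ENOSPC;       /* nothing left to try */
                    rc = dimension(&max_channels);
            } while (rc == -EAGAIN);

            printf("settled on %d channels\n", max_channels);
            return rc ? 1 : 0;
    }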
The call site will have to deal with it though. + */ + netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR, + efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 1838afe2d..025d504c4 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -346,7 +346,8 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx); bool efx_mcdi_mac_check_fault(struct efx_nic *efx); enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags); int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, unsigned int *enabled_out); diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 45fca9fc6..4cc772164 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -26,6 +26,10 @@ * Unlike a warm boot, assume DMEM has been reloaded, so that * the MC persistent data must be reinitialised. */ #define MC_FW_TEPID_BOOT_OK (16) +/* We have entered the main firmware via recovery mode. This + * means that MC persistent data must be reinitialised, but that + * we shouldn't touch PCIe config. */ +#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32) /* BIST state has been initialized */ #define MC_FW_BIST_INIT_OK (128) @@ -169,6 +173,8 @@ #define MC_CMD_ERR_EINTR 4 /* I/O failure */ #define MC_CMD_ERR_EIO 5 +/* Already exists */ +#define MC_CMD_ERR_EEXIST 6 /* Try again */ #define MC_CMD_ERR_EAGAIN 11 /* Out of memory */ @@ -181,6 +187,10 @@ #define MC_CMD_ERR_ENODEV 19 /* Invalid argument to target */ #define MC_CMD_ERR_EINVAL 22 +/* Broken pipe */ +#define MC_CMD_ERR_EPIPE 32 +/* Read-only */ +#define MC_CMD_ERR_EROFS 30 /* Out of range */ #define MC_CMD_ERR_ERANGE 34 /* Non-recursive resource is already acquired */ @@ -226,6 +236,43 @@ #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a /* The datapath is disabled. */ #define MC_CMD_ERR_DATAPATH_DISABLED 0x100b +/* The requesting client is not a function */ +#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c +/* The requested operation might require the + command to be passed between MCs, and the + transport doesn't support that. Should + only ever be seen over the UART. */ +#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d +/* VLAN tag(s) exists */ +#define MC_CMD_ERR_VLAN_EXIST 0x100e +/* No MAC address assigned to an EVB port */ +#define MC_CMD_ERR_NO_MAC_ADDR 0x100f +/* Notifies the driver that the request has been relayed + * to an admin function for authorization. The driver should + * wait for a PROXY_RESPONSE event and then resend its request. + * This error code is followed by a 32-bit handle that + * helps match it with the respective PROXY_RESPONSE event. */ +#define MC_CMD_ERR_PROXY_PENDING 0x1010 +#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 +/* The request cannot be passed for authorization because + * another request from the same function is currently being + * authorized. The driver should try again later. */ +#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011 +/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function + * that has enabled proxying or BLOCK_INDEX points to a function that + * doesn't await an authorization.
*/ +#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012 +/* This code is currently only used internally in FW. Its meaning is that + * an operation failed due to lack of SR-IOV privilege. + * Normally it is translated to EPERM by send_cmd_err(), + * but it may also be used to trigger some special mechanism + * for handling such case, e.g. to relay the failed request + * to a designated admin function for authorization. */ +#define MC_CMD_ERR_NO_PRIVILEGE 0x1013 +/* Workaround 26807 could not be turned on/off because some functions + * have already installed filters. See the comment at + * MC_CMD_WORKAROUND_BUG26807. */ +#define MC_CMD_ERR_FILTERS_PRESENT 0x1014 #define MC_CMD_ERR_CODE_OFST 0 @@ -275,6 +322,11 @@ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) +/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default + * stack ID (which must be in the range 1-255) along with an EVB port ID. + */ +#define EVB_STACK_ID(n) (((n) & 0xff) << 16) + /* Version 2 adds an optional argument to error returns: the errno value * may be followed by the (0-based) number of the first argument that @@ -394,6 +446,8 @@ #define MCDI_EVENT_AOE_BYTEBLASTER 0x9 /* enum: DDR ECC status update */ #define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb #define MCDI_EVENT_AOE_ERR_DATA_LBN 8 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 #define MCDI_EVENT_RX_ERR_RXQ_LBN 0 @@ -408,6 +462,16 @@ #define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 #define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 #define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? */ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 #define MCDI_EVENT_SRC_LBN 36 @@ -416,6 +480,8 @@ #define MCDI_EVENT_EV_CODE_WIDTH 4 #define MCDI_EVENT_CODE_LBN 44 #define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 /* enum: Bad assert. */ #define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum: PM Notice. */ @@ -470,6 +536,14 @@ #define MCDI_EVENT_CODE_MC_BIST 0x19 /* enum: PTP tick event providing current NIC time */ #define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d /* enum: Artificial event generated by host and posted via MC for test * purposes. 
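Every _LBN/_WIDTH pair in this header is consumed by the driver's bitfield macros, which paste those suffixes onto the name they are given; EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE), for instance, extracts bits 44..51 of the event. An illustrative decode of the new proxy-response event, modelled loosely on the driver's efx_mcdi_process_event() (the HANDLE and RC fields are defined a little further down; this is a sketch, not code from this patch):

static void efx_mcdi_proxy_response_sketch(struct efx_nic *efx,
                                           efx_qword_t *event)
{
        if (EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE) !=
            MCDI_EVENT_CODE_PROXY_RESPONSE)
                return;

        /* HANDLE (bits 0..31) carries the value quoted after
         * MC_CMD_ERR_PROXY_PENDING; RC (bits 36..43) is zero when the
         * request was authorized and may now be resent.
         */
        netif_dbg(efx, hw, efx->net_dev, "proxy response: handle=%#x rc=%u\n",
                  EFX_QWORD_FIELD(*event, MCDI_EVENT_PROXY_RESPONSE_HANDLE),
                  EFX_QWORD_FIELD(*event, MCDI_EVENT_PROXY_RESPONSE_RC));
}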
*/
@@ -537,6 +611,33 @@
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
 /* FCDI_EVENT structuredef */
 #define FCDI_EVENT_LEN 8
@@ -581,6 +682,10 @@
 #define FCDI_EVENT_CODE_PTP_TICK 0x7
 /* enum: ECC error counters */
 #define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -594,11 +699,24 @@
 #define FCDI_EVENT_LINK_STATE_DATA_OFST 0
 #define FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid over a normal FCDI event
@@ -631,6 +749,90 @@
 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning.
*/ +#define MUM_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MUM_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MUM_EVENT_LEVEL_FATAL 0x3 +#define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_SENSOR_ID_LBN 0 +#define MUM_EVENT_SENSOR_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MUM_EVENT_SENSOR_STATE_LBN 8 +#define MUM_EVENT_SENSOR_STATE_WIDTH 8 +#define MUM_EVENT_PORT_PHY_READY_LBN 0 +#define MUM_EVENT_PORT_PHY_READY_WIDTH 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 +#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 +#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 +#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 +#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 +#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 +#define MUM_EVENT_DATA_LBN 0 +#define MUM_EVENT_DATA_WIDTH 32 +#define MUM_EVENT_SRC_LBN 36 +#define MUM_EVENT_SRC_WIDTH 8 +#define MUM_EVENT_EV_CODE_LBN 60 +#define MUM_EVENT_EV_CODE_WIDTH 4 +#define MUM_EVENT_CODE_LBN 44 +#define MUM_EVENT_CODE_WIDTH 8 +/* enum: The MUM was rebooted. */ +#define MUM_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define MUM_EVENT_CODE_ASSERT 0x2 +/* enum: Sensor failure. */ +#define MUM_EVENT_CODE_SENSOR 0x3 +/* enum: Link fault has been asserted, or has cleared. */ +#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 +#define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LBN 0 +#define MUM_EVENT_SENSOR_DATA_WIDTH 32 +#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 +#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 +#define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LBN 0 +#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */ +#define MUM_EVENT_PORT_PHY_TECH_LBN 0 +#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40 +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4 + /***********************************/ /* MC_CMD_READ32 @@ -687,24 +889,34 @@ /* MC_CMD_COPYCODE_IN msgrequest */ #define MC_CMD_COPYCODE_IN_LEN 16 -/* Source address */ -#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 -/* 
enum: The main image should be entered via a copy of a single word from and - * to this address when none of the other magic behaviours are required. +/* Source address + * + * The main image should be entered via a copy of a single word from and to a + * magic address, which controls various aspects of the boot. The magic address + * is a bitfield, with each bit as documented below. */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 -/* enum: Entering the main image via a copy of a single word from and to this - * address indicates that it should not attempt to start the datapath CPUs. - * This is useful for certain soft rebooting scenarios. (Huntington only) +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 -/* enum: Entering the main image via a copy of a single word from and to this - * address indicates that it should not attempt to parse any configuration from - * flash. (In addition, the datapath CPUs will not be started, as for - * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for - * certain soft rebooting scenarios. (Huntington only) +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see + * below) */ #define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 /* Destination address */ #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 @@ -795,6 +1007,10 @@ #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 /* Failing thread address */ #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 @@ -802,7 +1018,8 @@ /***********************************/ /* MC_CMD_LOG_CTRL - * Configure the output stream for various events and messages. + * Configure the output stream for log events such as link state changes, + * sensor notifications and MCDI completions */ #define MC_CMD_LOG_CTRL 0x7 @@ -816,6 +1033,7 @@ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum: Event queue. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +/* Legacy argument. Must be zero. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 /* MC_CMD_LOG_CTRL_OUT msgresponse */ @@ -955,8 +1173,12 @@ * input on the same NIC. */ #define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a +/* enum: Set the PTP sync status. Status is used by firmware to report to event + * subscribers. 
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
 /* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1b
+#define MC_CMD_PTP_OP_MAX 0x1c
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -1191,8 +1413,12 @@
 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
@@ -1214,6 +1440,23 @@
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
 /* MC_CMD_PTP_OUT msgresponse */
 #define MC_CMD_PTP_OUT_LEN 0
@@ -1375,7 +1618,7 @@
 #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
 /* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
 /* Time format required/used by this NIC. Applies to all PTP MCDI
 * operations that pass times between the host and firmware. If this operation
 * is not supported (older firmware) a format of seconds and nanoseconds should
@@ -1396,6 +1639,13 @@
 * end and start times minus the time that the MC waited for host end.
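The SET_SYNC_STATUS layout above maps directly onto the MCDI_DECLARE_BUF()/MCDI_SET_DWORD() helpers already visible in this patch. A sketch (not code added here) of a caller telling the MC that the host and NIC clocks are in sync for the next timeout_secs seconds; PTP_IN_OP names the opcode word at offset 0:

static int efx_ptp_set_sync_status_sketch(struct efx_nic *efx, bool in_sync,
                                          u32 timeout_secs)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN);

        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_SET_SYNC_STATUS);
        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
        MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_STATUS,
                       in_sync ? MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC :
                                 MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC);
        /* Seconds until the MC stops treating the clocks as in sync;
         * the two RESERVED words stay zero from MCDI_DECLARE_BUF().
         */
        MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_TIMEOUT, timeout_secs);

        return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}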
*/ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 @@ -1415,6 +1665,9 @@ /* Enum values, see field(s): */ /* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ +/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0 + /***********************************/ /* MC_CMD_CSR_READ32 @@ -1915,6 +2168,14 @@ #define MC_CMD_FW_FULL_FEATURED 0x0 /* enum: Prefer to use firmware with fewer features but lower latency */ #define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 /* enum: Only this option is allowed for non-admin functions */ #define MC_CMD_FW_DONT_CARE 0xffffffff @@ -2481,6 +2742,12 @@ #define MC_CMD_LOOPBACK_SD_FES_WS 0x22 /* enum: Near side of AOE Siena side port */ #define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +/* enum: Medford Wireside datapath loopback */ +#define MC_CMD_LOOPBACK_DATA_WS 0x24 +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 /* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 @@ -2552,12 +2819,8 @@ #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 -/* enum: Flow control is off. */ -#define MC_CMD_FCNTL_OFF 0x0 -/* enum: Respond to flow control. */ -#define MC_CMD_FCNTL_RESPOND 0x1 -/* enum: Respond to and Issue flow control. */ -#define MC_CMD_FCNTL_BIDIR 0x2 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 @@ -2632,7 +2895,7 @@ #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK /* MC_CMD_SET_MAC_IN msgrequest */ -#define MC_CMD_SET_MAC_IN_LEN 24 +#define MC_CMD_SET_MAC_IN_LEN 28 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of * EtherII, VLAN, bug16011 padding). */ @@ -2649,13 +2912,20 @@ #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 /* enum: Flow control is off. */ -/* MC_CMD_FCNTL_OFF 0x0 */ +#define MC_CMD_FCNTL_OFF 0x0 /* enum: Respond to flow control. */ -/* MC_CMD_FCNTL_RESPOND 0x1 */ +#define MC_CMD_FCNTL_RESPOND 0x1 /* enum: Respond to and Issue flow control. */ -/* MC_CMD_FCNTL_BIDIR 0x2 */ +#define MC_CMD_FCNTL_BIDIR 0x2 /* enum: Auto neg flow control. */ #define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control (eftest builds only). */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. 
*/
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 /* MC_CMD_SET_MAC_OUT msgresponse */
 #define MC_CMD_SET_MAC_OUT_LEN 0
@@ -2748,7 +3018,8 @@
 * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
 * performed, and the statistics may be read from the message response. If
 * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location).
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
 */
 #define MC_CMD_MAC_STATS 0x2e
@@ -2791,6 +3062,7 @@
 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
 #define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
 #define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
 #define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
 #define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
@@ -2890,8 +3162,8 @@
 * PM_AND_RXDP_COUNTERS capability only.
 */
 #define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
 */
 #define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
 /* enum: RXDP counter: Number of times the DPCPU waited for an existing
@@ -3213,6 +3485,8 @@
 #define MC_CMD_NVRAM_TYPE_LICENSE 0x12
 /* enum: FC Log. */
 #define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
 /***********************************/
@@ -3407,6 +3681,8 @@
 */
 #define MC_CMD_SCHEDINFO 0x3e
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SCHEDINFO_IN msgrequest */
 #define MC_CMD_SCHEDINFO_IN_LEN 0
@@ -3593,6 +3869,68 @@
 #define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
 /* enum: Hotpoint temperature: degC */
 #define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller
internal temperature sensor voltage on master core + * (internal ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 +/* enum: controller internal temperature on master core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 +/* enum: controller internal temperature sensor voltage on master core + * (external ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 +/* enum: controller internal temperature on master core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 +/* enum: controller internal temperature on slave core sensor voltage (internal + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 +/* enum: controller internal temperature on slave core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 +/* enum: controller internal temperature on slave core sensor voltage (external + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 +/* enum: controller internal temperature on slave core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +/* enum: Voltage supplied to the SODIMMs from their power supply: mV */ +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +/* enum: Temperature of SODIMM 0 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +/* enum: Temperature of SODIMM 1 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY0_VCC 0x4c +/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY1_VCC 0x4d /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -3701,6 +4039,8 @@ #define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum: Sensor is working but does not currently have a reading. */ #define MC_CMD_SENSOR_STATE_NO_READING 0x4 +/* enum: Sensor initialisation failed. */ +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 @@ -3870,6 +4210,7 @@ /* MC_CMD_WORKAROUND_IN msgrequest */ #define MC_CMD_WORKAROUND_IN_LEN 8 +/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 /* enum: Bug 17230 work around. */ #define MC_CMD_WORKAROUND_BUG17230 0x1 @@ -3877,11 +4218,38 @@ #define MC_CMD_WORKAROUND_BUG35388 0x2 /* enum: Bug35017 workaround (A64 tables must be identity map) */ #define MC_CMD_WORKAROUND_BUG35017 0x3 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_WORKAROUND_BUG41750 0x4 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_WORKAROUND_BUG42008 0x5 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) + * This feature cannot be turned on/off while there are any filters already + * present. The behaviour in such case depends on the acting client's privilege + * level. If the client has the admin privilege, then all functions that have + * filters installed will be FLRed and the FLR_DONE flag will be set. 
Otherwise + * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. + */ +#define MC_CMD_WORKAROUND_BUG26807 0x6 +/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable + * the workaround + */ #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 /* MC_CMD_WORKAROUND_OUT msgresponse */ #define MC_CMD_WORKAROUND_OUT_LEN 0 +/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used + * when (TYPE == MC_CMD_WORKAROUND_BUG26807) + */ +#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 + /***********************************/ /* MC_CMD_GET_PHY_MEDIA_INFO @@ -4093,7 +4461,7 @@ /***********************************/ /* MC_CMD_GET_MAC_ADDRESSES - * Returns the base MAC, count and stride for the requestiong function + * Returns the base MAC, count and stride for the requesting function */ #define MC_CMD_GET_MAC_ADDRESSES 0x55 @@ -4115,6 +4483,527 @@ /* Spacing of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 + +/***********************************/ +/* MC_CMD_CLP + * Perform a CLP related operation + */ +#define MC_CMD_CLP 0x56 + +#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_CLP_IN msgrequest */ +#define MC_CMD_CLP_IN_LEN 4 +/* Sub operation */ +#define MC_CMD_CLP_IN_OP_OFST 0 +/* enum: Return to factory default settings */ +#define MC_CMD_CLP_OP_DEFAULT 0x1 +/* enum: Set MAC address */ +#define MC_CMD_CLP_OP_SET_MAC 0x2 +/* enum: Get MAC address */ +#define MC_CMD_CLP_OP_GET_MAC 0x3 +/* enum: Set UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_SET_BOOT 0x4 +/* enum: Get UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_GET_BOOT 0x5 + +/* MC_CMD_CLP_OUT msgresponse */ +#define MC_CMD_CLP_OUT_LEN 0 + +/* MC_CMD_CLP_IN_DEFAULT msgrequest */ +#define MC_CMD_CLP_IN_DEFAULT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_DEFAULT msgresponse */ +#define MC_CMD_CLP_OUT_DEFAULT_LEN 0 + +/* MC_CMD_CLP_IN_SET_MAC msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_LEN 12 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MAC address assigned to port */ +#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_OUT_SET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_SET_MAC_LEN 0 + +/* MC_CMD_CLP_IN_GET_MAC msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_GET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_GET_MAC_LEN 8 +/* MAC address assigned to port */ +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0 +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6 +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_IN_SET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_SET_BOOT_LEN 5 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* Boot flag */ +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 + +/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0 + +/* MC_CMD_CLP_IN_GET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_GET_BOOT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 +/* Boot flag */ +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0 +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1 +#define 
MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3 + + +/***********************************/ +/* MC_CMD_MUM + * Perform a MUM operation + */ +#define MC_CMD_MUM 0x57 + +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MUM_IN msgrequest */ +#define MC_CMD_MUM_IN_LEN 4 +#define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_LBN 0 +#define MC_CMD_MUM_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to MUM */ +#define MC_CMD_MUM_OP_NULL 0x1 +/* enum: Get MUM version */ +#define MC_CMD_MUM_OP_GET_VERSION 0x2 +/* enum: Issue raw I2C command to MUM */ +#define MC_CMD_MUM_OP_RAW_CMD 0x3 +/* enum: Read from registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_READ 0x4 +/* enum: Write to registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_WRITE 0x5 +/* enum: Control UART logging. */ +#define MC_CMD_MUM_OP_LOG 0x6 +/* enum: Operations on MUM GPIO lines */ +#define MC_CMD_MUM_OP_GPIO 0x7 +/* enum: Get sensor readings from MUM */ +#define MC_CMD_MUM_OP_READ_SENSORS 0x8 +/* enum: Initiate clock programming on the MUM */ +#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 +/* enum: Initiate FPGA load from flash on the MUM */ +#define MC_CMD_MUM_OP_FPGA_LOAD 0xa +/* enum: Request sensor reading from MUM ADC resulting from earlier request via + * MUM ATB + */ +#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb +/* enum: Send commands relating to the QSFP ports via the MUM for PHY + * operations + */ +#define MC_CMD_MUM_OP_QSFP 0xc + +/* MC_CMD_MUM_IN_NULL msgrequest */ +#define MC_CMD_MUM_IN_NULL_LEN 4 +/* MUM cmd header */ +#define MC_CMD_MUM_IN_CMD_OFST 0 + +/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ +#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + +/* MC_CMD_MUM_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_READ_LEN 16 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* ID of (device connected to MUM) to read from registers of */ +#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE 0x1 +/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 +/* 32-bit address to read from */ +#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +/* Number of words to read. 
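All of these MUM sub-operations ride on the single MC_CMD_MUM command, with the opcode packed into the low 8 bits of the header dword. As an illustration (a sketch under those assumptions, not this patch's code), reading one 32-bit word from the Hittite clock generator via MC_CMD_MUM_OP_READ could use the driver's MCDI helpers and the NUMWORDS field defined just below:

static int efx_mum_read_word_sketch(struct efx_nic *efx, u32 addr, u32 *value)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_MUM_IN_READ_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_MUM_OUT_READ_LEN(1));
        size_t outlen;
        int rc;

        /* The OP field occupies bits 0..7 of the header dword */
        MCDI_POPULATE_DWORD_1(inbuf, MUM_IN_OP_HDR,
                              MUM_IN_OP, MC_CMD_MUM_OP_READ);
        MCDI_SET_DWORD(inbuf, MUM_IN_READ_DEVICE, MC_CMD_MUM_DEV_HITTITE);
        MCDI_SET_DWORD(inbuf, MUM_IN_READ_ADDR, addr);
        MCDI_SET_DWORD(inbuf, MUM_IN_READ_NUMWORDS, 1);

        rc = efx_mcdi_rpc(efx, MC_CMD_MUM, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_MUM_OUT_READ_LEN(1))
                return -EIO;
        *value = MCDI_DWORD(outbuf, MUM_OUT_READ_BUFFER);
        return 0;
}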
*/ +#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 + +/* MC_CMD_MUM_IN_WRITE msgrequest */ +#define MC_CMD_MUM_IN_WRITE_LENMIN 16 +#define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* ID of (device connected to MUM) to write to registers of */ +#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +/* MC_CMD_MUM_DEV_HITTITE 0x1 */ +/* 32-bit address to write to */ +#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +/* Words to write */ +#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 + +/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ +#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MUM I2C cmd code */ +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +/* Number of bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +/* Number of bytes to read */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +/* Bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 + +/* MC_CMD_MUM_IN_LOG msgrequest */ +#define MC_CMD_MUM_IN_LOG_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ + +/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ +#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* Enable/disable debug output to UART */ +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 + +/* MC_CMD_MUM_IN_GPIO msgrequest */ +#define MC_CMD_MUM_IN_GPIO_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ + +/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +/* The first 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +/* The second 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 + +/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +/* The first 32-bit word to be written to the GPIO OUT ENABLE register. 
*/ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ +#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 + +/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* Bit-mask of clocks to be programmed */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ +/* Control flags for clock programming */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 + +/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ +#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* Enable/Disable FPGA config from flash */ +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 + +/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ +#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + +/* MC_CMD_MUM_IN_QSFP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_LEN 12 +/* MUM cmd header */ +/* 
MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 +#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ +#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ +#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 + +/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 + +/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 + +/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 + +/* MC_CMD_MUM_OUT msgresponse */ +#define MC_CMD_MUM_OUT_LEN 0 + +/* MC_CMD_MUM_OUT_NULL msgresponse */ +#define MC_CMD_MUM_OUT_NULL_LEN 0 + +/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ +#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ +#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +/* returned data */ +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 + +/* MC_CMD_MUM_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_READ_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 +#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 + +/* MC_CMD_MUM_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_LOG msgresponse */ +#define 
MC_CMD_MUM_OUT_LOG_LEN 0 + +/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ +#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 +/* The first 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +/* The second 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 +/* The first 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +/* The second 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 + +/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 + +/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 + +/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ +#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 + +/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 + +/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 + +/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 + +/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 + +/* 
MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) +/* in bytes */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 + +/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 + +/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 + /* MC_CMD_RESOURCE_SPECIFIER enum */ /* enum: Any */ #define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff @@ -4203,6 +5092,30 @@ #define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ #define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +/* enum: Primary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +/* enum: Secondary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +/* enum: FC firmware partition */ +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +/* enum: FC License partition */ +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +/* enum: Non-volatile log output partition for FC */ +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +/* enum: MUM firmware partition */ +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: MUM Non-volatile log output partition. */ +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +/* enum: MUM Application table partition. */ +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +/* enum: MUM boot rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +/* enum: MUM production signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +/* enum: MUM user signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +/* enum: MUM fuses and lockbits partition. */ +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -4218,66 +5131,69 @@ #define LICENSED_APP_ID_LEN 4 #define LICENSED_APP_ID_ID_OFST 0 /* enum: OpenOnload */ -#define LICENSED_APP_ID_ONLOAD 0x1 +#define LICENSED_APP_ID_ONLOAD 0x1 /* enum: PTP timestamping */ -#define LICENSED_APP_ID_PTP 0x2 +#define LICENSED_APP_ID_PTP 0x2 /* enum: SolarCapture Pro */ -#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +/* enum: SolarSecure filter engine */ +#define LICENSED_APP_ID_SOLARSECURE 0x8 +/* enum: Performance monitor */ +#define LICENSED_APP_ID_PERF_MONITOR 0x10 +/* enum: SolarCapture Live */ +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +/* enum: Capture SolarSystem */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +/* enum: Network Access Control */ +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 - -/***********************************/ -/* MC_CMD_GET_WORKAROUNDS - * Read the list of all implemented and all currently enabled workarounds. 
The - * enums here must correspond with those in MC_CMD_WORKAROUND. - */ -#define MC_CMD_GET_WORKAROUNDS 0x59 - -/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ -#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 -/* Each workaround is represented by a single bit according to the enums below. - */ -#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 -#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 -/* enum: Bug 17230 work around. */ -#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 -/* enum: Bug 35388 work around (unsafe EVQ writes). */ -#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 -/* enum: Bug35017 workaround (A64 tables must be identity map) */ -#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 - - -/***********************************/ -/* MC_CMD_LINK_STATE_MODE - * Read/set link state mode of a VF - */ -#define MC_CMD_LINK_STATE_MODE 0x5c - -#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_LINK_STATE_MODE_IN msgrequest */ -#define MC_CMD_LINK_STATE_MODE_IN_LEN 8 -/* The target function to have its link state mode read or set, must be a VF - * e.g. VF 1,3 = 0x00030001 - */ -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 -/* New link state mode to be set */ -#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ -/* enum: Use this value to just read the existing setting without modifying it. - */ -#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff - -/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ -#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 -#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 +/* TX_TIMESTAMP_EVENT structuredef */ +#define TX_TIMESTAMP_EVENT_LEN 6 +/* lower 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16 +/* Type of TX event, ordinary TX completion, low or high part of TX timestamp + */ +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 +/* enum: This is a TX completion event, not a timestamp */ +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is the low part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +/* enum: This is the high part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 +/* upper 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16 + +/* RSS_MODE structuredef */ +#define RSS_MODE_LEN 1 +/* The RSS mode for a particular packet type is a value from 0 - 15 which can + * be considered as 4 bits selecting which fields are included in the hash. (A + * value 0 effectively disables RSS spreading for the packet type.) The YAML + * generation tools require this structure to be a whole number of bytes wide, + * but only 4 bits are relevant. 
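Concretely, each of the four low bits of the selector (the HASH_* fields defined just below) gates one input into the hash, so full 4-tuple spreading is 0xf and 0 disables spreading for that packet type. A small sketch of composing a selector value:

/* Sketch only: build the 4-bit RSS hash selector described above. */
static inline u8 rss_mode_selector(bool src_addr, bool dst_addr,
                                   bool src_port, bool dst_port)
{
        return (src_addr << RSS_MODE_HASH_SRC_ADDR_LBN) |
               (dst_addr << RSS_MODE_HASH_DST_ADDR_LBN) |
               (src_port << RSS_MODE_HASH_SRC_PORT_LBN) |
               (dst_port << RSS_MODE_HASH_DST_PORT_LBN);
}
/* For instance rss_mode_selector(true, true, false, true) == 0xb (0b1011) */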
+ */ +#define RSS_MODE_HASH_SELECTOR_OFST 0 +#define RSS_MODE_HASH_SELECTOR_LEN 1 +#define RSS_MODE_HASH_SRC_ADDR_LBN 0 +#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1 +#define RSS_MODE_HASH_DST_ADDR_LBN 1 +#define RSS_MODE_HASH_DST_ADDR_WIDTH 1 +#define RSS_MODE_HASH_SRC_PORT_LBN 2 +#define RSS_MODE_HASH_SRC_PORT_WIDTH 1 +#define RSS_MODE_HASH_DST_PORT_LBN 3 +#define RSS_MODE_HASH_DST_PORT_WIDTH 1 +#define RSS_MODE_HASH_SELECTOR_LBN 0 +#define RSS_MODE_HASH_SELECTOR_WIDTH 8 /***********************************/ @@ -4413,7 +5329,9 @@ #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL -/* MC_CMD_INIT_RXQ_IN msgrequest */ +/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version + * in new code. + */ #define MC_CMD_INIT_RXQ_IN_LENMIN 36 #define MC_CMD_INIT_RXQ_IN_LENMAX 252 #define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) @@ -4456,9 +5374,73 @@ #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with 
the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 + /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 +/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 + /***********************************/ /* MC_CMD_INIT_TXQ @@ -4467,7 +5449,9 @@ #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL -/* MC_CMD_INIT_TXQ_IN msgrequest */ +/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version + * in new code. + */ #define MC_CMD_INIT_TXQ_IN_LENMIN 36 #define MC_CMD_INIT_TXQ_IN_LENMAX 252 #define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) @@ -4499,6 +5483,10 @@ #define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -4511,6 +5499,60 @@ #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 +/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode + * flags + */ +#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. 
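+ *
+ * Illustration only: the single-bit flags below can be packed with their
+ * LBN values, e.g. to request inner (encapsulated) checksum offload:
+ *
+ *   u32 flags = 0;
+ *   flags |= 1 << MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN;
+ *   flags |= 1 << MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN;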
*/ +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +/* Flags related to Qbb flow control mode. */ +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3 + /* MC_CMD_INIT_TXQ_OUT msgresponse */ #define MC_CMD_INIT_TXQ_OUT_LEN 0 @@ -4617,6 +5659,132 @@ /* MC_CMD_PROXY_CMD_OUT msgresponse */ #define MC_CMD_PROXY_CMD_OUT_LEN 0 +/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to + * manage proxied requests + */ +#define MC_PROXY_STATUS_BUFFER_LEN 16 +/* Handle allocated by the firmware for this proxy transaction */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 +/* enum: An invalid handle. */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 +/* The requesting physical function number */ +#define MC_PROXY_STATUS_BUFFER_PF_OFST 4 +#define MC_PROXY_STATUS_BUFFER_PF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_PF_LBN 32 +#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16 +/* The requesting virtual function number. Set to VF_NULL if the target is a + * PF. + */ +#define MC_PROXY_STATUS_BUFFER_VF_OFST 6 +#define MC_PROXY_STATUS_BUFFER_VF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_VF_LBN 48 +#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16 +/* The target function RID. */ +#define MC_PROXY_STATUS_BUFFER_RID_OFST 8 +#define MC_PROXY_STATUS_BUFFER_RID_LEN 2 +#define MC_PROXY_STATUS_BUFFER_RID_LBN 64 +#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16 +/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. 
*/
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
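+
+/* Illustration only (status_buff and status_block_size are hypothetical
+ * host-side variables mirroring STATUS_BUFF_ADDR and STATUS_BLOCK_SIZE
+ * above, not part of this interface): given the buffers registered by
+ * MC_CMD_PROXY_CONFIGURE, the admin function could locate the status
+ * entry for block i as
+ *
+ *   const u8 *entry = status_buff + i * status_block_size;
+ *
+ * where each entry is laid out as MC_PROXY_STATUS_BUFFER.
+ */
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE). 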
+ */ +#define MC_CMD_PROXY_COMPLETE 0x5f + +#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_COMPLETE_IN msgrequest */ +#define MC_CMD_PROXY_COMPLETE_IN_LEN 12 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply + * is stored in the REPLY_BUFF. + */ +#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0 +/* enum: The operation has been authorized. The originating function may now + * try again. + */ +#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1 +/* enum: The operation has been declined. */ +#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2 +/* enum: The authorization failed because the relevant application did not + * respond in time. + */ +#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 + +/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ +#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 + /***********************************/ /* MC_CMD_ALLOC_BUFTBL_CHUNK @@ -4688,6 +5856,44 @@ /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 +/* PORT_CONFIG_ENTRY structuredef */ +#define PORT_CONFIG_ENTRY_LEN 16 +/* External port number (label) */ +#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8 +/* Port core location */ +#define PORT_CONFIG_ENTRY_CORE_OFST 1 +#define PORT_CONFIG_ENTRY_CORE_LEN 1 +#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */ +#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */ +#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */ +#define PORT_CONFIG_ENTRY_CORE_LBN 8 +#define PORT_CONFIG_ENTRY_CORE_WIDTH 8 +/* Internal number (HW resource) relative to the core */ +#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2 +#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1 +#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16 +#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8 +/* Reserved */ +#define PORT_CONFIG_ENTRY_RSVD_OFST 3 +#define PORT_CONFIG_ENTRY_RSVD_LEN 1 +#define PORT_CONFIG_ENTRY_RSVD_LBN 24 +#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8 +/* Bitmask of KR lanes used by the port */ +#define PORT_CONFIG_ENTRY_LANES_OFST 4 +#define PORT_CONFIG_ENTRY_LANES_LBN 32 +#define PORT_CONFIG_ENTRY_LANES_WIDTH 32 +/* Port capabilities (MC_CMD_PHY_CAP_*) */ +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8 +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64 +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32 +/* Reserved (align to 16 bytes) */ +#define PORT_CONFIG_ENTRY_RSVD2_OFST 12 +#define PORT_CONFIG_ENTRY_RSVD2_LBN 96 +#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32 + /***********************************/ /* MC_CMD_FILTER_OP @@ -4759,9 +5965,9 @@ #define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ #define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 -/* enum: loop back to port 0 TX MAC */ +/* enum: loop back to TXDP 0 */ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 -/* enum: loop back to port 1 TX MAC */ +/* enum: loop back to TXDP 1 */ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 @@ -4778,9 +5984,7 @@ #define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or - * MC_CMD_DOT1P_MAPPING_ALLOC. 
Note that these handles should be considered - * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be - * a valid handle. + * MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 /* transmit domain (reserved; set to 0) */ @@ -4835,6 +6039,235 @@ #define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 #define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 +/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to + * include handling of VXLAN/NVGRE encapsulated frame filtering (which is + * supported on Medford only). + */ +#define MC_CMD_FILTER_OP_EXT_IN_LEN 172 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define 
MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +/* receive mode */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92 +#define 
MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 + /* MC_CMD_FILTER_OP_OUT msgresponse */ #define MC_CMD_FILTER_OP_OUT_LEN 12 /* identifies the type of operation requested */ @@ -4849,6 +6282,27 @@ #define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 #define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 #define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
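+ *
+ * Illustration only: a caller can detect the invalid handle by testing
+ * both 32-bit halves of the response, e.g.
+ *
+ *   bool valid = !(lo == MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID &&
+ *                  hi == MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID);
+ *
+ * where lo and hi are read from the HANDLE_LO and HANDLE_HI words.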
+ */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ /***********************************/ @@ -4865,6 +6319,10 @@ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 /* enum: read the list of supported RX filter matches */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 @@ -4884,6 +6342,17 @@ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 +/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* bitfield of filter insertion restrictions */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 + /***********************************/ /* MC_CMD_PARSER_DISP_RW @@ -4901,8 +6370,10 @@ #define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 /* enum: TX dispatcher CPU */ #define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 -/* enum: Lookup engine */ +/* enum: Lookup engine (with original metadata format) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +/* enum: Lookup engine (with requested metadata format) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 /* enum: read a word of DICPU DMEM or a LUE entry */ @@ -4919,6 +6390,8 @@ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 /* value to write (for LUE writes) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 @@ -5019,7 +6492,9 @@ /* The maximum number of VIs that would be useful */ #define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 -/* MC_CMD_ALLOC_VIS_OUT msgresponse */ +/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. + * Use extended version in new code. + */ #define MC_CMD_ALLOC_VIS_OUT_LEN 8 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 @@ -5028,6 +6503,17 @@ */ #define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. 
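+ *
+ * Illustration only (the mapping is an assumption, not specified here):
+ * a driver receiving a wakeup event for absolute VI number abs_vi might
+ * recover its function-local queue index as
+ *
+ *   local = (abs_vi - vi_base) >> vi_shift;
+ *
+ * with vi_base and vi_shift taken from VI_BASE and VI_SHIFT below.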
+ */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 + /***********************************/ /* MC_CMD_FREE_VIS @@ -5114,13 +6600,15 @@ #define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 /* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 /***********************************/ @@ -5575,6 +7063,7 @@ #define MC_CMD_GET_CAPABILITIES 0xbe #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_CAPABILITIES_IN msgrequest */ #define MC_CMD_GET_CAPABILITIES_IN_LEN 0 @@ -5582,6 +7071,20 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 @@ -5600,8 +7103,14 @@ #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 @@ -5609,6 +7118,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ @@ -5632,6 +7145,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ @@ -5642,22 +7159,69 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 #define 
MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
/* Licensed capabilities */
@@ -5735,6 +7299,15 @@
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4

+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0

@@ -5753,8 +7326,14 @@
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq is inserted into when inserted. 
*/
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
/* an already reserved bucket (typically set to bucket associated with outer
@@ -5768,6 +7347,35 @@
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24

+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0

@@ -5826,13 +7434,23 @@
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12

/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
@@ -5892,7 +7510,10 @@
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
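+/* Illustration only (mcdi_set_dword is a hypothetical helper, not part of
+ * this file): a v-port attached to a VLAN-aggregator v-switch created with
+ * NUM_VLAN_TAGS == 1 must itself be allocated with exactly one tag, e.g.
+ *
+ *   mcdi_set_dword(inbuf, MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST, 1);
+ *   mcdi_set_dword(inbuf, MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST, vlan_id);
+ *
+ * for other v-switch types the v-switch value is only an upper limit.
+ */
-/* The number of VLAN tags to insert/remove. 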
*/ +/* The number of VLAN tags to insert/remove. An error will be returned if + * incompatible with the number of VLAN tags specified for the upstream + * v-switch. + */ #define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 @@ -6136,8 +7757,13 @@ /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 -/* The handle of the new RSS context */ +/* The handle of the new RSS context. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. + */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +/* enum: guaranteed invalid RSS context handle value */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff /***********************************/ @@ -6249,7 +7875,11 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 -/* Hash control flags */ +/* Hash control flags. The _EN bits are always supported. The _MODE bits only + * work when the firmware reports ADDITIONAL_RSS_MODES in + * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0. + * See the RSS_MODE structure for the meaning of the mode bits. + */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 @@ -6259,6 +7889,20 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 @@ -6279,7 +7923,12 @@ /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 -/* Hash control flags */ +/* Hash control flags. If any _MODE bits are non-zero (which will only be true + * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be + * disregarded (but are guaranteed to be consistent with the _MODE bits if + * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was + * allocated). 
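+ *
+ * Illustration only: each 4-bit mode below can be extracted from the
+ * flags word using its LBN, e.g.
+ *
+ *   tcp4_mode = (flags >>
+ *       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN) & 0xf;
+ *
+ * and interpreted according to the RSS_MODE structure defined earlier.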
+ */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 @@ -6289,6 +7938,20 @@ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 /***********************************/ @@ -6311,8 +7974,13 @@ /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 -/* The handle of the new .1p mapping */ +/* The handle of the new .1p mapping. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. + */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +/* enum: guaranteed invalid .1p mapping handle value */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff /***********************************/ @@ -6421,401 +8089,32 @@ /***********************************/ -/* MC_CMD_RMON_RX_CLASS_STATS - * Retrieve rmon rx class statistics +/* MC_CMD_VPORT_ADD_MAC_ADDRESS + * Add a MAC address to a v-port */ -#define MC_CMD_RMON_RX_CLASS_STATS 0xc3 - -/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - +#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8 -/***********************************/ -/* MC_CMD_RMON_TX_CLASS_STATS - * Retrieve rmon tx class statistics - */ -#define MC_CMD_RMON_TX_CLASS_STATS 0xc4 +#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL -/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8 -#define 
MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1 +/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10 +/* The handle of the v-port */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +/* MAC address to add */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4 +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6 -/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 +/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0 /***********************************/ -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS - * Retrieve rmon rx super_class statistics +/* MC_CMD_VPORT_DEL_MAC_ADDRESS + * Delete a MAC address from a v-port */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5 - -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9 - -/***********************************/ -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS - * Retrieve rmon tx super_class statistics - */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6 - -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7 - -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12 -/* 
class */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8 - -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9 - -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_ALLOC_CLASS - * Allocate an rmon class - */ -#define MC_CMD_RMON_ALLOC_CLASS 0xca - -/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */ -#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0 - -/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4 -/* class */ -#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_DEALLOC_CLASS - * Deallocate an rmon class - */ -#define MC_CMD_RMON_DEALLOC_CLASS 0xcb - -/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */ -#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4 -/* class */ -#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0 - -/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_ALLOC_SUPER_CLASS - * Allocate an rmon super_class - */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc - -/* 
MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0 - -/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4 -/* super_class */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS - * Deallocate an rmon tx super_class - */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd - -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4 -/* super_class */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0 - -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_RX_UP_CONV_STATS - * Retrieve up converter statistics - */ -#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce - -/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPI_STATS - * Retrieve rx ipi stats - */ -#define MC_CMD_RMON_RX_IPI_STATS 0xcf - -/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0 -#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5 -#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5 -#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS - * Retrieve rx ipsec cntxt_ptr indexed stats - */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0 - -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) 
(0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS - * Retrieve rx ipsec port indexed stats - */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1 - -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS - * Retrieve tx ipsec overflow - */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2 - -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_VPORT_ADD_MAC_ADDRESS - * Add a MAC address to a v-port - */ -#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8 - -#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */ -#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10 -/* The handle of the v-port */ -#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0 -/* MAC address to add */ -#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4 -#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6 - -/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */ -#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_VPORT_DEL_MAC_ADDRESS - * Delete a MAC address from a v-port - */ -#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9 - -#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL +#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10 @@ -6877,7 +8176,7 @@ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 
252 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num)) -/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */ +/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 @@ -6920,354 +8219,6 @@ #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 -/***********************************/ -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS - * Retrieve rx class drop stats - */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3 - -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS - * Retrieve rx super class drop stats - */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4 - -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_ERRORS_STATS - * Retrieve rxdp errors - */ -#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5 - -/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4 
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_OVERFLOW_STATS - * Retrieve rxdp overflow - */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6 - -/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPI_STATS - * Retrieve tx ipi stats - */ -#define MC_CMD_RMON_TX_IPI_STATS 0xd7 - -/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0 -#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5 -#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5 -#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS - * Retrieve tx ipsec counters by cntxt_ptr - */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8 - -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS - * Retrieve tx ipsec counters by port - */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9 - -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4 -/* flags */ -#define 
MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS - * Retrieve tx ipsec overflow - */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda - -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_NOWHERE_STATS - * Retrieve tx nowhere stats - */ -#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb - -/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS - * Retrieve tx nowhere qbb stats - */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc - -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4 -#define 
MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_ERRORS_STATS - * Retrieve rxdp errors - */ -#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd - -/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_OVERFLOW_STATS - * Retrieve rxdp overflow - */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde - -/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_COLLECT_CLASS_STATS - * Explicitly collect class stats at the specified evb port - */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf - -/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4 -/* The port id associated with the vport/pport at which to collect class stats - */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0 - -/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4 -/* class */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS - * Explicitly collect class stats at the specified evb port - */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0 - -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4 -/* The port id associated with the vport/pport at which to collect class stats - */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0 - -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */ -#define 
MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4 -/* super_class */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0 - - /***********************************/ /* MC_CMD_GET_CLOCK * Return the system and PDCPU clock frequencies. @@ -7296,22 +8247,66 @@ #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_SET_CLOCK_IN msgrequest */ -#define MC_CMD_SET_CLOCK_IN_LEN 12 -/* Requested system frequency in MHz; 0 leaves unchanged. */ +#define MC_CMD_SET_CLOCK_IN_LEN 28 +/* Requested frequency in MHz for system clock domain */ #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 -/* Requested inter-core frequency in MHz; 0 leaves unchanged. */ +/* enum: Leave the system clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for inter-core clock domain */ #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 -/* Request DPCPU frequency in MHz; 0 leaves unchanged. */ +/* enum: Leave the inter-core clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for DPCPU clock domain */ #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +/* enum: Leave the DPCPU clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for PCS clock domain */ +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +/* enum: Leave the PCS clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for MC clock domain */ +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +/* enum: Leave the MC clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for rmon clock domain */ +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +/* enum: Leave the rmon clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for vswitch clock domain */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +/* enum: Leave the vswitch clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 /* MC_CMD_SET_CLOCK_OUT msgresponse */ -#define MC_CMD_SET_CLOCK_OUT_LEN 12 +#define MC_CMD_SET_CLOCK_OUT_LEN 28 /* Resulting system frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +/* enum: The system clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 /* Resulting inter-core frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +/* enum: The inter-core clock domain doesn't exist / isn't used */ +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 /* Resulting DPCPU frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +/* enum: The dpcpu clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +/* Resulting PCS frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +/* enum: The PCS clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +/* Resulting MC frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +/* enum: The MC clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +/* Resulting rmon frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +/* enum: The rmon clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +/* Resulting vswitch 
frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +/* enum: The vswitch clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 /***********************************/ @@ -7325,12 +8320,22 @@ /* MC_CMD_DPCPU_RPC_IN msgrequest */ #define MC_CMD_DPCPU_RPC_IN_LEN 36 #define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 -/* enum: RxDPCPU */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0 +/* enum: RxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 /* enum: TxDPCPU0 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 /* enum: TxDPCPU1 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +/* enum: RxDPCPU1 (Medford only) */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +/* enum: RxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_RX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +/* enum: TxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_TX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be * initialised to zero */ @@ -7417,6 +8422,25 @@ #define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 +/***********************************/ +/* MC_CMD_SHMBOOT_OP + * Special operations to support (for now) shmboot. + */ +#define MC_CMD_SHMBOOT_OP 0xe6 + +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SHMBOOT_OP_IN msgrequest */ +#define MC_CMD_SHMBOOT_OP_IN_LEN 4 +/* Identifies the operation to perform */ +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +/* enum: Copy slave_data section to the slave core. (Greenport only) */ +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 + +/* MC_CMD_SHMBOOT_OP_OUT msgresponse */ +#define MC_CMD_SHMBOOT_OP_OUT_LEN 0 + + /***********************************/ /* MC_CMD_CAP_BLK_READ * Read multiple 64bit words from capture block memory @@ -7730,6 +8754,8 @@ * more data is returned. */ #define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +/* enum: Read Figure Of Merit (eye quality, higher is better). 
*/ +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 @@ -7762,20 +8788,32 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: Attenuation (0-15) */ +/* enum: Attenuation (0-15, TBD for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 -/* enum: CTLE Boost (0-15) */ +/* enum: CTLE Boost (0-15, TBD for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 -/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD + * for Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 -/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 -/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 -/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 -/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +/* enum: Edge DFE DLEV (TBD for Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -7865,6 +8903,8 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 /* enum: TX Slew Rate Fine control */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +/* enum: TX Termination Impedance control */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -7955,6 +8995,20 @@ #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 + +/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ +#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 + /***********************************/ /* MC_CMD_PCIE_TUNE @@ -8224,6 +9278,8 @@ #define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 /* enum: validate application */ #define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +/* enum: mask application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 /* arguments specific to this particular operation */ #define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 #define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 @@ 
-8258,10 +9314,22 @@ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 +/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 +/* flag */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 + +/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 + /***********************************/ /* MC_CMD_SET_PORT_SNIFF_CONFIG - * Configure port sniffing for the physical port associated with the calling + * Configure RX port sniffing for the physical port associated with the calling * function. Only a privileged function may change the port sniffing * configuration. A copy of all traffic delivered to the host (non-promiscuous * mode) or all traffic arriving at the port (promiscuous mode) may be @@ -8299,7 +9367,7 @@ /***********************************/ /* MC_CMD_GET_PORT_SNIFF_CONFIG - * Obtain the current port sniffing configuration for the physical port + * Obtain the current RX port sniffing configuration for the physical port * associated with the calling function. Only a privileged function may read * the configuration. */ @@ -8330,4 +9398,673 @@ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +/***********************************/ +/* MC_CMD_SET_PARSER_DISP_CONFIG + * Change configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9 + +#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) +/* the type of configuration setting to change */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +/* enum: Per-TXQ enable for multicast UDP destination lookup for possible + * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +/* enum: Per-v-adaptor enable for suppression of self-transmissions on the + * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single + * boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +/* handle for the entity to update: queue handle, EVB port ID, etc. depending + * on the type of configuration setting being changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +/* new value: the details depend on the type of configuration setting being + * changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61 + +/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_CONFIG + * Read configuration related to the parser-dispatcher subsystem. 
+ */ +#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa + +#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 +/* the type of configuration setting to read */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ +/* handle for the entity to query: queue handle, EVB port ID, etc. depending on + * the type of configuration setting being read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 + +/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) +/* current value: the details depend on the type of configuration setting being + * read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG + * Configure TX port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic transmitted through the port may be + * delivered to a specific queue, or a set of queues with RSS. Note that these + * packets are delivered with transmit timestamps in the packet prefix, not + * receive timestamps, so it is likely that the queue(s) will need to be + * dedicated as TX sniff receivers. + */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb + +#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG + * Obtain the current TX port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. 
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
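These MCDI messages are flat structures of little-endian dwords, so a caller builds a request with the _OFST/_LEN names and reads each response field back at its _OFST offset. A minimal sketch (not part of the patch; the wrapper function is hypothetical) of issuing MC_CMD_GET_PCIE_RESOURCE_INFO, which takes an empty request, using the sfc driver's existing MCDI helpers (MCDI_DECLARE_BUF, MCDI_DWORD, efx_mcdi_rpc):

/* Hypothetical helper: ask the MC how many VFs the device can expose and
 * how many MSI-X vectors each VF gets by default.
 */
static int efx_get_pcie_resource_info(struct efx_nic *efx, u32 *max_vfs,
				      u32 *default_vf_vectors)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	/* No request body: MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN is 0 */
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PCIE_RESOURCE_INFO, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN)
		return -EIO;

	/* Each field is a 32-bit dword at the _OFST offsets defined above */
	*max_vfs = MCDI_DWORD(outbuf, GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS);
	*default_vf_vectors =
		MCDI_DWORD(outbuf,
			   GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS);
	return 0;
}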
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
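The FUNCTION field used by MC_CMD_PRIVILEGE_MASK and MC_CMD_LINK_STATE_MODE is a packed PF/VF pair: the PF number occupies bits [15:0] and the VF number bits [31:16], which is where "PF 0 = 0xFFFF0000" (PF 0 with VF_NULL in the VF half) and "VF 1,3 = 0x00030001" (PF 1, VF 3) come from. A minimal sketch of a read-only privilege-mask query, assuming the sfc driver's existing MCDI helpers (MCDI_DECLARE_BUF, MCDI_POPULATE_DWORD_2, MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc); the wrapper function itself is hypothetical, not part of the patch:

static int efx_read_privilege_mask(struct efx_nic *efx, unsigned int pf,
				   unsigned int vf, u32 *mask)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
	size_t outlen;
	int rc;

	/* Pack PF into bits [15:0] and VF into bits [31:16]; pass
	 * MC_CMD_PRIVILEGE_MASK_IN_VF_NULL as vf to address a PF itself.
	 */
	MCDI_POPULATE_DWORD_2(inbuf, PRIVILEGE_MASK_IN_FUNCTION,
			      PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
			      PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
	/* DO_CHANGE (the MSB) left clear: read the mask without setting it */
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_NEW_MASK, 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
		return -EIO;
	*mask = MCDI_DWORD(outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
	return 0;
}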
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. 
For privilege + * definitions refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 + +/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_BYTES + * Read XPM memory + */ +#define MC_CMD_XPM_READ_BYTES 0x103 + +#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_BYTES_IN msgrequest */ +#define MC_CMD_XPM_READ_BYTES_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252 +#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num)) +/* Data */ +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_BYTES + * Write XPM memory + */ +#define MC_CMD_XPM_WRITE_BYTES 0x104 + +#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252 +#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) +/* Start address (byte) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 +/* Data */ +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244 + +/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_SECTOR + * Read XPM sector + */ +#define MC_CMD_XPM_READ_SECTOR 0x105 + +#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8 +/* Sector index */ +#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0 +/* Sector size */ +#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4 + +/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36 +#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) +/* Sector type */ +#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ +/* Sector data */ +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_SECTOR + * Write XPM sector + */ +#define MC_CMD_XPM_WRITE_SECTOR 0x106 + +#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num)) +/* If writing fails due to an uncorrectable error, try up to RETRIES following + * sectors (or until no more space available). If 0, only one write attempt is + * made. 
Note that uncorrectable errors are unlikely, thanks to XPM self-repair + * mechanism. + */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 +/* Sector type */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 +/* Enum values, see field(s): */ +/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ +/* Sector size */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 +/* Sector data */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32 + +/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 +/* New sector index */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 + + +/***********************************/ +/* MC_CMD_XPM_INVALIDATE_SECTOR + * Invalidate XPM sector + */ +#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 + +#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 +/* Sector index */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 + +/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_BLANK_CHECK + * Blank-check XPM memory and report bad locations + */ +#define MC_CMD_XPM_BLANK_CHECK 0x108 + +#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ +#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) +/* Total number of bad (non-blank) locations */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 +/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit + * into MCDI response) + */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124 + + +/***********************************/ +/* MC_CMD_XPM_REPAIR + * Blank-check and repair XPM memory + */ +#define MC_CMD_XPM_REPAIR 0x109 + +#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_REPAIR_IN msgrequest */ +#define MC_CMD_XPM_REPAIR_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_REPAIR_OUT msgresponse */ +#define MC_CMD_XPM_REPAIR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_DECODER_TEST + * Test XPM memory address decoders for gross manufacturing defects. Can only + * be performed on an unprogrammed part. + */ +#define MC_CMD_XPM_DECODER_TEST 0x10a + +#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */ +#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0 + +/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */ +#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_TEST + * XPM memory write test. 
Test XPM write logic for gross manufacturing defects + * by writing to a dedicated test row. There are 16 locations in the test row + * and the test can only be performed on locations that have not been + * previously used (i.e. can be run at most 16 times). The test will pick the + * first available location to use, or fail with ENOSPC if none left. + */ +#define MC_CMD_XPM_WRITE_TEST 0x10b + +#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */ +#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0 + +/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0 + + #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 47d1e3a96..24038ef96 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -219,6 +219,7 @@ struct efx_tx_buffer { * @tso_packets: Number of packets via the TSO xmit path * @pushes: Number of times the TX push feature has been used * @pio_packets: Number of times the TX PIO feature has been used + * @xmit_more_available: Are any packets waiting to be pushed to the NIC * @empty_read_count: If the completion path has seen the queue as empty * and the transmission path has not yet checked this, the value of * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. @@ -253,6 +254,7 @@ struct efx_tx_queue { unsigned int tso_packets; unsigned int pushes; unsigned int pio_packets; + bool xmit_more_available; /* Statistics to supplement MAC stats */ unsigned long tx_packets; @@ -925,6 +927,7 @@ struct vfdi_status; * @stats_lock: Statistics update lock. Must be held when calling * efx_nic_type::{update,start,stop}_stats. * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb + * @mc_promisc: Whether in multicast promiscuous mode when last changed * * This is stored in the private area of the &struct net_device. 
*/ @@ -971,6 +974,7 @@ struct efx_nic { unsigned next_buffer_table; unsigned int max_channels; + unsigned int max_tx_channels; unsigned n_channels; unsigned n_rx_channels; unsigned rss_spread; @@ -1072,6 +1076,7 @@ struct efx_nic { int last_irq_cpu; spinlock_t stats_lock; atomic_t n_rx_noskb_drops; + bool mc_promisc; }; static inline int efx_dev_registered(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 31ff9084d..0b536e27d 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -506,6 +506,7 @@ enum { * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 + * @workaround_26807: Flag: firmware supports workaround for bug 26807 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated * after MC reboot * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of @@ -535,6 +536,7 @@ struct efx_ef10_nic_data { bool rx_rss_context_exclusive; u64 stats[EF10_STAT_COUNT]; bool workaround_35388; + bool workaround_26807; bool must_check_datapath_caps; u32 datapath_caps; unsigned int rx_dpcpu_fw_id; diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index b605dfd5c..9d78830da 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -114,7 +114,10 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) if (efx->type->test_nvram) { rc = efx->type->test_nvram(efx); - tests->nvram = rc ? -1 : 1; + if (rc == -EPERM) + rc = 0; + else + tests->nvram = rc ? -1 : 1; } return rc; @@ -253,6 +256,12 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, mutex_lock(&efx->mac_lock); rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); mutex_unlock(&efx->mac_lock); + if (rc == -EPERM) + rc = 0; + else + netif_info(efx, drv, efx->net_dev, + "%s phy selftest\n", rc ? 
"Failed" : "Passed"); + return rc; } @@ -661,6 +670,9 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, wmb(); kfree(state); + if (rc == -EPERM) + rc = 0; + return rc; } diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index b323b9167..2219b5424 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -262,6 +262,7 @@ static int siena_probe_nic(struct efx_nic *efx) } efx->max_channels = EFX_MAX_CHANNELS; + efx->max_tx_channels = EFX_MAX_CHANNELS; efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; @@ -1042,9 +1043,5 @@ const struct efx_nic_type siena_a0_nic_type = { .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ), + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), }; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 1833a0146..67f6afaa0 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -431,8 +431,20 @@ finish_packet: efx_tx_maybe_stop_queue(tx_queue); /* Pass off to hardware */ - if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) + if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); + + /* There could be packets left on the partner queue if those + * SKBs had skb->xmit_more set. If we do not push those they + * could be left for a long time and cause a netdev watchdog. + */ + if (txq2->xmit_more_available) + efx_nic_push_buffers(txq2); + efx_nic_push_buffers(tx_queue); + } else { + tx_queue->xmit_more_available = skb->xmit_more; + } tx_queue->tx_packets++; @@ -722,6 +734,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->read_count = 0; tx_queue->old_read_count = 0; tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; + tx_queue->xmit_more_available = false; /* Set up TX descriptor ring */ efx_nic_init_tx(tx_queue); @@ -747,6 +760,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) ++tx_queue->read_count; } + tx_queue->xmit_more_available = false; netdev_tx_reset_queue(tx_queue->core_txq); } @@ -1302,8 +1316,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, efx_tx_maybe_stop_queue(tx_queue); /* Pass off to hardware */ - if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) + if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); + + /* There could be packets left on the partner queue if those + * SKBs had skb->xmit_more set. If we do not push those they + * could be left for a long time and cause a netdev watchdog. 
+ */ + if (txq2->xmit_more_available) + efx_nic_push_buffers(txq2); + efx_nic_push_buffers(tx_queue); + } else { + tx_queue->xmit_more_available = skb->xmit_more; + } tx_queue->tso_bursts++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index 67d9fdeed..664f59697 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -1031,36 +1031,8 @@ err_out: static void print_packet( byte * buf, int length ) { #if 0 - int i; - int remainder; - int lines; - - pr_dbg("Packet of length %d\n", length); - lines = length / 16; - remainder = length % 16; - - for ( i = 0; i < lines ; i ++ ) { - int cur; - - printk(KERN_DEBUG); - for ( cur = 0; cur < 8; cur ++ ) { - byte a, b; - - a = *(buf ++ ); - b = *(buf ++ ); - pr_cont("%02x%02x ", a, b); - } - pr_cont("\n"); - } - printk(KERN_DEBUG); - for ( i = 0; i < remainder/2 ; i++ ) { - byte a, b; - - a = *(buf ++ ); - b = *(buf ++ ); - pr_cont("%02x%02x ", a, b); - } - pr_cont("\n"); + print_hex_dump_debug(DRV_NAME, DUMP_PREFIX_OFFSET, 16, 1, + buf, length, true); #endif } #endif diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 959aeeade..3b4cd8a26 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -59,7 +59,9 @@ #include #include #include +#include #include +#include #include "smsc911x.h" @@ -2362,59 +2364,50 @@ static const struct smsc911x_ops shifted_smsc911x_ops = { .tx_writefifo = smsc911x_tx_writefifo_shift, }; -#ifdef CONFIG_OF -static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config, - struct device_node *np) +static int smsc911x_probe_config(struct smsc911x_platform_config *config, + struct device *dev) { - const char *mac; + int phy_interface; u32 width = 0; + int err; - if (!np) - return -ENODEV; - - config->phy_interface = of_get_phy_mode(np); + phy_interface = device_get_phy_mode(dev); + if (phy_interface < 0) + phy_interface = PHY_INTERFACE_MODE_NA; + config->phy_interface = phy_interface; - mac = of_get_mac_address(np); - if (mac) - memcpy(config->mac, mac, ETH_ALEN); + device_get_mac_address(dev, config->mac, ETH_ALEN); - of_property_read_u32(np, "reg-shift", &config->shift); - - of_property_read_u32(np, "reg-io-width", &width); - if (width == 4) + err = device_property_read_u32(dev, "reg-io-width", &width); + if (err == -ENXIO) + return err; + if (!err && width == 4) config->flags |= SMSC911X_USE_32BIT; else config->flags |= SMSC911X_USE_16BIT; - if (of_get_property(np, "smsc,irq-active-high", NULL)) + device_property_read_u32(dev, "reg-shift", &config->shift); + + if (device_property_present(dev, "smsc,irq-active-high")) config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH; - if (of_get_property(np, "smsc,irq-push-pull", NULL)) + if (device_property_present(dev, "smsc,irq-push-pull")) config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL; - if (of_get_property(np, "smsc,force-internal-phy", NULL)) + if (device_property_present(dev, "smsc,force-internal-phy")) config->flags |= SMSC911X_FORCE_INTERNAL_PHY; - if (of_get_property(np, "smsc,force-external-phy", NULL)) + if (device_property_present(dev, "smsc,force-external-phy")) config->flags |= SMSC911X_FORCE_EXTERNAL_PHY; - if (of_get_property(np, "smsc,save-mac-address", NULL)) + if (device_property_present(dev, "smsc,save-mac-address")) config->flags |= SMSC911X_SAVE_MAC_ADDRESS; return 0; } -#else -static inline int smsc911x_probe_config_dt( - struct smsc911x_platform_config *config, - 
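
For the two sfc TX hunks above: with skb->xmit_more the stack batches doorbell writes, so descriptors may legally sit unpushed on a queue. Each sfc TX queue has a checksum-offload partner sharing the same core netdev queue, so before ringing its own doorbell the driver now flushes any deferred work on the partner; otherwise those packets could linger until the netdev watchdog fires. Condensed, the added logic at both call sites is:

	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		if (txq2->xmit_more_available)	/* flush deferred partner work */
			efx_nic_push_buffers(txq2);
		efx_nic_push_buffers(tx_queue);	/* then push our own */
	} else {
		tx_queue->xmit_more_available = skb->xmit_more; /* defer push */
	}
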
struct device_node *np) -{ - return -ENODEV; -} -#endif /* CONFIG_OF */ static int smsc911x_drv_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct net_device *dev; struct smsc911x_data *pdata; struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); @@ -2435,7 +2428,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev) res_size = resource_size(res); irq = platform_get_irq(pdev, 0); - if (irq <= 0) { + if (irq == -EPROBE_DEFER) { + retval = -EPROBE_DEFER; + goto out_0; + } else if (irq <= 0) { pr_warn("Could not allocate irq resource\n"); retval = -ENODEV; goto out_0; @@ -2478,7 +2474,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) goto out_disable_resources; } - retval = smsc911x_probe_config_dt(&pdata->config, np); + retval = smsc911x_probe_config(&pdata->config, &pdev->dev); if (retval && config) { /* copy config parameters across to pdata */ memcpy(&pdata->config, config, sizeof(pdata->config)); @@ -2654,6 +2650,12 @@ static const struct of_device_id smsc911x_dt_ids[] = { MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); #endif +static const struct acpi_device_id smsc911x_acpi_match[] = { + { "ARMH9118", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match); + static struct platform_driver smsc911x_driver = { .probe = smsc911x_drv_probe, .remove = smsc911x_drv_remove, @@ -2661,6 +2663,7 @@ static struct platform_driver smsc911x_driver = { .name = SMSC_CHIPNAME, .pm = SMSC911X_PM_OPS, .of_match_table = of_match_ptr(smsc911x_dt_ids), + .acpi_match_table = ACPI_PTR(smsc911x_acpi_match), }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index e817a1a44..b1e5f2470 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -16,6 +16,46 @@ #include "stmmac.h" #include "stmmac_platform.h" +static int dwmac_generic_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + if (pdev->dev.of_node) { + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); + } + } else { + plat_dat = dev_get_platdata(&pdev->dev); + if (!plat_dat) { + dev_err(&pdev->dev, "no platform data provided\n"); + return -EINVAL; + } + + /* Set default value for multicast hash bins */ + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat->unicast_filter_entries = 1; + } + + /* Custom initialisation (if needed) */ + if (plat_dat->init) { + ret = plat_dat->init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + } + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} + static const struct of_device_id dwmac_generic_match[] = { { .compatible = "st,spear600-gmac"}, { .compatible = "snps,dwmac-3.610"}, @@ -27,7 +67,7 @@ static const struct of_device_id dwmac_generic_match[] = { MODULE_DEVICE_TABLE(of, dwmac_generic_match); static struct platform_driver dwmac_generic_driver = { - .probe = stmmac_pltfr_probe, + .probe = dwmac_generic_probe, .remove = stmmac_pltfr_remove, .driver = { .name = STMMAC_RESOURCE_NAME, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index f0e4bb4e3..9d89bdbf0 100644 --- 
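
The smsc911x conversion above is the standard recipe for firmware-agnostic probing: the device_property_*() helpers read the same named properties from either a device-tree node or an ACPI companion (here matched via the ARMH9118 _HID), so a single probe path serves both. The idiom, in a short sketch reusing this driver's own property names:

	u32 shift = 0;

	/* works identically for OF- and ACPI-described devices */
	device_property_read_u32(dev, "reg-shift", &shift);
	if (device_property_present(dev, "smsc,irq-active-high"))
		config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
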
a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -248,23 +248,40 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) return NULL; } -static void *ipq806x_gmac_setup(struct platform_device *pdev) +static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct ipq806x_gmac *gmac = priv; + + ipq806x_gmac_set_speed(gmac, speed); +} + +static int ipq806x_gmac_probe(struct platform_device *pdev) { + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; struct device *dev = &pdev->dev; struct ipq806x_gmac *gmac; int val; void *err; + val = stmmac_get_platform_resources(pdev, &stmmac_res); + if (val) + return val; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); if (!gmac) - return ERR_PTR(-ENOMEM); + return -ENOMEM; gmac->pdev = pdev; err = ipq806x_gmac_of_parse(gmac); - if (err) { + if (IS_ERR(err)) { dev_err(dev, "device tree parsing error\n"); - return err; + return PTR_ERR(err); } regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, @@ -285,7 +302,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", phy_modes(gmac->phy_mode)); - return NULL; + return -EINVAL; } regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); @@ -304,7 +321,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", phy_modes(gmac->phy_mode)); - return NULL; + return -EINVAL; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); @@ -327,30 +344,21 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev) 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); } - return gmac; -} + plat_dat->has_gmac = true; + plat_dat->bsp_priv = gmac; + plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; -static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) -{ - struct ipq806x_gmac *gmac = priv; - - ipq806x_gmac_set_speed(gmac, speed); + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } -static const struct stmmac_of_data ipq806x_gmac_data = { - .has_gmac = 1, - .setup = ipq806x_gmac_setup, - .fix_mac_speed = ipq806x_gmac_fix_mac_speed, -}; - static const struct of_device_id ipq806x_gmac_dwmac_match[] = { - { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data }, + { .compatible = "qcom,ipq806x-gmac" }, { } }; MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match); static struct platform_driver ipq806x_gmac_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = ipq806x_gmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "ipq806x-gmac-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index cb888d3eb..78e9d1861 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -25,66 +25,53 @@ # define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0 # define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4 -struct lpc18xx_dwmac_priv_data { +static int lpc18xx_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; struct regmap *reg; - int interface; -}; + u8 ethmode; + int ret; -static void *lpc18xx_dwmac_setup(struct platform_device *pdev) -{ - struct lpc18xx_dwmac_priv_data *dwmac; + ret = 
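
dwmac-generic above sets the template that every glue conversion that follows (ipq806x, lpc18xx, meson, rk, socfpga, sti, sunxi) repeats: fetch resources, parse the common DT properties, hang the SoC specifics off plat_dat, then call the core probe. A sketch for a hypothetical "foo" SoC:

	static int foo_dwmac_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat_dat;
		struct stmmac_resources stmmac_res;
		int ret;

		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
		if (ret)
			return ret;

		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
		if (IS_ERR(plat_dat))
			return PTR_ERR(plat_dat);

		/* SoC glue: bsp_priv, fix_mac_speed, init/exit hooks, ... */

		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	}
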
stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; - dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return ERR_PTR(-ENOMEM); + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); - dwmac->interface = of_get_phy_mode(pdev->dev.of_node); - if (dwmac->interface < 0) - return ERR_PTR(dwmac->interface); + plat_dat->has_gmac = true; - dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); - if (IS_ERR(dwmac->reg)) { - dev_err(&pdev->dev, "Syscon lookup failed\n"); - return dwmac->reg; + reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); + if (IS_ERR(reg)) { + dev_err(&pdev->dev, "syscon lookup failed\n"); + return PTR_ERR(reg); } - return dwmac; -} - -static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv) -{ - struct lpc18xx_dwmac_priv_data *dwmac = priv; - u8 ethmode; - - if (dwmac->interface == PHY_INTERFACE_MODE_MII) { + if (plat_dat->interface == PHY_INTERFACE_MODE_MII) { ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII; - } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { + } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) { ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; } else { dev_err(&pdev->dev, "Only MII and RMII mode supported\n"); return -EINVAL; } - regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6, + regmap_update_bits(reg, LPC18XX_CREG_CREG6, LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); - return 0; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } -static const struct stmmac_of_data lpc18xx_dwmac_data = { - .has_gmac = 1, - .setup = lpc18xx_dwmac_setup, - .init = lpc18xx_dwmac_init, -}; - static const struct of_device_id lpc18xx_dwmac_match[] = { - { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data }, + { .compatible = "nxp,lpc1850-dwmac" }, { } }; MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match); static struct platform_driver lpc18xx_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = lpc18xx_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "lpc18xx-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index 61a324a87..c1bac1912 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -47,36 +47,45 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed) writel(val, dwmac->reg); } -static void *meson6_dwmac_setup(struct platform_device *pdev) +static int meson6_dwmac_probe(struct platform_device *pdev) { + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; struct meson_dwmac *dwmac; struct resource *res; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); if (!dwmac) - return ERR_PTR(-ENOMEM); + return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dwmac->reg)) - return ERR_CAST(dwmac->reg); + return PTR_ERR(dwmac->reg); - return dwmac; -} + plat_dat->bsp_priv = dwmac; + plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; -static const struct stmmac_of_data meson6_dwmac_data = { - .setup = meson6_dwmac_setup, - .fix_mac_speed = meson6_dwmac_fix_mac_speed, -}; + return stmmac_dvr_probe(&pdev->dev, plat_dat, 
&stmmac_res); +} static const struct of_device_id meson6_dwmac_match[] = { - { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data}, + { .compatible = "amlogic,meson6-dwmac" }, { } }; MODULE_DEVICE_TABLE(of, meson6_dwmac_match); static struct platform_driver meson6_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = meson6_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "meson6-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 00a1e1e09..11baa4b19 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -46,7 +46,7 @@ struct rk_priv_data { struct platform_device *pdev; int phy_iface; struct regulator *regulator; - struct rk_gmac_ops *ops; + const struct rk_gmac_ops *ops; bool clk_enabled; bool clock_input; @@ -177,7 +177,7 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) } } -struct rk_gmac_ops rk3288_ops = { +static const struct rk_gmac_ops rk3288_ops = { .set_to_rgmii = rk3288_set_to_rgmii, .set_to_rmii = rk3288_set_to_rmii, .set_rgmii_speed = rk3288_set_rgmii_speed, @@ -289,7 +289,7 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) } } -struct rk_gmac_ops rk3368_ops = { +static const struct rk_gmac_ops rk3368_ops = { .set_to_rgmii = rk3368_set_to_rgmii, .set_to_rmii = rk3368_set_to_rmii, .set_rgmii_speed = rk3368_set_rgmii_speed, @@ -448,7 +448,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) } static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, - struct rk_gmac_ops *ops) + const struct rk_gmac_ops *ops) { struct rk_priv_data *bsp_priv; struct device *dev = &pdev->dev; @@ -529,16 +529,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, return bsp_priv; } -static void *rk3288_gmac_setup(struct platform_device *pdev) -{ - return rk_gmac_setup(pdev, &rk3288_ops); -} - -static void *rk3368_gmac_setup(struct platform_device *pdev) -{ - return rk_gmac_setup(pdev, &rk3368_ops); -} - static int rk_gmac_init(struct platform_device *pdev, void *priv) { struct rk_priv_data *bsp_priv = priv; @@ -576,31 +566,52 @@ static void rk_fix_speed(void *priv, unsigned int speed) dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface); } -static const struct stmmac_of_data rk3288_gmac_data = { - .has_gmac = 1, - .fix_mac_speed = rk_fix_speed, - .setup = rk3288_gmac_setup, - .init = rk_gmac_init, - .exit = rk_gmac_exit, -}; +static int rk_gmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + const struct rk_gmac_ops *data; + int ret; -static const struct stmmac_of_data rk3368_gmac_data = { - .has_gmac = 1, - .fix_mac_speed = rk_fix_speed, - .setup = rk3368_gmac_setup, - .init = rk_gmac_init, - .exit = rk_gmac_exit, -}; + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "no of match data provided\n"); + return -EINVAL; + } + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->has_gmac = true; + plat_dat->init = rk_gmac_init; + plat_dat->exit = rk_gmac_exit; + plat_dat->fix_mac_speed = rk_fix_speed; + + plat_dat->bsp_priv = rk_gmac_setup(pdev, data); + if (IS_ERR(plat_dat->bsp_priv)) + return PTR_ERR(plat_dat->bsp_priv); + + ret = rk_gmac_init(pdev, 
plat_dat->bsp_priv); + if (ret) + return ret; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} static const struct of_device_id rk_gmac_dwmac_match[] = { - { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data}, - { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data}, + { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, + { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { } }; MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match); static struct platform_driver rk_gmac_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = rk_gmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "rk_gmac-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 8141c5b84..401383b25 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -175,31 +175,6 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) return 0; } -static void *socfpga_dwmac_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - int ret; - struct socfpga_dwmac *dwmac; - - dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return ERR_PTR(-ENOMEM); - - ret = socfpga_dwmac_parse_data(dwmac, dev); - if (ret) { - dev_err(dev, "Unable to parse OF data\n"); - return ERR_PTR(ret); - } - - ret = socfpga_dwmac_setup(dwmac); - if (ret) { - dev_err(dev, "couldn't setup SoC glue (%d)\n", ret); - return ERR_PTR(ret); - } - - return dwmac; -} - static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv) { struct socfpga_dwmac *dwmac = priv; @@ -257,21 +232,58 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) return ret; } -static const struct stmmac_of_data socfpga_gmac_data = { - .setup = socfpga_dwmac_probe, - .init = socfpga_dwmac_init, - .exit = socfpga_dwmac_exit, - .fix_mac_speed = socfpga_dwmac_fix_mac_speed, -}; +static int socfpga_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct device *dev = &pdev->dev; + int ret; + struct socfpga_dwmac *dwmac; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return -ENOMEM; + + ret = socfpga_dwmac_parse_data(dwmac, dev); + if (ret) { + dev_err(dev, "Unable to parse OF data\n"); + return ret; + } + + ret = socfpga_dwmac_setup(dwmac); + if (ret) { + dev_err(dev, "couldn't setup SoC glue (%d)\n", ret); + return ret; + } + + plat_dat->bsp_priv = dwmac; + plat_dat->init = socfpga_dwmac_init; + plat_dat->exit = socfpga_dwmac_exit; + plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; + + ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} static const struct of_device_id socfpga_dwmac_match[] = { - { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data }, + { .compatible = "altr,socfpga-stmmac" }, { } }; MODULE_DEVICE_TABLE(of, socfpga_dwmac_match); static struct platform_driver socfpga_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = socfpga_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "socfpga-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index a2e8111c5..7f6f4a4fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "stmmac_platform.h" @@ -128,6 +129,11 @@ struct sti_dwmac { struct device *dev; struct regmap *regmap; u32 speed; + void (*fix_retime_src)(void *priv, unsigned int speed); +}; + +struct sti_dwmac_of_data { + void (*fix_retime_src)(void *priv, unsigned int speed); }; static u32 phy_intf_sels[] = { @@ -222,8 +228,9 @@ static void stid127_fix_retime_src(void *priv, u32 spd) regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val); } -static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac) +static int sti_dwmac_init(struct platform_device *pdev, void *priv) { + struct sti_dwmac *dwmac = priv; struct regmap *regmap = dwmac->regmap; int iface = dwmac->interface; struct device *dev = dwmac->dev; @@ -241,28 +248,8 @@ static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac) val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; regmap_update_bits(regmap, reg, ENMII_MASK, val); -} - -static int stix4xx_init(struct platform_device *pdev, void *priv) -{ - struct sti_dwmac *dwmac = priv; - u32 spd = dwmac->speed; - - sti_dwmac_ctrl_init(dwmac); - - stih4xx_fix_retime_src(priv, spd); - - return 0; -} -static int stid127_init(struct platform_device *pdev, void *priv) -{ - struct sti_dwmac *dwmac = priv; - u32 spd = dwmac->speed; - - sti_dwmac_ctrl_init(dwmac); - - stid127_fix_retime_src(priv, spd); + dwmac->fix_retime_src(priv, dwmac->speed); return 0; } @@ -334,36 +321,58 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, return 0; } -static void *sti_dwmac_setup(struct platform_device *pdev) +static int sti_dwmac_probe(struct platform_device *pdev) { + struct plat_stmmacenet_data *plat_dat; + const struct sti_dwmac_of_data *data; + struct stmmac_resources stmmac_res; struct sti_dwmac *dwmac; int ret; + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "No OF match data provided\n"); + return -EINVAL; + } + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); if (!dwmac) - return ERR_PTR(-ENOMEM); + return -ENOMEM; ret = sti_dwmac_parse_data(dwmac, pdev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); - return ERR_PTR(ret); + return ret; } - return dwmac; + dwmac->fix_retime_src = data->fix_retime_src; + + plat_dat->bsp_priv = dwmac; + plat_dat->init = sti_dwmac_init; + plat_dat->exit = sti_dwmac_exit; + plat_dat->fix_mac_speed = data->fix_retime_src; + + ret = sti_dwmac_init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } -static const struct stmmac_of_data stih4xx_dwmac_data = { - .fix_mac_speed = stih4xx_fix_retime_src, - .setup = sti_dwmac_setup, - .init = stix4xx_init, - .exit = sti_dwmac_exit, +static const struct sti_dwmac_of_data stih4xx_dwmac_data = { + .fix_retime_src = stih4xx_fix_retime_src, }; -static const struct stmmac_of_data stid127_dwmac_data = { - .fix_mac_speed = stid127_fix_retime_src, - .setup = sti_dwmac_setup, - .init = stid127_init, - .exit = sti_dwmac_exit, +static const struct sti_dwmac_of_data stid127_dwmac_data = { + .fix_retime_src = stid127_fix_retime_src, }; static const 
struct of_device_id sti_dwmac_match[] = { @@ -376,7 +385,7 @@ static const struct of_device_id sti_dwmac_match[] = { MODULE_DEVICE_TABLE(of, sti_dwmac_match); static struct platform_driver sti_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = sti_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "sti-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 15048ca39..52b8ed9bd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -33,35 +33,6 @@ struct sunxi_priv_data { struct regulator *regulator; }; -static void *sun7i_gmac_setup(struct platform_device *pdev) -{ - struct sunxi_priv_data *gmac; - struct device *dev = &pdev->dev; - - gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); - if (!gmac) - return ERR_PTR(-ENOMEM); - - gmac->interface = of_get_phy_mode(dev->of_node); - - gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); - if (IS_ERR(gmac->tx_clk)) { - dev_err(dev, "could not get tx clock\n"); - return gmac->tx_clk; - } - - /* Optional regulator for PHY */ - gmac->regulator = devm_regulator_get_optional(dev, "phy"); - if (IS_ERR(gmac->regulator)) { - if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) - return ERR_PTR(-EPROBE_DEFER); - dev_info(dev, "no regulator found\n"); - gmac->regulator = NULL; - } - - return gmac; -} - #define SUN7I_GMAC_GMII_RGMII_RATE 125000000 #define SUN7I_GMAC_MII_RATE 25000000 @@ -132,25 +103,67 @@ static void sun7i_fix_speed(void *priv, unsigned int speed) } } -/* of_data specifying hardware features and callbacks. - * hardware features were copied from Allwinner drivers. */ -static const struct stmmac_of_data sun7i_gmac_data = { - .has_gmac = 1, - .tx_coe = 1, - .fix_mac_speed = sun7i_fix_speed, - .setup = sun7i_gmac_setup, - .init = sun7i_gmac_init, - .exit = sun7i_gmac_exit, -}; +static int sun7i_gmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct sunxi_priv_data *gmac; + struct device *dev = &pdev->dev; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return -ENOMEM; + + gmac->interface = of_get_phy_mode(dev->of_node); + + gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); + if (IS_ERR(gmac->tx_clk)) { + dev_err(dev, "could not get tx clock\n"); + return PTR_ERR(gmac->tx_clk); + } + + /* Optional regulator for PHY */ + gmac->regulator = devm_regulator_get_optional(dev, "phy"); + if (IS_ERR(gmac->regulator)) { + if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no regulator found\n"); + gmac->regulator = NULL; + } + + /* platform data specifying hardware features and callbacks. + * hardware features were copied from Allwinner drivers. 
*/ + plat_dat->tx_coe = 1; + plat_dat->has_gmac = true; + plat_dat->bsp_priv = gmac; + plat_dat->init = sun7i_gmac_init; + plat_dat->exit = sun7i_gmac_exit; + plat_dat->fix_mac_speed = sun7i_fix_speed; + + ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} static const struct of_device_id sun7i_dwmac_match[] = { - { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, + { .compatible = "allwinner,sun7i-a20-gmac" }, { } }; MODULE_DEVICE_TABLE(of, sun7i_dwmac_match); static struct platform_driver sun7i_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = sun7i_gmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "sun7i-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 771cda2a4..2e51b816a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -721,10 +721,13 @@ static int stmmac_get_ts_info(struct net_device *dev, { struct stmmac_priv *priv = netdev_priv(dev); - if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) { + if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) { - info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (priv->ptp_clock) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b735fa22a..ebf6abc48 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -161,11 +161,16 @@ int stmmac_mdio_reset(struct mii_bus *bus) if (!gpio_request(reset_gpio, "mdio-reset")) { gpio_direction_output(reset_gpio, active_low ? 1 : 0); - udelay(data->delays[0]); + if (data->delays[0]) + msleep(DIV_ROUND_UP(data->delays[0], 1000)); + gpio_set_value(reset_gpio, active_low ? 0 : 1); - udelay(data->delays[1]); + if (data->delays[1]) + msleep(DIV_ROUND_UP(data->delays[1], 1000)); + gpio_set_value(reset_gpio, active_low ? 1 : 0); - udelay(data->delays[2]); + if (data->delays[2]) + msleep(DIV_ROUND_UP(data->delays[2], 1000)); } } #endif diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index bcdc8955c..d02691ba3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -104,32 +104,16 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) * this function is to read the driver parameters from device-tree and * set some private fields that will be used by the main at runtime. 
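
On the stmmac_mdio hunk above: the DT reset delays are given in microseconds, but udelay() busy-waits and the multi-millisecond resets some PHYs need should sleep instead. msleep() takes milliseconds, so the conversion rounds up to guarantee the delay is never shortened:

	/* e.g. delays[0] == 10500 us -> DIV_ROUND_UP(10500, 1000) == 11 ms */
	if (data->delays[0])
		msleep(DIV_ROUND_UP(data->delays[0], 1000));
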
*/ -static int stmmac_probe_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat, - const char **mac) +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { struct device_node *np = pdev->dev.of_node; + struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; - const struct of_device_id *device; - struct device *dev = &pdev->dev; - - device = of_match_device(dev->driver->of_match_table, dev); - if (device->data) { - const struct stmmac_of_data *data = device->data; - plat->has_gmac = data->has_gmac; - plat->enh_desc = data->enh_desc; - plat->tx_coe = data->tx_coe; - plat->rx_coe = data->rx_coe; - plat->bugged_jumbo = data->bugged_jumbo; - plat->pmt = data->pmt; - plat->riwt_off = data->riwt_off; - plat->fix_mac_speed = data->fix_mac_speed; - plat->bus_setup = data->bus_setup; - plat->setup = data->setup; - plat->free = data->free; - plat->init = data->init; - plat->exit = data->exit; - } + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return ERR_PTR(-ENOMEM); *mac = of_get_mac_address(np); plat->interface = of_get_phy_mode(np); @@ -151,7 +135,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, /* If phy-handle is not specified, check if we have a fixed-phy */ if (!plat->phy_node && of_phy_is_fixed_link(np)) { if ((of_phy_register_fixed_link(np) < 0)) - return -ENODEV; + return ERR_PTR(-ENODEV); plat->phy_node = of_node_get(np); } @@ -182,6 +166,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, */ plat->maxmtu = JUMBO_LEN; + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + /* * Currently only the properties needed on SPEAr600 * are provided. All other properties should be added @@ -222,7 +212,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, GFP_KERNEL); if (!dma_cfg) { of_node_put(np); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } plat->dma_cfg = dma_cfg; of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); @@ -240,44 +230,34 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); } - return 0; + return plat; } #else -static int stmmac_probe_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat, - const char **mac) +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { - return -ENOSYS; + return ERR_PTR(-ENOSYS); } #endif /* CONFIG_OF */ +EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); -/** - * stmmac_pltfr_probe - platform driver probe. - * @pdev: platform device pointer - * Description: platform_device probe function. It is to allocate - * the necessary platform resources, invoke custom helper (if required) and - * invoke the main probe function. - */ -int stmmac_pltfr_probe(struct platform_device *pdev) +int stmmac_get_platform_resources(struct platform_device *pdev, + struct stmmac_resources *stmmac_res) { - struct stmmac_resources stmmac_res; - int ret = 0; struct resource *res; - struct device *dev = &pdev->dev; - struct plat_stmmacenet_data *plat_dat = NULL; - memset(&stmmac_res, 0, sizeof(stmmac_res)); + memset(stmmac_res, 0, sizeof(*stmmac_res)); /* Get IRQ information early to have an ability to ask for deferred * probe if needed before we went too far with resource allocation. 
*/ - stmmac_res.irq = platform_get_irq_byname(pdev, "macirq"); - if (stmmac_res.irq < 0) { - if (stmmac_res.irq != -EPROBE_DEFER) { - dev_err(dev, + stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); + if (stmmac_res->irq < 0) { + if (stmmac_res->irq != -EPROBE_DEFER) { + dev_err(&pdev->dev, "MAC IRQ configuration information not found\n"); } - return stmmac_res.irq; + return stmmac_res->irq; } /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq @@ -287,64 +267,23 @@ int stmmac_pltfr_probe(struct platform_device *pdev) * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ - stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (stmmac_res.wol_irq < 0) { - if (stmmac_res.wol_irq == -EPROBE_DEFER) + stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (stmmac_res->wol_irq < 0) { + if (stmmac_res->wol_irq == -EPROBE_DEFER) return -EPROBE_DEFER; - stmmac_res.wol_irq = stmmac_res.irq; + stmmac_res->wol_irq = stmmac_res->irq; } - stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (stmmac_res.lpi_irq == -EPROBE_DEFER) + stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (stmmac_res->lpi_irq == -EPROBE_DEFER) return -EPROBE_DEFER; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - stmmac_res.addr = devm_ioremap_resource(dev, res); - if (IS_ERR(stmmac_res.addr)) - return PTR_ERR(stmmac_res.addr); - - plat_dat = dev_get_platdata(&pdev->dev); - - if (!plat_dat) - plat_dat = devm_kzalloc(&pdev->dev, - sizeof(struct plat_stmmacenet_data), - GFP_KERNEL); - if (!plat_dat) { - pr_err("%s: ERROR: no memory", __func__); - return -ENOMEM; - } - - /* Set default value for multicast hash bins */ - plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat_dat->unicast_filter_entries = 1; - - if (pdev->dev.of_node) { - ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac); - if (ret) { - pr_err("%s: main dt probe failed", __func__); - return ret; - } - } + stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); - /* Custom setup (if needed) */ - if (plat_dat->setup) { - plat_dat->bsp_priv = plat_dat->setup(pdev); - if (IS_ERR(plat_dat->bsp_priv)) - return PTR_ERR(plat_dat->bsp_priv); - } - - /* Custom initialisation (if needed)*/ - if (plat_dat->init) { - ret = plat_dat->init(pdev, plat_dat->bsp_priv); - if (unlikely(ret)) - return ret; - } - - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + return PTR_ERR_OR_ZERO(stmmac_res->addr); } -EXPORT_SYMBOL_GPL(stmmac_pltfr_probe); +EXPORT_SYMBOL_GPL(stmmac_get_platform_resources); /** * stmmac_pltfr_remove @@ -361,9 +300,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev) if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv); - if (priv->plat->free) - priv->plat->free(pdev, priv->plat->bsp_priv); - return ret; } EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 71da86d7b..ffeb8d9e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -19,7 +19,14 @@ #ifndef __STMMAC_PLATFORM_H__ #define __STMMAC_PLATFORM_H__ -int stmmac_pltfr_probe(struct platform_device *pdev); +#include "stmmac.h" + +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); + +int 
stmmac_get_platform_resources(struct platform_device *pdev, + struct stmmac_resources *stmmac_res); + int stmmac_pltfr_remove(struct platform_device *pdev); extern const struct dev_pm_ops stmmac_pltfr_pm_ops; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 53fe200e0..cc106d892 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1756,7 +1756,8 @@ static const struct net_device_ops vnet_ops = { #endif }; -static struct vnet *vnet_new(const u64 *local_mac) +static struct vnet *vnet_new(const u64 *local_mac, + struct vio_dev *vdev) { struct net_device *dev; struct vnet *vp; @@ -1790,6 +1791,8 @@ static struct vnet *vnet_new(const u64 *local_mac) NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; + SET_NETDEV_DEV(dev, &vdev->dev); + err = register_netdev(dev); if (err) { pr_err("Cannot register net device, aborting\n"); @@ -1808,7 +1811,8 @@ err_out_free_dev: return ERR_PTR(err); } -static struct vnet *vnet_find_or_create(const u64 *local_mac) +static struct vnet *vnet_find_or_create(const u64 *local_mac, + struct vio_dev *vdev) { struct vnet *iter, *vp; @@ -1821,7 +1825,7 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac) } } if (!vp) - vp = vnet_new(local_mac); + vp = vnet_new(local_mac, vdev); mutex_unlock(&vnet_list_mutex); return vp; @@ -1848,7 +1852,8 @@ static void vnet_cleanup(void) static const char *local_mac_prop = "local-mac-address"; static struct vnet *vnet_find_parent(struct mdesc_handle *hp, - u64 port_node) + u64 port_node, + struct vio_dev *vdev) { const u64 *local_mac = NULL; u64 a; @@ -1869,7 +1874,7 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp, if (!local_mac) return ERR_PTR(-ENODEV); - return vnet_find_or_create(local_mac); + return vnet_find_or_create(local_mac, vdev); } static struct ldc_channel_config vnet_ldc_cfg = { @@ -1923,7 +1928,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); - vp = vnet_find_parent(hp, vdev->mp); + vp = vnet_find_parent(hp, vdev->mp, vdev); if (IS_ERR(vp)) { pr_err("Cannot find port parent vnet\n"); err = PTR_ERR(vp); diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig new file mode 100644 index 000000000..8276ee5a7 --- /dev/null +++ b/drivers/net/ethernet/synopsys/Kconfig @@ -0,0 +1,27 @@ +# +# Synopsys network device configuration +# + +config NET_VENDOR_SYNOPSYS + bool "Synopsys devices" + default y + ---help--- + If you have a network (Ethernet) device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Synopsys devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_SYNOPSYS + +config SYNOPSYS_DWC_ETH_QOS + tristate "Synopsys DWC Ethernet QoS v4.10a support" + select PHYLIB + select CRC32 + select MII + depends on OF && HAS_DMA + ---help--- + This driver supports the DWC Ethernet QoS from Synopsys. + +endif # NET_VENDOR_SYNOPSYS diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile new file mode 100644 index 000000000..7a375723f --- /dev/null +++ b/drivers/net/ethernet/synopsys/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Synopsys network device drivers.
+# + +obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c new file mode 100644 index 000000000..85b332677 --- /dev/null +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -0,0 +1,3019 @@ +/* Synopsys DWC Ethernet Quality-of-Service v4.10a Linux driver + * + * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC). + * This version introduced a lot of changes which break backwards + * compatibility with the non-QoS IP from Synopsys (used in the ST Micro drivers). + * Some fields differ between version 4.00a and 4.10a, mainly the interrupt + * bit fields. The driver could be made compatible with 4.00a if all relevant + * HW errata are handled. + * + * The GMAC is highly configurable at synthesis time. This driver has been + * developed for a subset of the total available feature set. Currently + * it supports: + * - TSO + * - Checksum offload for RX and TX. + * - Energy-efficient Ethernet. + * - GMII PHY interface. + * - The statistics module. + * - Single RX and TX queue. + * + * Copyright (C) 2015 Axis Communications AB. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "dwceqos" +#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver" +#define DRIVER_VERSION "0.9" + +#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + +#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */ + +#define DWCEQOS_LPI_TIMER_MIN 8 +#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1) + +#define DWCEQOS_RX_BUF_SIZE 2048 + +#define DWCEQOS_RX_DCNT 256 +#define DWCEQOS_TX_DCNT 256 + +#define DWCEQOS_HASH_TABLE_SIZE 64 + +/* The size field in the DMA descriptor is 14 bits */ +#define BYTES_PER_DMA_DESC 16376 + +/* Hardware registers */ +#define START_MAC_REG_OFFSET 0x0000 +#define MAX_MAC_REG_OFFSET 0x0bd0 +#define START_MTL_REG_OFFSET 0x0c00 +#define MAX_MTL_REG_OFFSET 0x0d7c +#define START_DMA_REG_OFFSET 0x1000 +#define MAX_DMA_REG_OFFSET 0x117C + +#define REG_SPACE_SIZE 0x1800 + +/* DMA */ +#define REG_DWCEQOS_DMA_MODE 0x1000 +#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004 +#define REG_DWCEQOS_DMA_IS 0x1008 +#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c + +/* DMA channel registers */ +#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100 +#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104 +#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108 +#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114 +#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c +#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120 +#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128 +#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c +#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130 +#define REG_DWCEQOS_DMA_CH0_IE 0x1134 +#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144 +#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c +#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154 +#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c +#define REG_DWCEQOS_DMA_CH0_STA 0x1160 + +#define DWCEQOS_DMA_MODE_TXPR BIT(11) +#define DWCEQOS_DMA_MODE_DA
BIT(1) + +#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31) +#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0) +#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12) + +#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \ + (((x) << 16) & 0x000F0000) +#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3 +#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16) + +#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \ + (((x) << 24) & 0x0F000000) +#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3 +#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24) + +#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1) +#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \ + (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK) +#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1) + +#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16) +#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18) + +#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16) +#define DWCEQOS_DMA_CH_CTRL_START BIT(0) +#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1) +#define DWCEQOS_DMA_CH_TX_OSP BIT(4) +#define DWCEQOS_DMA_CH_TX_TSE BIT(12) + +#define DWCEQOS_DMA_CH0_IE_NIE BIT(15) +#define DWCEQOS_DMA_CH0_IE_AIE BIT(14) +#define DWCEQOS_DMA_CH0_IE_RIE BIT(6) +#define DWCEQOS_DMA_CH0_IE_TIE BIT(0) +#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12) +#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7) + +#define DWCEQOS_DMA_IS_DC0IS BIT(0) +#define DWCEQOS_DMA_IS_MTLIS BIT(16) +#define DWCEQOS_DMA_IS_MACIS BIT(17) + +#define DWCEQOS_DMA_CH0_IS_TI BIT(0) +#define DWCEQOS_DMA_CH0_IS_RI BIT(6) +#define DWCEQOS_DMA_CH0_IS_RBU BIT(7) +#define DWCEQOS_DMA_CH0_IS_FBE BIT(12) +#define DWCEQOS_DMA_CH0_IS_CDE BIT(13) +#define DWCEQOS_DMA_CH0_IS_AIS BIT(14) + +#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16) +#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16) +#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17) + +#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19) +#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19) +#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20) + +/* DMA descriptor bits for RX normal descriptor (read format) */ +#define DWCEQOS_DMA_RDES3_OWN BIT(31) +#define DWCEQOS_DMA_RDES3_INTE BIT(30) +#define DWCEQOS_DMA_RDES3_BUF2V BIT(25) +#define DWCEQOS_DMA_RDES3_BUF1V BIT(24) + +/* DMA descriptor bits for RX normal descriptor (write back format) */ +#define DWCEQOS_DMA_RDES1_IPCE BIT(7) +#define DWCEQOS_DMA_RDES3_ES BIT(15) +#define DWCEQOS_DMA_RDES3_E_JT BIT(14) +#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff) +#define DWCEQOS_DMA_RDES1_PT 0x00000007 +#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0) +#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1) +#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003 + +/* DMA descriptor bits for TX normal descriptor (read format) */ +#define DWCEQOS_DMA_TDES2_IOC BIT(31) +#define DWCEQOS_DMA_TDES3_OWN BIT(31) +#define DWCEQOS_DMA_TDES3_CTXT BIT(30) +#define DWCEQOS_DMA_TDES3_FD BIT(29) +#define DWCEQOS_DMA_TDES3_LD BIT(28) +#define DWCEQOS_DMA_TDES3_CIPH BIT(16) +#define DWCEQOS_DMA_TDES3_CIPP BIT(17) +#define DWCEQOS_DMA_TDES3_CA 0x00030000 +#define DWCEQOS_DMA_TDES3_TSE BIT(18) +#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19) +#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16) + +#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26) + +/* DMA channel states */ +#define DMA_TX_CH_STOPPED 0 +#define DMA_TX_CH_SUSPENDED 6 + +#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12) + +/* MTL */ +#define REG_DWCEQOS_MTL_OPER 0x0c00 +#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c +#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08 +#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38 + +#define REG_DWCEQOS_MTL_IS 0x0c20 +#define 
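
As an illustration of how the DMA_SYSBUS_MODE field helpers above compose (a sketch using the documented defaults; "lp" stands for the driver's struct net_local instance, and the real initialisation lives in the body of this file, not shown here):

	u32 sysbus = DWCEQOS_DMA_SYSBUS_MODE_AAL |
		     DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT |
		     DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
				DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT) |
		     DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
				DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
	writel(sysbus, lp->baseaddr + REG_DWCEQOS_DMA_SYSBUS_MODE);
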
REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00 +#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30 +#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34 +#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c + +#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c + +#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060 + +#define DWCEQOS_MTL_TXQ_TXQEN BIT(3) +#define DWCEQOS_MTL_TXQ_TSF BIT(1) +#define DWCEQOS_MTL_TXQ_FTQ BIT(0) +#define DWCEQOS_MTL_TXQ_TTC512 0x00000070 + +#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8) + +#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12) +#define DWCEQOS_MTL_RXQ_EHFC BIT(7) +#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6) +#define DWCEQOS_MTL_RXQ_FEP BIT(4) +#define DWCEQOS_MTL_RXQ_FUP BIT(3) +#define DWCEQOS_MTL_RXQ_RSF BIT(5) +#define DWCEQOS_MTL_RXQ_RTC32 BIT(0) + +/* MAC */ +#define REG_DWCEQOS_MAC_CFG 0x0000 +#define REG_DWCEQOS_MAC_EXT_CFG 0x0004 +#define REG_DWCEQOS_MAC_PKT_FILT 0x0008 +#define REG_DWCEQOS_MAC_WD_TO 0x000c +#define REG_DWCEQOS_HASTABLE_LO 0x0010 +#define REG_DWCEQOS_HASTABLE_HI 0x0014 +#define REG_DWCEQOS_MAC_IS 0x00b0 +#define REG_DWCEQOS_MAC_IE 0x00b4 +#define REG_DWCEQOS_MAC_STAT 0x00b8 +#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200 +#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204 +#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300 +#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304 +#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0 +#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c +#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120 +#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124 +#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010 +#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014 +#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0 +#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4 +#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8 +#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc +#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090 +#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070 + +#define DWCEQOS_MAC_CFG_ACS BIT(20) +#define DWCEQOS_MAC_CFG_JD BIT(17) +#define DWCEQOS_MAC_CFG_JE BIT(16) +#define DWCEQOS_MAC_CFG_PS BIT(15) +#define DWCEQOS_MAC_CFG_FES BIT(14) +#define DWCEQOS_MAC_CFG_DM BIT(13) +#define DWCEQOS_MAC_CFG_DO BIT(10) +#define DWCEQOS_MAC_CFG_TE BIT(1) +#define DWCEQOS_MAC_CFG_IPC BIT(27) +#define DWCEQOS_MAC_CFG_RE BIT(0) + +#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8)) +#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8)) + +#define DWCEQOS_MAC_IS_LPI_INT BIT(5) +#define DWCEQOS_MAC_IS_MMC_INT BIT(8) + +#define DWCEQOS_MAC_RXQ_EN BIT(1) +#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31) +#define DWCEQOS_MAC_PKT_FILT_RA BIT(31) +#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10) +#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9) +#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8) +#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5) +#define DWCEQOS_MAC_PKT_FILT_PM BIT(4) +#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3) +#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2) +#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1) +#define DWCEQOS_MAC_PKT_FILT_PR BIT(0) + +#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8) +#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2 +#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3 +#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0 +#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1 +#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4 +#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5 +#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c +#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2) +#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0) + +#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3) +#define 
DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21) + +#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0)) + +#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \ + DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \ + DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN) + +#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) + +#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1) +#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16) +#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4) + +/* Features */ +#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16) +#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14) +#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2) +#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13) +#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1) +#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0) + +#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18) +#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6) +#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f)) + +#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \ + (1 + (((feature1) & 0x1fc0000) >> 18)) + +#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21) +#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16) + +#define DWCEQOS_DMA_MODE_SWR BIT(0) + +#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048 + +/* Mac Management Counters */ +#define REG_DWCEQOS_MMC_CTRL 0x0700 +#define REG_DWCEQOS_MMC_RXIRQ 0x0704 +#define REG_DWCEQOS_MMC_TXIRQ 0x0708 +#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c +#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710 + +#define DWCEQOS_MMC_CTRL_CNTRST BIT(0) +#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2) + +#define DWC_MMC_TXLPITRANSCNTR 0x07F0 +#define DWC_MMC_TXLPIUSCNTR 0x07EC +#define DWC_MMC_TXOVERSIZE_G 0x0778 +#define DWC_MMC_TXVLANPACKETS_G 0x0774 +#define DWC_MMC_TXPAUSEPACKETS 0x0770 +#define DWC_MMC_TXEXCESSDEF 0x076C +#define DWC_MMC_TXPACKETCOUNT_G 0x0768 +#define DWC_MMC_TXOCTETCOUNT_G 0x0764 +#define DWC_MMC_TXCARRIERERROR 0x0760 +#define DWC_MMC_TXEXCESSCOL 0x075C +#define DWC_MMC_TXLATECOL 0x0758 +#define DWC_MMC_TXDEFERRED 0x0754 +#define DWC_MMC_TXMULTICOL_G 0x0750 +#define DWC_MMC_TXSINGLECOL_G 0x074C +#define DWC_MMC_TXUNDERFLOWERROR 0x0748 +#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744 +#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740 +#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C +#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738 +#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734 +#define DWC_MMC_TX256TO511OCTETS_GB 0x0730 +#define DWC_MMC_TX128TO255OCTETS_GB 0x072C +#define DWC_MMC_TX65TO127OCTETS_GB 0x0728 +#define DWC_MMC_TX64OCTETS_GB 0x0724 +#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720 +#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C +#define DWC_MMC_TXPACKETCOUNT_GB 0x0718 +#define DWC_MMC_TXOCTETCOUNT_GB 0x0714 + +#define DWC_MMC_RXLPITRANSCNTR 0x07F8 +#define DWC_MMC_RXLPIUSCNTR 0x07F4 +#define DWC_MMC_RXCTRLPACKETS_G 0x07E4 +#define DWC_MMC_RXRCVERROR 0x07E0 +#define DWC_MMC_RXWATCHDOG 0x07DC +#define DWC_MMC_RXVLANPACKETS_GB 0x07D8 +#define DWC_MMC_RXFIFOOVERFLOW 0x07D4 +#define DWC_MMC_RXPAUSEPACKETS 0x07D0 +#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC +#define DWC_MMC_RXLENGTHERROR 0x07C8 +#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4 +#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0 +#define 
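
The MDIO register fields above combine into a classic clause-22 access. A sketch of a read (hypothetical snippet, again assuming "lp" is the struct net_local instance; a real mii_bus->read implementation must also poll the GB busy bit for completion):

	u32 regval = DWCEQOS_MDIO_PHYADDR(phyaddr) |
		     DWCEQOS_MDIO_PHYREG(phyreg) |
		     DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
		     DWCEQOS_MAC_MDIO_ADDR_GOC_READ |
		     DWCEQOS_MAC_MDIO_ADDR_GB;
	writel(regval, lp->baseaddr + REG_DWCEQOS_MAC_MDIO_ADDR);
	/* ... wait for DWCEQOS_MAC_MDIO_ADDR_GB to clear ... */
	data = readl(lp->baseaddr + REG_DWCEQOS_MAC_MDIO_DATA) & 0xffff;
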
DWC_MMC_RX512TO1023OCTETS_GB 0x07BC +#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8 +#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4 +#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0 +#define DWC_MMC_RX64OCTETS_GB 0x07AC +#define DWC_MMC_RXOVERSIZE_G 0x07A8 +#define DWC_MMC_RXUNDERSIZE_G 0x07A4 +#define DWC_MMC_RXJABBERERROR 0x07A0 +#define DWC_MMC_RXRUNTERROR 0x079C +#define DWC_MMC_RXALIGNMENTERROR 0x0798 +#define DWC_MMC_RXCRCERROR 0x0794 +#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790 +#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C +#define DWC_MMC_RXOCTETCOUNT_G 0x0788 +#define DWC_MMC_RXOCTETCOUNT_GB 0x0784 +#define DWC_MMC_RXPACKETCOUNT_GB 0x0780 + +static int debug = 3; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); + +/* DMA ring descriptor. These are used as support descriptors for the HW DMA */ +struct ring_desc { + struct sk_buff *skb; + dma_addr_t mapping; + size_t len; +}; + +/* DMA hardware descriptor */ +struct dwceqos_dma_desc { + u32 des0; + u32 des1; + u32 des2; + u32 des3; +} ____cacheline_aligned; + +struct dwceqos_mmc_counters { + __u64 txlpitranscntr; + __u64 txpiuscntr; + __u64 txoversize_g; + __u64 txvlanpackets_g; + __u64 txpausepackets; + __u64 txexcessdef; + __u64 txpacketcount_g; + __u64 txoctetcount_g; + __u64 txcarriererror; + __u64 txexcesscol; + __u64 txlatecol; + __u64 txdeferred; + __u64 txmulticol_g; + __u64 txsinglecol_g; + __u64 txunderflowerror; + __u64 txbroadcastpackets_gb; + __u64 txmulticastpackets_gb; + __u64 txunicastpackets_gb; + __u64 tx1024tomaxoctets_gb; + __u64 tx512to1023octets_gb; + __u64 tx256to511octets_gb; + __u64 tx128to255octets_gb; + __u64 tx65to127octets_gb; + __u64 tx64octets_gb; + __u64 txmulticastpackets_g; + __u64 txbroadcastpackets_g; + __u64 txpacketcount_gb; + __u64 txoctetcount_gb; + + __u64 rxlpitranscntr; + __u64 rxlpiuscntr; + __u64 rxctrlpackets_g; + __u64 rxrcverror; + __u64 rxwatchdog; + __u64 rxvlanpackets_gb; + __u64 rxfifooverflow; + __u64 rxpausepackets; + __u64 rxoutofrangetype; + __u64 rxlengtherror; + __u64 rxunicastpackets_g; + __u64 rx1024tomaxoctets_gb; + __u64 rx512to1023octets_gb; + __u64 rx256to511octets_gb; + __u64 rx128to255octets_gb; + __u64 rx65to127octets_gb; + __u64 rx64octets_gb; + __u64 rxoversize_g; + __u64 rxundersize_g; + __u64 rxjabbererror; + __u64 rxrunterror; + __u64 rxalignmenterror; + __u64 rxcrcerror; + __u64 rxmulticastpackets_g; + __u64 rxbroadcastpackets_g; + __u64 rxoctetcount_g; + __u64 rxoctetcount_gb; + __u64 rxpacketcount_gb; +}; + +/* Ethtool statistics */ + +struct dwceqos_stat { + const char stat_name[ETH_GSTRING_LEN]; + int offset; +}; + +#define STAT_ITEM(name, var) \ + {\ + name,\ + offsetof(struct dwceqos_mmc_counters, var),\ + } + +static const struct dwceqos_stat dwceqos_ethtool_stats[] = { + STAT_ITEM("tx_bytes", txoctetcount_gb), + STAT_ITEM("tx_packets", txpacketcount_gb), + STAT_ITEM("tx_unicst_packets", txunicastpackets_gb), + STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb), + STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb), + STAT_ITEM("tx_pause_packets", txpausepackets), + STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb), + STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb), + STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb), + STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb), + STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb), + STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb), + STAT_ITEM("tx_underflow_errors", txunderflowerror), + 
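+	/* Suffix convention carried over from the MMC register names:
+	 * _gb counters conventionally tally good and bad frames,
+	 * _g good frames only.
+	 */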
STAT_ITEM("tx_lpi_count", txlpitranscntr), + + STAT_ITEM("rx_bytes", rxoctetcount_gb), + STAT_ITEM("rx_packets", rxpacketcount_gb), + STAT_ITEM("rx_unicast_packets", rxunicastpackets_g), + STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g), + STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g), + STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb), + STAT_ITEM("rx_pause_packets", rxpausepackets), + STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb), + STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb), + STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb), + STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb), + STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb), + STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb), + STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow), + STAT_ITEM("rx_oversize_packets", rxoversize_g), + STAT_ITEM("rx_undersize_packets", rxundersize_g), + STAT_ITEM("rx_jabbers", rxjabbererror), + STAT_ITEM("rx_align_errors", rxalignmenterror), + STAT_ITEM("rx_crc_errors", rxcrcerror), + STAT_ITEM("rx_lpi_count", rxlpitranscntr), +}; + +/* Configuration of AXI bus parameters. + * These values depend on the parameters set on the MAC core as well + * as the AXI interconnect. + */ +struct dwceqos_bus_cfg { + /* Enable AXI low-power interface. */ + bool en_lpi; + /* Limit on number of outstanding AXI write requests. */ + u32 write_requests; + /* Limit on number of outstanding AXI read requests. */ + u32 read_requests; + /* Bitmap of allowed AXI burst lengths, 4-256 beats. */ + u32 burst_map; + /* DMA Programmable burst length*/ + u32 tx_pbl; + u32 rx_pbl; +}; + +struct dwceqos_flowcontrol { + int autoneg; + int rx; + int rx_current; + int tx; + int tx_current; +}; + +struct net_local { + void __iomem *baseaddr; + struct clk *phy_ref_clk; + struct clk *apb_pclk; + + struct device_node *phy_node; + struct net_device *ndev; + struct platform_device *pdev; + + u32 msg_enable; + + struct tasklet_struct tx_bdreclaim_tasklet; + struct workqueue_struct *txtimeout_handler_wq; + struct work_struct txtimeout_reinit; + + phy_interface_t phy_interface; + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + + unsigned int link; + unsigned int speed; + unsigned int duplex; + + struct napi_struct napi; + + /* DMA Descriptor Areas */ + struct ring_desc *rx_skb; + struct ring_desc *tx_skb; + + struct dwceqos_dma_desc *tx_descs; + struct dwceqos_dma_desc *rx_descs; + + /* DMA Mapped Descriptor areas*/ + dma_addr_t tx_descs_addr; + dma_addr_t rx_descs_addr; + dma_addr_t tx_descs_tail_addr; + dma_addr_t rx_descs_tail_addr; + + size_t tx_free; + size_t tx_next; + size_t rx_cur; + size_t tx_cur; + + /* Spinlocks for accessing DMA Descriptors */ + spinlock_t tx_lock; + + /* Spinlock for register read-modify-writes. */ + spinlock_t hw_lock; + + u32 feature0; + u32 feature1; + u32 feature2; + + struct dwceqos_bus_cfg bus_cfg; + bool en_tx_lpi_clockgating; + + int eee_enabled; + int eee_active; + int csr_val; + u32 gso_size; + + struct dwceqos_mmc_counters mmc_counters; + /* Protect the mmc_counter updates. 
*/ + spinlock_t stats_lock; + u32 mmc_rx_counters_mask; + u32 mmc_tx_counters_mask; + + struct dwceqos_flowcontrol flowcontrol; +}; + +static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, + u32 tx_mask); + +static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, + unsigned int reg_n); +static int dwceqos_stop(struct net_device *ndev); +static int dwceqos_open(struct net_device *ndev); +static void dwceqos_tx_poll_demand(struct net_local *lp); + +static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable); +static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable); + +static void dwceqos_reset_state(struct net_local *lp); + +#define dwceqos_read(lp, reg) \ + readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg)) +#define dwceqos_write(lp, reg, val) \ + writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg)) + +static void dwceqos_reset_state(struct net_local *lp) +{ + lp->link = 0; + lp->speed = 0; + lp->duplex = DUPLEX_UNKNOWN; + lp->flowcontrol.rx_current = 0; + lp->flowcontrol.tx_current = 0; + lp->eee_active = 0; + lp->eee_enabled = 0; +} + +static void print_descriptor(struct net_local *lp, int index, int tx) +{ + struct dwceqos_dma_desc *dd; + + if (tx) + dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index]; + else + dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index]; + + pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX", + index, dd); + pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2, + dd->des3); +} + +static void print_status(struct net_local *lp) +{ + size_t desci, i; + + pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free, + lp->tx_cur, lp->tx_next); + + print_descriptor(lp, lp->rx_cur, 0); + + for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0; + i < DWCEQOS_TX_DCNT; + ++i) { + print_descriptor(lp, desci, 1); + desci = (desci + 1) % DWCEQOS_TX_DCNT; + } + + pr_info("DMA_Debug_Status0: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0)); + pr_info("DMA_CH0_Status: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_DMA_IS)); + pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n", + dwceqos_read(lp, 0x1144)); + pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n", + dwceqos_read(lp, 0x1154)); + pr_info("MTL_Debug_Status: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST)); + pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST)); + pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST)); + pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC), + dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC)); +} + +static void dwceqos_mdio_set_csr(struct net_local *lp) +{ + int rate = clk_get_rate(lp->apb_pclk); + + if (rate <= 20000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20; + else if (rate <= 35000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35; + else if (rate <= 60000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60; + else if (rate <= 100000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100; + else if (rate <= 150000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150; + else if (rate <= 250000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250; +} + +/* Simple MDIO functions implementing mii_bus */ +static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg) +{ + struct net_local *lp = bus->priv; + u32 regval; + int i; + int data; + + regval = DWCEQOS_MDIO_PHYADDR(mii_id) | + DWCEQOS_MDIO_PHYREG(phyreg) | + 
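+		 /* Per the DWCEQOS_MDIO_* and *_MDIO_ADDR_* macros above,
+		  * this composes the MDIO address register: PHY address in
+		  * bits 25:21, PHY register in bits 20:16, CSR clock-range
+		  * code in bits 11:8, the command in bits 3:2 (read 0b11,
+		  * write 0b01) and the GB busy flag in bit 0, which the MAC
+		  * clears when the transaction completes.
+		  */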
DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | + DWCEQOS_MAC_MDIO_ADDR_GB | + DWCEQOS_MAC_MDIO_ADDR_GOC_READ; + dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); + + for (i = 0; i < 5; ++i) { + usleep_range(64, 128); + if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & + DWCEQOS_MAC_MDIO_ADDR_GB)) + break; + } + + data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA); + if (i == 5) { + netdev_warn(lp->ndev, "MDIO read timed out\n"); + data = 0xffff; + } + + return data & 0xffff; +} + +static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, + u16 value) +{ + struct net_local *lp = bus->priv; + u32 regval; + int i; + + dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value); + + regval = DWCEQOS_MDIO_PHYADDR(mii_id) | + DWCEQOS_MDIO_PHYREG(phyreg) | + DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | + DWCEQOS_MAC_MDIO_ADDR_GB | + DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE; + dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); + + for (i = 0; i < 5; ++i) { + usleep_range(64, 128); + if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & + DWCEQOS_MAC_MDIO_ADDR_GB)) + break; + } + if (i == 5) + netdev_warn(lp->ndev, "MDIO write timed out\n"); + return 0; +} + +static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return phy_mii_ioctl(phydev, rq, cmd); + default: + dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd); + return -EOPNOTSUPP; + } +} + +static void dwceqos_link_down(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + /* Indicate link down to the LPI state machine */ + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_link_up(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + /* Indicate link up to the LPI state machine */ + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); + + lp->eee_active = !phy_init_eee(lp->phy_dev, 0); + + /* Check for changed EEE capability */ + if (!lp->eee_active && lp->eee_enabled) { + lp->eee_enabled = 0; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); + } +} + +static void dwceqos_set_speed(struct net_local *lp) +{ + struct phy_device *phydev = lp->phy_dev; + u32 regval; + + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); + regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES | + DWCEQOS_MAC_CFG_DM); + + if (phydev->duplex) + regval |= DWCEQOS_MAC_CFG_DM; + if (phydev->speed == SPEED_10) { + regval |= DWCEQOS_MAC_CFG_PS; + } else if (phydev->speed == SPEED_100) { + regval |= DWCEQOS_MAC_CFG_PS | + DWCEQOS_MAC_CFG_FES; + } else if (phydev->speed != SPEED_1000) { + netdev_err(lp->ndev, + "unknown PHY speed %d\n", + phydev->speed); + return; + } + + dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval); +} + +static void 
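+/* phylib link-change callback: mirrors the PHY's negotiated speed, duplex
+ * and pause settings into the MAC whenever any of them change.
+ */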
dwceqos_adjust_link(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + int status_change = 0; + + if (phydev->link) { + if ((lp->speed != phydev->speed) || + (lp->duplex != phydev->duplex)) { + dwceqos_set_speed(lp); + + lp->speed = phydev->speed; + lp->duplex = phydev->duplex; + status_change = 1; + } + + if (lp->flowcontrol.autoneg) { + lp->flowcontrol.rx = phydev->pause || + phydev->asym_pause; + lp->flowcontrol.tx = phydev->pause || + phydev->asym_pause; + } + + if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) { + if (netif_msg_link(lp)) + netdev_dbg(ndev, "set rx flow to %d\n", + lp->flowcontrol.rx); + dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx); + lp->flowcontrol.rx_current = lp->flowcontrol.rx; + } + if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) { + if (netif_msg_link(lp)) + netdev_dbg(ndev, "set tx flow to %d\n", + lp->flowcontrol.tx); + dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx); + lp->flowcontrol.tx_current = lp->flowcontrol.tx; + } + } + + if (phydev->link != lp->link) { + lp->link = phydev->link; + status_change = 1; + } + + if (status_change) { + if (phydev->link) { + lp->ndev->trans_start = jiffies; + dwceqos_link_up(lp); + } else { + dwceqos_link_down(lp); + } + phy_print_status(phydev); + } +} + +static int dwceqos_mii_probe(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = NULL; + + if (lp->phy_node) { + phydev = of_phy_connect(lp->ndev, + lp->phy_node, + &dwceqos_adjust_link, + 0, + lp->phy_interface); + + if (!phydev) { + netdev_err(ndev, "no PHY found\n"); + return -ENODEV; + } + } else { + netdev_err(ndev, "no PHY configured\n"); + return -ENODEV; + } + + if (netif_msg_probe(lp)) + netdev_dbg(lp->ndev, + "phydev %p, phydev->phy_id 0x%x, phydev->addr 0x%x\n", + phydev, phydev->phy_id, phydev->addr); + + phydev->supported &= PHY_GBIT_FEATURES; + + lp->link = 0; + lp->speed = 0; + lp->duplex = DUPLEX_UNKNOWN; + lp->phy_dev = phydev; + + if (netif_msg_probe(lp)) { + netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n", + lp->phy_dev->addr, lp->phy_dev->phy_id); + + netdev_dbg(lp->ndev, "attach [%s] phy driver\n", + lp->phy_dev->drv->name); + } + + return 0; +} + +static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index) +{ + struct sk_buff *new_skb; + dma_addr_t new_skb_baddr = 0; + + new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); + if (!new_skb) { + netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index); + goto err_out; + } + + new_skb_baddr = dma_map_single(lp->ndev->dev.parent, + new_skb->data, DWCEQOS_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { + netdev_err(lp->ndev, "DMA map error\n"); + dev_kfree_skb(new_skb); + new_skb = NULL; + goto err_out; + } + + lp->rx_descs[index].des0 = new_skb_baddr; + lp->rx_descs[index].des1 = 0; + lp->rx_descs[index].des2 = 0; + lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE | + DWCEQOS_DMA_RDES3_BUF1V | + DWCEQOS_DMA_RDES3_OWN; + + lp->rx_skb[index].mapping = new_skb_baddr; + lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE; + +err_out: + lp->rx_skb[index].skb = new_skb; +} + +static void dwceqos_clean_rings(struct net_local *lp) +{ + int i; + + if (lp->rx_skb) { + for (i = 0; i < DWCEQOS_RX_DCNT; i++) { + if (lp->rx_skb[i].skb) { + dma_unmap_single(lp->ndev->dev.parent, + lp->rx_skb[i].mapping, + lp->rx_skb[i].len, + DMA_FROM_DEVICE); + + dev_kfree_skb(lp->rx_skb[i].skb); + lp->rx_skb[i].skb = NULL; +
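+				/* The skb pointer doubles as the guard, so a
+				 * repeated clean pass cannot unmap or free
+				 * the same buffer twice.
+				 */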
lp->rx_skb[i].mapping = 0; + } + } + } + + if (lp->tx_skb) { + for (i = 0; i < DWCEQOS_TX_DCNT; i++) { + if (lp->tx_skb[i].skb) { + dev_kfree_skb(lp->tx_skb[i].skb); + lp->tx_skb[i].skb = NULL; + } + if (lp->tx_skb[i].mapping) { + dma_unmap_single(lp->ndev->dev.parent, + lp->tx_skb[i].mapping, + lp->tx_skb[i].len, + DMA_TO_DEVICE); + lp->tx_skb[i].mapping = 0; + } + } + } +} + +static void dwceqos_descriptor_free(struct net_local *lp) +{ + int size; + + dwceqos_clean_rings(lp); + + kfree(lp->tx_skb); + lp->tx_skb = NULL; + kfree(lp->rx_skb); + lp->rx_skb = NULL; + + size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); + if (lp->rx_descs) { + dma_free_coherent(lp->ndev->dev.parent, size, + (void *)(lp->rx_descs), lp->rx_descs_addr); + lp->rx_descs = NULL; + } + + size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); + if (lp->tx_descs) { + dma_free_coherent(lp->ndev->dev.parent, size, + (void *)(lp->tx_descs), lp->tx_descs_addr); + lp->tx_descs = NULL; + } +} + +static int dwceqos_descriptor_init(struct net_local *lp) +{ + int size; + u32 i; + + lp->gso_size = 0; + + lp->tx_skb = NULL; + lp->rx_skb = NULL; + lp->rx_descs = NULL; + lp->tx_descs = NULL; + + /* Reset the DMA indexes */ + lp->rx_cur = 0; + lp->tx_cur = 0; + lp->tx_next = 0; + lp->tx_free = DWCEQOS_TX_DCNT; + + /* Allocate Ring descriptors */ + size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc); + lp->rx_skb = kzalloc(size, GFP_KERNEL); + if (!lp->rx_skb) + goto err_out; + + size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc); + lp->tx_skb = kzalloc(size, GFP_KERNEL); + if (!lp->tx_skb) + goto err_out; + + /* Allocate DMA descriptors */ + size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); + lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, + &lp->rx_descs_addr, 0); + if (!lp->rx_descs) + goto err_out; + lp->rx_descs_tail_addr = lp->rx_descs_addr + + sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT; + + size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); + lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, + &lp->tx_descs_addr, 0); + if (!lp->tx_descs) + goto err_out; + lp->tx_descs_tail_addr = lp->tx_descs_addr + + sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT; + + /* Initialize RX Ring Descriptors and buffers */ + for (i = 0; i < DWCEQOS_RX_DCNT; ++i) { + dwceqos_alloc_rxring_desc(lp, i); + if (!(lp->rx_skb[lp->rx_cur].skb)) + goto err_out; + } + + /* Initialize TX Descriptors */ + for (i = 0; i < DWCEQOS_TX_DCNT; ++i) { + lp->tx_descs[i].des0 = 0; + lp->tx_descs[i].des1 = 0; + lp->tx_descs[i].des2 = 0; + lp->tx_descs[i].des3 = 0; + } + + /* Make descriptor writes visible to the DMA. 
*/ + wmb(); + + return 0; + +err_out: + dwceqos_descriptor_free(lp); + return -ENOMEM; +} + +static int dwceqos_packet_avail(struct net_local *lp) +{ + return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN); +} + +static void dwceqos_get_hwfeatures(struct net_local *lp) +{ + lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0); + lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1); + lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2); +} + +static void dwceqos_dma_enable_txirq(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); + regval |= DWCEQOS_DMA_CH0_IE_TIE; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_dma_disable_txirq(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); + regval &= ~DWCEQOS_DMA_CH0_IE_TIE; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_dma_enable_rxirq(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); + regval |= DWCEQOS_DMA_CH0_IE_RIE; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_dma_disable_rxirq(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); + regval &= ~DWCEQOS_DMA_CH0_IE_RIE; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_enable_mmc_interrupt(struct net_local *lp) +{ + dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0); + dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0); +} + +static int dwceqos_mii_init(struct net_local *lp) +{ + int ret = -ENXIO, i; + struct resource res; + struct device_node *mdionode; + + mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio"); + + if (!mdionode) + return 0; + + lp->mii_bus = mdiobus_alloc(); + if (!lp->mii_bus) { + ret = -ENOMEM; + goto err_out; + } + + lp->mii_bus->name = "DWCEQOS MII bus"; + lp->mii_bus->read = &dwceqos_mdio_read; + lp->mii_bus->write = &dwceqos_mdio_write; + lp->mii_bus->priv = lp; + lp->mii_bus->parent = &lp->ndev->dev; + + lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!lp->mii_bus->irq) { + ret = -ENOMEM; + goto err_out_free_mdiobus; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + lp->mii_bus->irq[i] = PHY_POLL; + of_address_to_resource(lp->pdev->dev.of_node, 0, &res); + snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", + (unsigned long long)res.start); + if (of_mdiobus_register(lp->mii_bus, mdionode)) + goto err_out_free_mdio_irq; + + return 0; + +err_out_free_mdio_irq: + kfree(lp->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(lp->mii_bus); +err_out: + of_node_put(mdionode); + return ret; +} + +/* DMA reset. When issued also resets all MTL and MAC registers as well */ +static void dwceqos_reset_hw(struct net_local *lp) +{ + /* Wait (at most) 0.5 seconds for DMA reset*/ + int i = 5000; + u32 reg; + + /* Force gigabit to guarantee a TX clock for GMII. 
*/ + reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); + reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES); + reg |= DWCEQOS_MAC_CFG_DM; + dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg); + + dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR); + + do { + udelay(100); + i--; + reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE); + } while ((reg & DWCEQOS_DMA_MODE_SWR) && i); + /* We might experience a timeout if the chip clock mux is broken */ + if (!i) + netdev_err(lp->ndev, "DMA reset timed out!\n"); +} + +static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status) +{ + if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) { + netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n", + dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ? + "read" : "write", + dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ? + "descr" : "data", + dma_status); + + print_status(lp); + } + if (dma_status & DWCEQOS_DMA_CH0_IS_REB) { + netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n", + dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ? + "read" : "write", + dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ? + "descr" : "data", + dma_status); + + print_status(lp); + } +} + +static void dwceqos_mmc_interrupt(struct net_local *lp) +{ + unsigned long flags; + + spin_lock_irqsave(&lp->stats_lock, flags); + + /* A latched mmc interrupt can not be masked, we must read + * all the counters with an interrupt pending. + */ + dwceqos_read_mmc_counters(lp, + dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ), + dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ)); + + spin_unlock_irqrestore(&lp->stats_lock, flags); +} + +static void dwceqos_mac_interrupt(struct net_local *lp) +{ + u32 cause; + + cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS); + + if (cause & DWCEQOS_MAC_IS_MMC_INT) + dwceqos_mmc_interrupt(lp); +} + +static irqreturn_t dwceqos_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct net_local *lp = netdev_priv(ndev); + + u32 cause; + u32 dma_status; + irqreturn_t ret = IRQ_NONE; + + cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS); + /* DMA Channel 0 Interrupt */ + if (cause & DWCEQOS_DMA_IS_DC0IS) { + dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA); + + /* Transmit Interrupt */ + if (dma_status & DWCEQOS_DMA_CH0_IS_TI) { + tasklet_schedule(&lp->tx_bdreclaim_tasklet); + dwceqos_dma_disable_txirq(lp); + } + + /* Receive Interrupt */ + if (dma_status & DWCEQOS_DMA_CH0_IS_RI) { + /* Disable RX IRQs */ + dwceqos_dma_disable_rxirq(lp); + napi_schedule(&lp->napi); + } + + /* Fatal Bus Error interrupt */ + if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) { + dwceqos_fatal_bus_error(lp, dma_status); + + /* errata 9000831707 */ + dma_status |= DWCEQOS_DMA_CH0_IS_TEB | + DWCEQOS_DMA_CH0_IS_REB; + } + + /* Ack all DMA Channel 0 IRQs */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status); + ret = IRQ_HANDLED; + } + + if (cause & DWCEQOS_DMA_IS_MTLIS) { + u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL); + + dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val); + ret = IRQ_HANDLED; + } + + if (cause & DWCEQOS_DMA_IS_MACIS) { + dwceqos_mac_interrupt(lp); + ret = IRQ_HANDLED; + } + return ret; +} + +static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL); + if (enable) + regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE; + else + regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE; + dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval); + + 
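+	/* RFE enables RX flow control: the MAC honours received PAUSE
+	 * frames by pausing its transmitter for the requested time.
+	 */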
spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + + /* MTL flow control */ + regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); + if (enable) + regval |= DWCEQOS_MTL_RXQ_EHFC; + else + regval &= ~DWCEQOS_MTL_RXQ_EHFC; + + dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); + + /* MAC flow control */ + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW); + if (enable) + regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE; + else + regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE; + dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); + + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_configure_flow_control(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + int RQS, RFD, RFA; + + spin_lock_irqsave(&lp->hw_lock, flags); + + regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); + + /* The queue size is in units of 256 bytes. We want 512 bytes units for + * the threshold fields. + */ + RQS = ((regval >> 20) & 0x3FF) + 1; + RQS /= 2; + + /* The thresholds are relative to a full queue, with a bias + * of 1 KiByte below full. + */ + RFD = RQS / 2 - 2; + RFA = RQS / 8 - 2; + + regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8); + + if (RFD >= 0 && RFA >= 0) { + dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); + } else { + netdev_warn(lp->ndev, + "FIFO too small for flow control."); + } + + regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) | + DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS; + + dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); + + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_configure_clock(struct net_local *lp) +{ + unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000; + + BUG_ON(!rate_mhz); + + dwceqos_write(lp, + REG_DWCEQOS_MAC_1US_TIC_COUNTER, + DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1)); +} + +static void dwceqos_configure_bus(struct net_local *lp) +{ + u32 sysbus_reg; + + /* N.B. We do not support the Fixed Burst mode because it + * opens a race window by making HW access to DMA descriptors + * non-atomic. + */ + + sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL; + + if (lp->bus_cfg.en_lpi) + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI; + + if (lp->bus_cfg.burst_map) + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( + lp->bus_cfg.burst_map); + else + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( + DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT); + + if (lp->bus_cfg.read_requests) + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( + lp->bus_cfg.read_requests - 1); + else + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( + DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT); + + if (lp->bus_cfg.write_requests) + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( + lp->bus_cfg.write_requests - 1); + else + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( + DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT); + + if (netif_msg_hw(lp)) + netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg); + + dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg); +} + +static void dwceqos_init_hw(struct net_local *lp) +{ + u32 regval; + u32 buswidth; + u32 dma_skip; + + /* Software reset */ + dwceqos_reset_hw(lp); + + dwceqos_configure_bus(lp); + + /* Probe data bus width, 32/64/128 bits. */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL); + buswidth = (regval ^ 0xF) + 1; + + /* Cache-align dma descriptors. 
*/ + dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL, + DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) | + DWCEQOS_DMA_CH_CTRL_PBLX8); + + /* Initialize DMA Channel 0 */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST, + (u32)lp->tx_descs_addr); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST, + (u32)lp->rx_descs_addr); + + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, + lp->tx_descs_tail_addr); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, + lp->rx_descs_tail_addr); + + if (lp->bus_cfg.tx_pbl) + regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl); + else + regval = DWCEQOS_DMA_CH_CTRL_PBL(2); + + /* Enable TSO if the HW support it */ + if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) + regval |= DWCEQOS_DMA_CH_TX_TSE; + + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval); + + if (lp->bus_cfg.rx_pbl) + regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl); + else + regval = DWCEQOS_DMA_CH_CTRL_PBL(2); + + regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); + + regval |= DWCEQOS_DMA_CH_CTRL_START; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); + + /* Initialize MTL Queues */ + regval = DWCEQOS_MTL_SCHALG_STRICT; + dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval); + + regval = DWCEQOS_MTL_TXQ_SIZE( + DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) | + DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF | + DWCEQOS_MTL_TXQ_TTC512; + dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval); + + regval = DWCEQOS_MTL_RXQ_SIZE( + DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) | + DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF; + dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); + + dwceqos_configure_flow_control(lp); + + /* Initialize MAC */ + dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); + + lp->eee_enabled = 0; + + dwceqos_configure_clock(lp); + + /* MMC counters */ + + /* probe implemented counters */ + dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u); + dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u); + lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK); + lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK); + + dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST | + DWCEQOS_MMC_CTRL_RSTONRD); + dwceqos_enable_mmc_interrupt(lp); + + /* Enable Interrupts */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, + DWCEQOS_DMA_CH0_IE_NIE | + DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | + DWCEQOS_DMA_CH0_IE_AIE | + DWCEQOS_DMA_CH0_IE_FBEE); + + dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); + + dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | + DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); + + /* Start TX DMA */ + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, + regval | DWCEQOS_DMA_CH_CTRL_START); + + /* Enable MAC TX/RX */ + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); + dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, + regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); +} + +static void dwceqos_tx_reclaim(unsigned long data) +{ + struct net_device *ndev = (struct net_device *)data; + struct net_local *lp = netdev_priv(ndev); + unsigned int tx_bytes = 0; + unsigned int tx_packets = 0; + + spin_lock(&lp->tx_lock); + + while (lp->tx_free < DWCEQOS_TX_DCNT) 
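+	/* Walk forward from tx_cur and stop at the first descriptor the
+	 * DMA still owns; everything before it has completed.
+	 */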
{ + struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur]; + struct ring_desc *rd = &lp->tx_skb[lp->tx_cur]; + + /* Descriptor still being held by DMA ? */ + if (dd->des3 & DWCEQOS_DMA_TDES3_OWN) + break; + + if (rd->mapping) + dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len, + DMA_TO_DEVICE); + + if (unlikely(rd->skb)) { + ++tx_packets; + tx_bytes += rd->skb->len; + dev_consume_skb_any(rd->skb); + } + + rd->skb = NULL; + rd->mapping = 0; + lp->tx_free++; + lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT; + + if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) && + (dd->des3 & DWCEQOS_DMA_RDES3_ES)) { + if (netif_msg_tx_err(lp)) + netdev_err(ndev, "TX Error, TDES3 = 0x%x\n", + dd->des3); + if (netif_msg_hw(lp)) + print_status(lp); + } + } + spin_unlock(&lp->tx_lock); + + netdev_completed_queue(ndev, tx_packets, tx_bytes); + + dwceqos_dma_enable_txirq(lp); + netif_wake_queue(ndev); +} + +static int dwceqos_rx(struct net_local *lp, int budget) +{ + struct sk_buff *skb; + u32 tot_size = 0; + unsigned int n_packets = 0; + unsigned int n_descs = 0; + u32 len; + + struct dwceqos_dma_desc *dd; + struct sk_buff *new_skb; + dma_addr_t new_skb_baddr = 0; + + while (n_descs < budget) { + if (!dwceqos_packet_avail(lp)) + break; + + new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); + if (!new_skb) { + netdev_err(lp->ndev, "no memory for new sk_buff\n"); + break; + } + + /* Get dma handle of skb->data */ + new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent, + new_skb->data, + DWCEQOS_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { + netdev_err(lp->ndev, "DMA map error\n"); + dev_kfree_skb(new_skb); + break; + } + + /* Read descriptor data after reading owner bit. */ + dma_rmb(); + + dd = &lp->rx_descs[lp->rx_cur]; + len = DWCEQOS_DMA_RDES3_PL(dd->des3); + skb = lp->rx_skb[lp->rx_cur].skb; + + /* Unmap old buffer */ + dma_unmap_single(lp->ndev->dev.parent, + lp->rx_skb[lp->rx_cur].mapping, + lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE); + + /* Discard packet on reception error or bad checksum */ + if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) || + (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) { + dev_kfree_skb(skb); + skb = NULL; + } else { + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, lp->ndev); + switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) { + case DWCEQOS_DMA_RDES1_PT_UDP: + case DWCEQOS_DMA_RDES1_PT_TCP: + case DWCEQOS_DMA_RDES1_PT_ICMP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + skb->ip_summed = CHECKSUM_NONE; + break; + } + } + + if (unlikely(!skb)) { + if (netif_msg_rx_err(lp)) + netdev_dbg(lp->ndev, "rx error: des3=%X\n", + lp->rx_descs[lp->rx_cur].des3); + } else { + tot_size += skb->len; + n_packets++; + + netif_receive_skb(skb); + } + + lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr; + lp->rx_descs[lp->rx_cur].des1 = 0; + lp->rx_descs[lp->rx_cur].des2 = 0; + /* The DMA must observe des0/1/2 written before des3. */ + wmb(); + lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE | + DWCEQOS_DMA_RDES3_OWN | + DWCEQOS_DMA_RDES3_BUF1V; + + lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr; + lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE; + lp->rx_skb[lp->rx_cur].skb = new_skb; + + n_descs++; + lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT; + } + + /* Make sure any ownership update is written to the descriptors before + * DMA wakeup. 
+ */ + wmb(); + + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI); + /* Wake up RX by writing tail pointer */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, + lp->rx_descs_tail_addr); + + return n_descs; +} + +static int dwceqos_rx_poll(struct napi_struct *napi, int budget) +{ + struct net_local *lp = container_of(napi, struct net_local, napi); + int work_done = 0; + + work_done = dwceqos_rx(lp, budget - work_done); + + if (!dwceqos_packet_avail(lp) && work_done < budget) { + napi_complete(napi); + dwceqos_dma_enable_rxirq(lp); + } else { + work_done = budget; + } + + return work_done; +} + +/* Reinitialize function if a TX timed out */ +static void dwceqos_reinit_for_txtimeout(struct work_struct *data) +{ + struct net_local *lp = container_of(data, struct net_local, + txtimeout_reinit); + + netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n", + DWCEQOS_TX_TIMEOUT); + + if (netif_msg_hw(lp)) + print_status(lp); + + rtnl_lock(); + dwceqos_stop(lp->ndev); + dwceqos_open(lp->ndev); + rtnl_unlock(); +} + +/* DT Probing function called by main probe */ +static inline int dwceqos_probe_config_dt(struct platform_device *pdev) +{ + struct net_device *ndev; + struct net_local *lp; + const void *mac_address; + struct dwceqos_bus_cfg *bus_cfg; + struct device_node *np = pdev->dev.of_node; + + ndev = platform_get_drvdata(pdev); + lp = netdev_priv(ndev); + bus_cfg = &lp->bus_cfg; + + /* Set the MAC address. */ + mac_address = of_get_mac_address(pdev->dev.of_node); + if (mac_address) + ether_addr_copy(ndev->dev_addr, mac_address); + + /* These are all optional parameters */ + lp->en_tx_lpi_clockgating = of_property_read_bool(np, + "snps,en-tx-lpi-clockgating"); + bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi"); + of_property_read_u32(np, "snps,write-requests", + &bus_cfg->write_requests); + of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests); + of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map); + of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl); + of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl); + + netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n", + bus_cfg->en_lpi, + bus_cfg->write_requests, + bus_cfg->read_requests, + bus_cfg->burst_map, + bus_cfg->rx_pbl, + bus_cfg->tx_pbl); + + return 0; +} + +static int dwceqos_open(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + int res; + + dwceqos_reset_state(lp); + res = dwceqos_descriptor_init(lp); + if (res) { + netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res); + return res; + } + netdev_reset_queue(ndev); + + napi_enable(&lp->napi); + phy_start(lp->phy_dev); + dwceqos_init_hw(lp); + + netif_start_queue(ndev); + tasklet_enable(&lp->tx_bdreclaim_tasklet); + + return 0; +} + +static bool dweqos_is_tx_dma_suspended(struct net_local *lp) +{ + u32 reg; + + reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0); + reg = DMA_GET_TX_STATE_CH0(reg); + + return reg == DMA_TX_CH_SUSPENDED; +} + +static void dwceqos_drain_dma(struct net_local *lp) +{ + /* Wait for all pending TX buffers to be sent. Upper limit based + * on max frame size on a 10 Mbit link. 
+ */ + size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100; + + while (!dweqos_is_tx_dma_suspended(lp) && limit--) + usleep_range(100, 200); +} + +static int dwceqos_stop(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + + phy_stop(lp->phy_dev); + + tasklet_disable(&lp->tx_bdreclaim_tasklet); + netif_stop_queue(ndev); + napi_disable(&lp->napi); + + dwceqos_drain_dma(lp); + + netif_tx_lock(lp->ndev); + dwceqos_reset_hw(lp); + dwceqos_descriptor_free(lp); + netif_tx_unlock(lp->ndev); + + return 0; +} + +static void dwceqos_dmadesc_set_ctx(struct net_local *lp, + unsigned short gso_size) +{ + struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next]; + + dd->des0 = 0; + dd->des1 = 0; + dd->des2 = gso_size; + dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV; + + lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; +} + +static void dwceqos_tx_poll_demand(struct net_local *lp) +{ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, + lp->tx_descs_tail_addr); +} + +struct dwceqos_tx { + size_t nr_descriptors; + size_t initial_descriptor; + size_t last_descriptor; + size_t prev_gso_size; + size_t network_header_len; +}; + +static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp, + struct dwceqos_tx *tx) +{ + size_t n = 1; + size_t i; + + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) + ++n; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) / + BYTES_PER_DMA_DESC; + } + + tx->nr_descriptors = n; + tx->initial_descriptor = lp->tx_next; + tx->last_descriptor = lp->tx_next; + tx->prev_gso_size = lp->gso_size; + + tx->network_header_len = skb_transport_offset(skb); + if (skb_is_gso(skb)) + tx->network_header_len += tcp_hdrlen(skb); +} + +static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp, + struct dwceqos_tx *tx) +{ + struct ring_desc *rd; + struct dwceqos_dma_desc *dd; + size_t payload_len; + dma_addr_t dma_handle; + + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) { + dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size); + lp->gso_size = skb_shinfo(skb)->gso_size; + } + + dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + + if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { + netdev_err(lp->ndev, "TX DMA Mapping error\n"); + return -ENOMEM; + } + + rd = &lp->tx_skb[lp->tx_next]; + dd = &lp->tx_descs[lp->tx_next]; + + rd->skb = NULL; + rd->len = skb_headlen(skb); + rd->mapping = dma_handle; + + /* Set up DMA Descriptor */ + dd->des0 = dma_handle; + + if (skb_is_gso(skb)) { + payload_len = skb_headlen(skb) - tx->network_header_len; + + if (payload_len) + dd->des1 = dma_handle + tx->network_header_len; + dd->des2 = tx->network_header_len | + DWCEQOS_DMA_DES2_B2L(payload_len); + dd->des3 = DWCEQOS_DMA_TDES3_TSE | + DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) | + (skb->len - tx->network_header_len); + } else { + dd->des1 = 0; + dd->des2 = skb_headlen(skb); + dd->des3 = skb->len; + + switch (skb->ip_summed) { + case CHECKSUM_PARTIAL: + dd->des3 |= DWCEQOS_DMA_TDES3_CA; + case CHECKSUM_NONE: + case CHECKSUM_UNNECESSARY: + case CHECKSUM_COMPLETE: + default: + break; + } + } + + dd->des3 |= DWCEQOS_DMA_TDES3_FD; + if (lp->tx_next != tx->initial_descriptor) + dd->des3 |= DWCEQOS_DMA_TDES3_OWN; + + tx->last_descriptor = lp->tx_next; + lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; + + return 0; +} + +static int dwceqos_tx_frags(struct sk_buff 
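+/* Maps each page fragment for DMA; a fragment larger than one descriptor
+ * buffer is split into 16376-byte chunks across several descriptors below.
+ */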
*skb, struct net_local *lp, + struct dwceqos_tx *tx) +{ + struct ring_desc *rd = NULL; + struct dwceqos_dma_desc *dd; + dma_addr_t dma_handle; + size_t i; + + /* Setup more ring and DMA descriptor if the packet is fragmented */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + size_t frag_size; + size_t consumed_size; + + /* Map DMA Area */ + dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { + netdev_err(lp->ndev, "DMA Mapping error\n"); + return -ENOMEM; + } + + /* order-3 fragments span more than one descriptor. */ + frag_size = skb_frag_size(frag); + consumed_size = 0; + while (consumed_size < frag_size) { + size_t dma_size = min_t(size_t, 16376, + frag_size - consumed_size); + + rd = &lp->tx_skb[lp->tx_next]; + memset(rd, 0, sizeof(*rd)); + + dd = &lp->tx_descs[lp->tx_next]; + + /* Set DMA Descriptor fields */ + dd->des0 = dma_handle; + dd->des1 = 0; + dd->des2 = dma_size; + + if (skb_is_gso(skb)) + dd->des3 = (skb->len - tx->network_header_len); + else + dd->des3 = skb->len; + + dd->des3 |= DWCEQOS_DMA_TDES3_OWN; + + tx->last_descriptor = lp->tx_next; + lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; + consumed_size += dma_size; + } + + rd->len = skb_frag_size(frag); + rd->mapping = dma_handle; + } + + return 0; +} + +static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp, + struct dwceqos_tx *tx) +{ + lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD; + lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC; + + lp->tx_skb[tx->last_descriptor].skb = skb; + + /* Make all descriptor updates visible to the DMA before setting the + * owner bit. + */ + wmb(); + + lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN; + + /* Make the owner bit visible before TX wakeup. 
*/ + wmb(); + + dwceqos_tx_poll_demand(lp); +} + +static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx) +{ + size_t i = tx->initial_descriptor; + + while (i != lp->tx_next) { + if (lp->tx_skb[i].mapping) + dma_unmap_single(lp->ndev->dev.parent, + lp->tx_skb[i].mapping, + lp->tx_skb[i].len, + DMA_TO_DEVICE); + + lp->tx_skb[i].mapping = 0; + lp->tx_skb[i].skb = NULL; + + memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i])); + + i = (i + 1) % DWCEQOS_TX_DCNT; + } + + lp->tx_next = tx->initial_descriptor; + lp->gso_size = tx->prev_gso_size; +} + +static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + struct dwceqos_tx trans; + int err; + + dwceqos_tx_prepare(skb, lp, &trans); + if (lp->tx_free < trans.nr_descriptors) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + err = dwceqos_tx_linear(skb, lp, &trans); + if (err) + goto tx_error; + + err = dwceqos_tx_frags(skb, lp, &trans); + if (err) + goto tx_error; + + WARN_ON(lp->tx_next != + ((trans.initial_descriptor + trans.nr_descriptors) % + DWCEQOS_TX_DCNT)); + + dwceqos_tx_finalize(skb, lp, &trans); + + netdev_sent_queue(ndev, skb->len); + + spin_lock_bh(&lp->tx_lock); + lp->tx_free -= trans.nr_descriptors; + spin_unlock_bh(&lp->tx_lock); + + ndev->trans_start = jiffies; + return 0; + +tx_error: + dwceqos_tx_rollback(lp, &trans); + dev_kfree_skb(skb); + return 0; +} + +/* Set MAC address and then update HW accordingly */ +static int dwceqos_set_mac_address(struct net_device *ndev, void *addr) +{ + struct net_local *lp = netdev_priv(ndev); + struct sockaddr *hwaddr = (struct sockaddr *)addr; + + if (netif_running(ndev)) + return -EBUSY; + + if (!is_valid_ether_addr(hwaddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len); + + dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); + return 0; +} + +static void dwceqos_tx_timeout(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + + queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit); +} + +static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, + unsigned int reg_n) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), + data | DWCEQOS_MAC_MAC_ADDR_HI_EN); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data); +} + +static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n) +{ + /* Do not disable MAC address 0 */ + if (reg_n != 0) + dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0); +} + +static void dwceqos_set_rx_mode(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + u32 regval = 0; + u32 mc_filter[2]; + int reg = 1; + struct netdev_hw_addr *ha; + unsigned int max_mac_addr; + + max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); + + if (ndev->flags & IFF_PROMISC) { + regval = DWCEQOS_MAC_PKT_FILT_PR; + } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) || + (ndev->flags & IFF_ALLMULTI))) { + regval = DWCEQOS_MAC_PKT_FILT_PM; + dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff); + dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff); + } else if (!netdev_mc_empty(ndev)) { + regval = DWCEQOS_MAC_PKT_FILT_HMC; + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, ndev) { + /* The upper 6 bits of the calculated CRC are used to + * index the contents of the hash table + */ + int bit_nr =
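+			/* e.g. a hash value of 37 (0b100101) selects bit 5
+			 * (37 & 31) of mc_filter[1] (37 >> 5), i.e. bit 5
+			 * of the HASTABLE_HI register.
+			 */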
bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; + /* The most significant bit determines the register + * to use (H/L) while the other 5 bits determine + * the bit within the register. + */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]); + dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]); + } + if (netdev_uc_count(ndev) > max_mac_addr) { + regval |= DWCEQOS_MAC_PKT_FILT_PR; + } else { + netdev_for_each_uc_addr(ha, ndev) { + dwceqos_set_umac_addr(lp, ha->addr, reg); + reg++; + } + for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++) + dwceqos_disable_umac_addr(lp, reg); + } + dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void dwceqos_poll_controller(struct net_device *ndev) +{ + disable_irq(ndev->irq); + dwceqos_interrupt(ndev->irq, ndev); + enable_irq(ndev->irq); +} +#endif + +static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, + u32 tx_mask) +{ + if (tx_mask & BIT(27)) + lp->mmc_counters.txlpitranscntr += + dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR); + if (tx_mask & BIT(26)) + lp->mmc_counters.txpiuscntr += + dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR); + if (tx_mask & BIT(25)) + lp->mmc_counters.txoversize_g += + dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G); + if (tx_mask & BIT(24)) + lp->mmc_counters.txvlanpackets_g += + dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G); + if (tx_mask & BIT(23)) + lp->mmc_counters.txpausepackets += + dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS); + if (tx_mask & BIT(22)) + lp->mmc_counters.txexcessdef += + dwceqos_read(lp, DWC_MMC_TXEXCESSDEF); + if (tx_mask & BIT(21)) + lp->mmc_counters.txpacketcount_g += + dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G); + if (tx_mask & BIT(20)) + lp->mmc_counters.txoctetcount_g += + dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G); + if (tx_mask & BIT(19)) + lp->mmc_counters.txcarriererror += + dwceqos_read(lp, DWC_MMC_TXCARRIERERROR); + if (tx_mask & BIT(18)) + lp->mmc_counters.txexcesscol += + dwceqos_read(lp, DWC_MMC_TXEXCESSCOL); + if (tx_mask & BIT(17)) + lp->mmc_counters.txlatecol += + dwceqos_read(lp, DWC_MMC_TXLATECOL); + if (tx_mask & BIT(16)) + lp->mmc_counters.txdeferred += + dwceqos_read(lp, DWC_MMC_TXDEFERRED); + if (tx_mask & BIT(15)) + lp->mmc_counters.txmulticol_g += + dwceqos_read(lp, DWC_MMC_TXMULTICOL_G); + if (tx_mask & BIT(14)) + lp->mmc_counters.txsinglecol_g += + dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G); + if (tx_mask & BIT(13)) + lp->mmc_counters.txunderflowerror += + dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR); + if (tx_mask & BIT(12)) + lp->mmc_counters.txbroadcastpackets_gb += + dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB); + if (tx_mask & BIT(11)) + lp->mmc_counters.txmulticastpackets_gb += + dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB); + if (tx_mask & BIT(10)) + lp->mmc_counters.txunicastpackets_gb += + dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB); + if (tx_mask & BIT(9)) + lp->mmc_counters.tx1024tomaxoctets_gb += + dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB); + if (tx_mask & BIT(8)) + lp->mmc_counters.tx512to1023octets_gb += + dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB); + if (tx_mask & BIT(7)) + lp->mmc_counters.tx256to511octets_gb += + dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB); + if (tx_mask & BIT(6)) + lp->mmc_counters.tx128to255octets_gb += + dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB); + if (tx_mask & BIT(5)) + lp->mmc_counters.tx65to127octets_gb += + dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB); + if (tx_mask & BIT(4)) + lp->mmc_counters.tx64octets_gb 
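+	/* A set bit in the latched IRQ status marks a counter with a
+	 * pending interrupt; RSTONRD clears each hardware counter on
+	 * read, so the 64-bit software copies accumulate the totals.
+	 */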
+= + dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB); + if (tx_mask & BIT(3)) + lp->mmc_counters.txmulticastpackets_g += + dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G); + if (tx_mask & BIT(2)) + lp->mmc_counters.txbroadcastpackets_g += + dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G); + if (tx_mask & BIT(1)) + lp->mmc_counters.txpacketcount_gb += + dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB); + if (tx_mask & BIT(0)) + lp->mmc_counters.txoctetcount_gb += + dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB); + + if (rx_mask & BIT(27)) + lp->mmc_counters.rxlpitranscntr += + dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR); + if (rx_mask & BIT(26)) + lp->mmc_counters.rxlpiuscntr += + dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR); + if (rx_mask & BIT(25)) + lp->mmc_counters.rxctrlpackets_g += + dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G); + if (rx_mask & BIT(24)) + lp->mmc_counters.rxrcverror += + dwceqos_read(lp, DWC_MMC_RXRCVERROR); + if (rx_mask & BIT(23)) + lp->mmc_counters.rxwatchdog += + dwceqos_read(lp, DWC_MMC_RXWATCHDOG); + if (rx_mask & BIT(22)) + lp->mmc_counters.rxvlanpackets_gb += + dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB); + if (rx_mask & BIT(21)) + lp->mmc_counters.rxfifooverflow += + dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW); + if (rx_mask & BIT(20)) + lp->mmc_counters.rxpausepackets += + dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS); + if (rx_mask & BIT(19)) + lp->mmc_counters.rxoutofrangetype += + dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE); + if (rx_mask & BIT(18)) + lp->mmc_counters.rxlengtherror += + dwceqos_read(lp, DWC_MMC_RXLENGTHERROR); + if (rx_mask & BIT(17)) + lp->mmc_counters.rxunicastpackets_g += + dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G); + if (rx_mask & BIT(16)) + lp->mmc_counters.rx1024tomaxoctets_gb += + dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB); + if (rx_mask & BIT(15)) + lp->mmc_counters.rx512to1023octets_gb += + dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB); + if (rx_mask & BIT(14)) + lp->mmc_counters.rx256to511octets_gb += + dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB); + if (rx_mask & BIT(13)) + lp->mmc_counters.rx128to255octets_gb += + dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB); + if (rx_mask & BIT(12)) + lp->mmc_counters.rx65to127octets_gb += + dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB); + if (rx_mask & BIT(11)) + lp->mmc_counters.rx64octets_gb += + dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB); + if (rx_mask & BIT(10)) + lp->mmc_counters.rxoversize_g += + dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G); + if (rx_mask & BIT(9)) + lp->mmc_counters.rxundersize_g += + dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G); + if (rx_mask & BIT(8)) + lp->mmc_counters.rxjabbererror += + dwceqos_read(lp, DWC_MMC_RXJABBERERROR); + if (rx_mask & BIT(7)) + lp->mmc_counters.rxrunterror += + dwceqos_read(lp, DWC_MMC_RXRUNTERROR); + if (rx_mask & BIT(6)) + lp->mmc_counters.rxalignmenterror += + dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR); + if (rx_mask & BIT(5)) + lp->mmc_counters.rxcrcerror += + dwceqos_read(lp, DWC_MMC_RXCRCERROR); + if (rx_mask & BIT(4)) + lp->mmc_counters.rxmulticastpackets_g += + dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G); + if (rx_mask & BIT(3)) + lp->mmc_counters.rxbroadcastpackets_g += + dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G); + if (rx_mask & BIT(2)) + lp->mmc_counters.rxoctetcount_g += + dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G); + if (rx_mask & BIT(1)) + lp->mmc_counters.rxoctetcount_gb += + dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB); + if (rx_mask & BIT(0)) + lp->mmc_counters.rxpacketcount_gb += + dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB); +} + +static struct rtnl_link_stats64* 
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) +{ + unsigned long flags; + struct net_local *lp = netdev_priv(ndev); + struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters; + + spin_lock_irqsave(&lp->stats_lock, flags); + dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, + lp->mmc_tx_counters_mask); + spin_unlock_irqrestore(&lp->stats_lock, flags); + + s->rx_packets = hwstats->rxpacketcount_gb; + s->rx_bytes = hwstats->rxoctetcount_gb; + s->rx_errors = hwstats->rxpacketcount_gb - + hwstats->rxbroadcastpackets_g - + hwstats->rxmulticastpackets_g - + hwstats->rxunicastpackets_g; + s->multicast = hwstats->rxmulticastpackets_g; + s->rx_length_errors = hwstats->rxlengtherror; + s->rx_crc_errors = hwstats->rxcrcerror; + s->rx_fifo_errors = hwstats->rxfifooverflow; + + s->tx_packets = hwstats->txpacketcount_gb; + s->tx_bytes = hwstats->txoctetcount_gb; + + if (lp->mmc_tx_counters_mask & BIT(21)) + s->tx_errors = hwstats->txpacketcount_gb - + hwstats->txpacketcount_g; + else + s->tx_errors = hwstats->txunderflowerror + + hwstats->txcarriererror; + + return s; +} + +static int +dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, ecmd); +} + +static int +dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, ecmd); +} + +static void +dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) +{ + const struct net_local *lp = netdev_priv(ndev); + + strcpy(ed->driver, lp->pdev->dev.driver->name); + strcpy(ed->version, DRIVER_VERSION); +} + +static void dwceqos_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pp) +{ + const struct net_local *lp = netdev_priv(ndev); + + pp->autoneg = lp->flowcontrol.autoneg; + pp->tx_pause = lp->flowcontrol.tx; + pp->rx_pause = lp->flowcontrol.rx; +} + +static int dwceqos_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pp) +{ + struct net_local *lp = netdev_priv(ndev); + int ret = 0; + + lp->flowcontrol.autoneg = pp->autoneg; + if (pp->autoneg) { + lp->phy_dev->advertising |= ADVERTISED_Pause; + lp->phy_dev->advertising |= ADVERTISED_Asym_Pause; + } else { + lp->phy_dev->advertising &= ~ADVERTISED_Pause; + lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause; + lp->flowcontrol.rx = pp->rx_pause; + lp->flowcontrol.tx = pp->tx_pause; + } + + if (netif_running(ndev)) + ret = phy_start_aneg(lp->phy_dev); + + return ret; +} + +static void dwceqos_get_strings(struct net_device *ndev, u32 stringset, + u8 *data) +{ + size_t i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { + memcpy(data, dwceqos_ethtool_stats[i].stat_name, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } +} + +static void dwceqos_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct net_local *lp = netdev_priv(ndev); + unsigned long flags; + size_t i; + u8 *mmcstat = (u8 *)&lp->mmc_counters; + + spin_lock_irqsave(&lp->stats_lock, flags); + dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, + lp->mmc_tx_counters_mask); + spin_unlock_irqrestore(&lp->stats_lock, flags); + + for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { + memcpy(data, + mmcstat + 
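+		       /* The offsetof() stored in the STAT_ITEM table picks
+			* the matching u64 out of mmc_counters. */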
dwceqos_ethtool_stats[i].offset, + sizeof(u64)); + data++; + } +} + +static int dwceqos_get_sset_count(struct net_device *ndev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(dwceqos_ethtool_stats); + + return -EOPNOTSUPP; +} + +static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *space) +{ + const struct net_local *lp = netdev_priv(dev); + u32 *reg_space = (u32 *)space; + int reg_offset; + int reg_ix = 0; + + /* MAC registers */ + for (reg_offset = START_MAC_REG_OFFSET; + reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = dwceqos_read(lp, reg_offset); + reg_ix++; + } + /* MTL registers */ + for (reg_offset = START_MTL_REG_OFFSET; + reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = dwceqos_read(lp, reg_offset); + reg_ix++; + } + + /* DMA registers */ + for (reg_offset = START_DMA_REG_OFFSET; + reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = dwceqos_read(lp, reg_offset); + reg_ix++; + } + + BUG_ON(4 * reg_ix > REG_SPACE_SIZE); +} + +static int dwceqos_get_regs_len(struct net_device *dev) +{ + return REG_SPACE_SIZE; +} + +static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl) +{ + return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off"; +} + +static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl) +{ + return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off"; +} + +static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct net_local *lp = netdev_priv(ndev); + u32 lpi_status; + u32 lpi_enabled; + + if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) + return -EOPNOTSUPP; + + edata->eee_active = lp->eee_active; + edata->eee_enabled = lp->eee_enabled; + edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER); + lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA); + edata->tx_lpi_enabled = lpi_enabled; + + if (netif_msg_hw(lp)) { + u32 regval; + + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + + netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n", + dwceqos_get_rx_lpi_state(regval), + dwceqos_get_tx_lpi_state(regval)); + } + + return phy_ethtool_get_eee(lp->phy_dev, edata); +} + +static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct net_local *lp = netdev_priv(ndev); + u32 regval; + unsigned long flags; + + if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) + return -EOPNOTSUPP; + + if (edata->eee_enabled && !lp->eee_active) + return -EOPNOTSUPP; + + if (edata->tx_lpi_enabled) { + if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN || + edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX) + return -EINVAL; + } + + lp->eee_enabled = edata->eee_enabled; + + if (edata->eee_enabled && edata->tx_lpi_enabled) { + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER, + edata->tx_lpi_timer); + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE; + if (lp->en_tx_lpi_clockgating) + regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); + } else { + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + 
spin_unlock_irqrestore(&lp->hw_lock, flags); + } + + return phy_ethtool_set_eee(lp->phy_dev, edata); +} + +static u32 dwceqos_get_msglevel(struct net_device *ndev) +{ + const struct net_local *lp = netdev_priv(ndev); + + return lp->msg_enable; +} + +static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) +{ + struct net_local *lp = netdev_priv(ndev); + + lp->msg_enable = msglevel; +} + +static struct ethtool_ops dwceqos_ethtool_ops = { + .get_settings = dwceqos_get_settings, + .set_settings = dwceqos_set_settings, + .get_drvinfo = dwceqos_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_pauseparam = dwceqos_get_pauseparam, + .set_pauseparam = dwceqos_set_pauseparam, + .get_strings = dwceqos_get_strings, + .get_ethtool_stats = dwceqos_get_ethtool_stats, + .get_sset_count = dwceqos_get_sset_count, + .get_regs = dwceqos_get_regs, + .get_regs_len = dwceqos_get_regs_len, + .get_eee = dwceqos_get_eee, + .set_eee = dwceqos_set_eee, + .get_msglevel = dwceqos_get_msglevel, + .set_msglevel = dwceqos_set_msglevel, +}; + +static struct net_device_ops netdev_ops = { + .ndo_open = dwceqos_open, + .ndo_stop = dwceqos_stop, + .ndo_start_xmit = dwceqos_start_xmit, + .ndo_set_rx_mode = dwceqos_set_rx_mode, + .ndo_set_mac_address = dwceqos_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = dwceqos_poll_controller, +#endif + .ndo_do_ioctl = dwceqos_ioctl, + .ndo_tx_timeout = dwceqos_tx_timeout, + .ndo_get_stats64 = dwceqos_get_stats64, +}; + +static const struct of_device_id dwceq_of_match[] = { + { .compatible = "snps,dwc-qos-ethernet-4.10", }, + {} +}; +MODULE_DEVICE_TABLE(of, dwceq_of_match); + +static int dwceqos_probe(struct platform_device *pdev) +{ + struct resource *r_mem = NULL; + struct net_device *ndev; + struct net_local *lp; + int ret = -ENXIO; + + r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r_mem) { + dev_err(&pdev->dev, "no IO resource defined.\n"); + return -ENXIO; + } + + ndev = alloc_etherdev(sizeof(*lp)); + if (!ndev) { + dev_err(&pdev->dev, "etherdev allocation failed.\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + lp = netdev_priv(ndev); + lp->ndev = ndev; + lp->pdev = pdev; + lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT); + + spin_lock_init(&lp->tx_lock); + spin_lock_init(&lp->hw_lock); + spin_lock_init(&lp->stats_lock); + + lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); + if (IS_ERR(lp->apb_pclk)) { + dev_err(&pdev->dev, "apb_pclk clock not found.\n"); + ret = PTR_ERR(lp->apb_pclk); + goto err_out_free_netdev; + } + + ret = clk_prepare_enable(lp->apb_pclk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable APER clock.\n"); + goto err_out_free_netdev; + } + + lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem); + if (IS_ERR(lp->baseaddr)) { + dev_err(&pdev->dev, "failed to map baseaddress.\n"); + ret = PTR_ERR(lp->baseaddr); + goto err_out_clk_dis_aper; + } + + ndev->irq = platform_get_irq(pdev, 0); + ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ; + ndev->netdev_ops = &netdev_ops; + ndev->ethtool_ops = &dwceqos_ethtool_ops; + ndev->base_addr = r_mem->start; + + dwceqos_get_hwfeatures(lp); + dwceqos_mdio_set_csr(lp); + + ndev->hw_features = NETIF_F_SG; + + if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) + ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + + if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL) + ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + + if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL) + ndev->hw_features |= NETIF_F_RXCSUM; + + 
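[Editor's sketch, not part of the patch: the probe path above gates each netdev offload flag on a capability bit read from the MAC's hardware-feature register. A minimal illustration of that decode pattern follows; the bit positions and the function name are hypothetical, only the NETIF_F_* flags come from the hunk above.]

#include <linux/netdevice.h>

/* Illustrative only: map capability bits from a hardware feature
 * register onto kernel offload flags, as dwceqos_probe() does with
 * feature0/feature1.  BIT positions here are assumptions.
 */
static void example_decode_features(struct net_device *ndev, u32 feature0)
{
	ndev->hw_features = NETIF_F_SG;		/* scatter-gather baseline */

	if (feature0 & BIT(14))			/* assumed: TX csum offload */
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (feature0 & BIT(16))			/* assumed: RX csum offload */
		ndev->hw_features |= NETIF_F_RXCSUM;
}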
ndev->features = ndev->hw_features; + + netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_clk_dis_aper; + } + + lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); + if (IS_ERR(lp->phy_ref_clk)) { + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); + ret = PTR_ERR(lp->phy_ref_clk); + goto err_out_unregister_netdev; + } + + ret = clk_prepare_enable(lp->phy_ref_clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable device clock.\n"); + goto err_out_unregister_netdev; + } + + lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, + "phy-handle", 0); + if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) { + ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "invalid fixed-link"); + goto err_out_unregister_netdev; + } + + lp->phy_node = of_node_get(lp->pdev->dev.of_node); + } + + ret = of_get_phy_mode(lp->pdev->dev.of_node); + if (ret < 0) { + dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); + goto err_out_unregister_clk_notifier; + } + + lp->phy_interface = ret; + + ret = dwceqos_mii_init(lp); + if (ret) { + dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); + goto err_out_unregister_clk_notifier; + } + + ret = dwceqos_mii_probe(ndev); + if (ret != 0) { + netdev_err(ndev, "mii_probe fail.\n"); + ret = -ENXIO; + goto err_out_unregister_clk_notifier; + } + + dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); + + tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim, + (unsigned long)ndev); + tasklet_disable(&lp->tx_bdreclaim_tasklet); + + lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME); + INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout); + + platform_set_drvdata(pdev, ndev); + ret = dwceqos_probe_config_dt(pdev); + if (ret) { + dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", + ret); + goto err_out_unregister_clk_notifier; + } + dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", + pdev->id, ndev->base_addr, ndev->irq); + + ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0, + ndev->name, ndev); + if (ret) { + dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", + ndev->irq, ret); + goto err_out_unregister_clk_notifier; + } + + if (netif_msg_probe(lp)) + netdev_dbg(ndev, "net_local@%p\n", lp); + + return 0; + +err_out_unregister_clk_notifier: + clk_disable_unprepare(lp->phy_ref_clk); +err_out_unregister_netdev: + unregister_netdev(ndev); +err_out_clk_dis_aper: + clk_disable_unprepare(lp->apb_pclk); +err_out_free_netdev: + if (lp->phy_node) + of_node_put(lp->phy_node); + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + return ret; +} + +static int dwceqos_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct net_local *lp; + + if (ndev) { + lp = netdev_priv(ndev); + + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + mdiobus_unregister(lp->mii_bus); + kfree(lp->mii_bus->irq); + mdiobus_free(lp->mii_bus); + + unregister_netdev(ndev); + + clk_disable_unprepare(lp->phy_ref_clk); + clk_disable_unprepare(lp->apb_pclk); + + free_netdev(ndev); + } + + return 0; +} + +static struct platform_driver dwceqos_driver = { + .probe = dwceqos_probe, + .remove = dwceqos_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = dwceq_of_match, + }, +}; + +module_platform_driver(dwceqos_driver); + +MODULE_DESCRIPTION("DWC 
Ethernet QoS v4.10a driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Andreas Irestaal "); +MODULE_AUTHOR("Lars Persson "); diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index dd9430043..cba3d9fcb 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -41,6 +41,8 @@ #include #include +#include + MODULE_AUTHOR("Eugene Konev "); MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index d155bf257..874fb297e 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -365,7 +366,9 @@ struct cpsw_priv { spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; - struct napi_struct napi; + struct device_node *phy_node; + struct napi_struct napi_rx; + struct napi_struct napi_tx; struct device *dev; struct cpsw_platform_data data; struct cpsw_ss_regs __iomem *regs; @@ -386,10 +389,12 @@ struct cpsw_priv { struct cpsw_ale *ale; bool rx_pause; bool tx_pause; + bool quirk_irq; + bool rx_irq_disabled; + bool tx_irq_disabled; /* snapshot of IRQ numbers */ u32 irqs_table[4]; u32 num_irqs; - bool irq_enabled; struct cpts *cpts; u32 emac_port; }; @@ -752,13 +757,15 @@ static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) { struct cpsw_priv *priv = dev_id; + writel(0, &priv->wr_regs->tx_en); cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - cpdma_chan_process(priv->txch, 128); - priv = cpsw_get_slave_priv(priv, 1); - if (priv) - cpdma_chan_process(priv->txch, 128); + if (priv->quirk_irq) { + disable_irq_nosync(priv->irqs_table[1]); + priv->tx_irq_disabled = true; + } + napi_schedule(&priv->napi_tx); return IRQ_HANDLED; } @@ -767,43 +774,49 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) struct cpsw_priv *priv = dev_id; cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + writel(0, &priv->wr_regs->rx_en); - cpsw_intr_disable(priv); - if (priv->irq_enabled == true) { + if (priv->quirk_irq) { disable_irq_nosync(priv->irqs_table[0]); - priv->irq_enabled = false; + priv->rx_irq_disabled = true; } - if (netif_running(priv->ndev)) { - napi_schedule(&priv->napi); - return IRQ_HANDLED; - } + napi_schedule(&priv->napi_rx); + return IRQ_HANDLED; +} - priv = cpsw_get_slave_priv(priv, 1); - if (!priv) - return IRQ_NONE; +static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) +{ + struct cpsw_priv *priv = napi_to_priv(napi_tx); + int num_tx; - if (netif_running(priv->ndev)) { - napi_schedule(&priv->napi); - return IRQ_HANDLED; + num_tx = cpdma_chan_process(priv->txch, budget); + if (num_tx < budget) { + napi_complete(napi_tx); + writel(0xff, &priv->wr_regs->tx_en); + if (priv->quirk_irq && priv->tx_irq_disabled) { + priv->tx_irq_disabled = false; + enable_irq(priv->irqs_table[1]); + } } - return IRQ_NONE; + + if (num_tx) + cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx); + + return num_tx; } -static int cpsw_poll(struct napi_struct *napi, int budget) +static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) { - struct cpsw_priv *priv = napi_to_priv(napi); + struct cpsw_priv *priv = napi_to_priv(napi_rx); int num_rx; num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { - struct cpsw_priv *prim_cpsw; - - napi_complete(napi); - cpsw_intr_enable(priv); - prim_cpsw = cpsw_get_slave_priv(priv, 0); - if (prim_cpsw->irq_enabled == false) { - prim_cpsw->irq_enabled = true; + napi_complete(napi_rx); + 
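[Editor's sketch, not part of the patch: the cpsw rework above splits the single NAPI context into separate RX and TX instances and, on "quirk" SoCs, masks the interrupt line until the poll loop finishes. A minimal, self-contained version of that IRQ/NAPI handshake is shown below; all names are hypothetical and the ring-processing itself is omitted.]

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi_rx;
	int rx_irq;
	bool quirk_irq;		/* SoC re-asserts the line while masked */
	bool rx_irq_disabled;
};

static irqreturn_t example_rx_interrupt(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (priv->quirk_irq) {
		/* keep the line quiet until the poll loop drains the ring */
		disable_irq_nosync(priv->rx_irq);
		priv->rx_irq_disabled = true;
	}
	napi_schedule(&priv->napi_rx);
	return IRQ_HANDLED;
}

static int example_rx_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv =
		container_of(napi, struct example_priv, napi_rx);
	int done = 0;	/* packets processed this round (ring walk omitted) */

	if (done < budget) {
		/* ring drained: stop polling, unmask the interrupt again */
		napi_complete(napi);
		if (priv->quirk_irq && priv->rx_irq_disabled) {
			priv->rx_irq_disabled = false;
			enable_irq(priv->rx_irq);
		}
	}
	return done;
}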
writel(0xff, &priv->wr_regs->rx_en); + if (priv->quirk_irq && priv->rx_irq_disabled) { + priv->rx_irq_disabled = false; enable_irq(priv->irqs_table[0]); } } @@ -1134,7 +1147,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); - slave->phy = phy_connect(priv->ndev, slave->data->phy_id, + if (priv->phy_node) + slave->phy = of_phy_connect(priv->ndev, priv->phy_node, + &cpsw_adjust_link, 0, slave->data->phy_if); + else + slave->phy = phy_connect(priv->ndev, slave->data->phy_id, &cpsw_adjust_link, slave->data->phy_if); if (IS_ERR(slave->phy)) { dev_err(priv->dev, "phy %s not found on slave %d\n", @@ -1230,7 +1247,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_priv *prim_cpsw; int i, ret; u32 reg; @@ -1260,6 +1276,8 @@ static int cpsw_ndo_open(struct net_device *ndev) ALE_ALL_PORTS << priv->host_port, 0, 0); if (!cpsw_common_res_usage_state(priv)) { + struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); + /* setup tx dma to fixed prio and zero offset */ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); @@ -1273,6 +1291,19 @@ static int cpsw_ndo_open(struct net_device *ndev) /* Enable internal fifo flow control */ writel(0x7, &priv->regs->flow_control); + napi_enable(&priv_sl0->napi_rx); + napi_enable(&priv_sl0->napi_tx); + + if (priv_sl0->tx_irq_disabled) { + priv_sl0->tx_irq_disabled = false; + enable_irq(priv->irqs_table[1]); + } + + if (priv_sl0->rx_irq_disabled) { + priv_sl0->rx_irq_disabled = false; + enable_irq(priv->irqs_table[0]); + } + if (WARN_ON(!priv->data.rx_descs)) priv->data.rx_descs = 128; @@ -1311,18 +1342,9 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_set_coalesce(ndev, &coal); } - napi_enable(&priv->napi); cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); - prim_cpsw = cpsw_get_slave_priv(priv, 0); - if (prim_cpsw->irq_enabled == false) { - if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { - prim_cpsw->irq_enabled = true; - enable_irq(prim_cpsw->irqs_table[0]); - } - } - if (priv->data.dual_emac) priv->slaves[priv->emac_port].open_stat = true; return 0; @@ -1341,10 +1363,13 @@ static int cpsw_ndo_stop(struct net_device *ndev) cpsw_info(priv, ifdown, "shutting down cpsw device\n"); netif_stop_queue(priv->ndev); - napi_disable(&priv->napi); netif_carrier_off(priv->ndev); if (cpsw_common_res_usage_state(priv) <= 1) { + struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); + + napi_disable(&priv_sl0->napi_rx); + napi_disable(&priv_sl0->napi_tx); cpts_unregister(priv->cpts); cpsw_intr_disable(priv); cpdma_ctlr_stop(priv->dma); @@ -1915,11 +1940,12 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, slave->port_vlan = data->dual_emac_res_vlan; } -static int cpsw_probe_dt(struct cpsw_platform_data *data, +static int cpsw_probe_dt(struct cpsw_priv *priv, struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct device_node *slave_node; + struct cpsw_platform_data *data = &priv->data; int i = 0, ret; u32 prop; @@ -2010,6 +2036,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (strcmp(slave_node->name, "slave")) continue; + priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); parp = of_get_property(slave_node, "phy_id", &lenp); if ((parp == 
NULL) || (lenp != (sizeof(void *) * 2))) { dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); @@ -2025,7 +2052,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); - slave_data->phy_if = of_get_phy_mode(slave_node); if (slave_data->phy_if < 0) { dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", @@ -2127,7 +2153,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); @@ -2141,6 +2166,44 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, return ret; } +#define CPSW_QUIRK_IRQ BIT(0) + +static struct platform_device_id cpsw_devtype[] = { + { + /* keep it for existing comaptibles */ + .name = "cpsw", + .driver_data = CPSW_QUIRK_IRQ, + }, { + .name = "am335x-cpsw", + .driver_data = CPSW_QUIRK_IRQ, + }, { + .name = "am4372-cpsw", + .driver_data = 0, + }, { + .name = "dra7-cpsw", + .driver_data = 0, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, cpsw_devtype); + +enum ti_cpsw_type { + CPSW = 0, + AM335X_CPSW, + AM4372_CPSW, + DRA7_CPSW, +}; + +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], }, + { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], }, + { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], }, + { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, cpsw_of_mtable); + static int cpsw_probe(struct platform_device *pdev) { struct cpsw_platform_data *data; @@ -2150,6 +2213,7 @@ static int cpsw_probe(struct platform_device *pdev) struct cpsw_ale_params ale_params; void __iomem *ss_regs; struct resource *res, *ss_res; + const struct of_device_id *of_id; u32 slave_offset, sliver_offset, slave_size; int ret = 0, i; int irq; @@ -2169,7 +2233,6 @@ static int cpsw_probe(struct platform_device *pdev) priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); - priv->irq_enabled = true; if (!priv->cpts) { dev_err(&pdev->dev, "error allocating cpts\n"); ret = -ENOMEM; @@ -2184,7 +2247,7 @@ static int cpsw_probe(struct platform_device *pdev) /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (cpsw_probe_dt(&priv->data, pdev)) { + if (cpsw_probe_dt(priv, pdev)) { dev_err(&pdev->dev, "cpsw: platform data missing\n"); ret = -ENODEV; goto clean_runtime_disable_ret; @@ -2341,6 +2404,13 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } + of_id = of_match_device(cpsw_of_mtable, &pdev->dev); + if (of_id) { + pdev->id_entry = of_id->data; + if (pdev->id_entry->driver_data) + priv->quirk_irq = true; + } + /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and * MISC IRQs which are always kept disabled with this driver so * we will not request them. 
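[Editor's sketch, not part of the patch: the cpsw_devtype/cpsw_of_mtable pair above lets one driver carry per-SoC quirk flags in driver_data and recover them at probe time through the OF match entry. A hedged illustration of that lookup, with a hypothetical helper name, is below.]

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Illustrative only: fetch the matched OF entry and use its
 * driver_data (a platform_device_id in the scheme above) to decide
 * whether the IRQ quirk applies.
 */
static bool example_has_irq_quirk(struct platform_device *pdev,
				  const struct of_device_id *table)
{
	const struct of_device_id *of_id;

	of_id = of_match_device(table, &pdev->dev);
	if (!of_id)
		return false;

	pdev->id_entry = of_id->data;
	return pdev->id_entry->driver_data != 0;
}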
@@ -2380,7 +2450,8 @@ static int cpsw_probe(struct platform_device *pdev) ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT); + netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); + netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); @@ -2504,12 +2575,6 @@ static int cpsw_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); -static const struct of_device_id cpsw_of_mtable[] = { - { .compatible = "ti,cpsw", }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, cpsw_of_mtable); - static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index aeebc0a7b..a21c77bc1 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -2004,8 +2004,10 @@ static int davinci_emac_probe(struct platform_device *pdev) if (res_ctrl) { priv->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl); - if (IS_ERR(priv->ctrl_base)) + if (IS_ERR(priv->ctrl_base)) { + rc = PTR_ERR(priv->ctrl_base); goto no_pdata; + } } else { priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 4755838c6..9f9832f0d 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -52,6 +52,8 @@ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ NETIF_MSG_RX_STATUS) +#define NETCP_EFUSE_ADDR_SWAP 2 + #define knav_queue_get_id(q) knav_queue_device_control(q, \ KNAV_QUEUE_GET_ID, (unsigned long)NULL) @@ -173,13 +175,22 @@ static void set_words(u32 *words, int num_words, u32 *desc) } /* Read the e-fuse value as 32 bit values to be endian independent */ -static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac) +static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap) { unsigned int addr0, addr1; addr1 = readl(efuse_mac + 4); addr0 = readl(efuse_mac); + switch (swap) { + case NETCP_EFUSE_ADDR_SWAP: + addr0 = addr1; + addr1 = readl(efuse_mac); + break; + default: + break; + } + x[0] = (addr1 & 0x0000ff00) >> 8; x[1] = addr1 & 0x000000ff; x[2] = (addr0 & 0xff000000) >> 24; @@ -280,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device, interface_list) { struct netcp_intf_modpriv *intf_modpriv; - /* If interface not registered then register now */ - if (!netcp_intf->netdev_registered) - ret = netcp_register_interface(netcp_intf); - - if (ret) - return -ENODEV; - intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), GFP_KERNEL); if (!intf_modpriv) @@ -295,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device, interface = of_parse_phandle(netcp_intf->node_interface, module->name, 0); + if (!interface) { + devm_kfree(dev, intf_modpriv); + continue; + } + intf_modpriv->netcp_priv = netcp_intf; intf_modpriv->netcp_module = module; list_add_tail(&intf_modpriv->intf_list, @@ -312,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device, continue; } } + + /* Now register the interface with netdev */ + list_for_each_entry(netcp_intf, + &netcp_device->interface_head, + interface_list) { + /* If interface not registered then register now */ + if (!netcp_intf->netdev_registered) { + ret = netcp_register_interface(netcp_intf); + if (ret) + return 
-ENODEV; + } + } return 0; } @@ -346,7 +367,6 @@ int netcp_register_module(struct netcp_module *module) if (ret < 0) goto fail; } - mutex_unlock(&netcp_modules_lock); return 0; @@ -785,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp) netcp->rx_pool = NULL; } -static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) +static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) { struct knav_dma_desc *hwdesc; unsigned int buf_len, dma_sz; @@ -799,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) hwdesc = knav_pool_desc_get(netcp->rx_pool); if (IS_ERR_OR_NULL(hwdesc)) { dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); - return; + return -ENOMEM; } if (likely(fdq == 0)) { @@ -851,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, &dma_sz); knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); - return; + return 0; fail: knav_pool_desc_put(netcp->rx_pool, hwdesc); + return -ENOMEM; } /* Refill Rx FDQ with descriptors & attached buffers */ static void netcp_rxpool_refill(struct netcp_intf *netcp) { u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; - int i; + int i, ret = 0; /* Calculate the FDQ deficit and refill */ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { fdq_deficit[i] = netcp->rx_queue_depths[i] - knav_queue_get_count(netcp->rx_fdq[i]); - while (fdq_deficit[i]--) - netcp_allocate_rx_buf(netcp, i); + while (fdq_deficit[i]-- && !ret) + ret = netcp_allocate_rx_buf(netcp, i); } /* end for fdqs */ } @@ -882,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget) packets = netcp_process_rx_packets(netcp, budget); + netcp_rxpool_refill(netcp); if (packets < budget) { napi_complete(&netcp->rx_napi); knav_queue_enable_notify(netcp->rx_queue); } - netcp_rxpool_refill(netcp); return packets; } @@ -1373,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp) continue; dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", naddr->addr, naddr->type); - mutex_lock(&netcp_modules_lock); for_each_module(netcp, priv) { module = priv->netcp_module; if (!module->del_addr) @@ -1382,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp) naddr); WARN_ON(error); } - mutex_unlock(&netcp_modules_lock); netcp_addr_del(netcp, naddr); } } @@ -1399,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) continue; dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", naddr->addr, naddr->type); - mutex_lock(&netcp_modules_lock); + for_each_module(netcp, priv) { module = priv->netcp_module; if (!module->add_addr) @@ -1407,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) error = module->add_addr(priv->module_priv, naddr); WARN_ON(error); } - mutex_unlock(&netcp_modules_lock); } } @@ -1421,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); + spin_lock(&netcp->lock); /* first clear all marks */ netcp_addr_clear_mark(netcp); @@ -1439,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) /* finally sweep and callout into modules */ netcp_addr_sweep_del(netcp); netcp_addr_sweep_add(netcp); + spin_unlock(&netcp->lock); } static void netcp_free_navigator_resources(struct netcp_intf *netcp) @@ -1603,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev) goto fail; } - mutex_lock(&netcp_modules_lock); for_each_module(netcp, 
intf_modpriv) { module = intf_modpriv->netcp_module; if (module->open) { @@ -1614,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev) } } } - mutex_unlock(&netcp_modules_lock); napi_enable(&netcp->rx_napi); napi_enable(&netcp->tx_napi); @@ -1631,7 +1649,6 @@ fail_open: if (module->close) module->close(intf_modpriv->module_priv, ndev); } - mutex_unlock(&netcp_modules_lock); fail: netcp_free_navigator_resources(netcp); @@ -1655,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev) napi_disable(&netcp->rx_napi); napi_disable(&netcp->tx_napi); - mutex_lock(&netcp_modules_lock); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (module->close) { @@ -1664,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev) dev_err(netcp->ndev_dev, "Close failed\n"); } } - mutex_unlock(&netcp_modules_lock); /* Recycle Rx descriptors from completion queue */ netcp_empty_rx_queue(netcp); @@ -1692,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev, if (!netif_running(ndev)) return -EINVAL; - mutex_lock(&netcp_modules_lock); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (!module->ioctl) @@ -1708,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev, } out: - mutex_unlock(&netcp_modules_lock); return (ret == 0) ? 0 : err; } @@ -1743,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_intf_modpriv *intf_modpriv; struct netcp_module *module; + unsigned long flags; int err = 0; dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); - mutex_lock(&netcp_modules_lock); + spin_lock_irqsave(&netcp->lock, flags); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if ((module->add_vid) && (vid != 0)) { @@ -1759,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) } } } - mutex_unlock(&netcp_modules_lock); + spin_unlock_irqrestore(&netcp->lock, flags); + return err; } @@ -1768,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_intf_modpriv *intf_modpriv; struct netcp_module *module; + unsigned long flags; int err = 0; dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); - mutex_lock(&netcp_modules_lock); + spin_lock_irqsave(&netcp->lock, flags); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (module->del_vid) { @@ -1784,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) } } } - mutex_unlock(&netcp_modules_lock); + spin_unlock_irqrestore(&netcp->lock, flags); return err; } @@ -1901,7 +1917,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, goto quit; } - emac_arch_get_mac_addr(efuse_mac_addr, efuse); + emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac); if (is_valid_ether_addr(efuse_mac_addr)) ether_addr_copy(ndev->dev_addr, efuse_mac_addr); else @@ -2029,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev) struct device_node *child, *interfaces; struct netcp_device *netcp_device; struct device *dev = &pdev->dev; - struct netcp_module *module; int ret; if (!node) { @@ -2076,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev) /* Add the device instance to the list */ list_add_tail(&netcp_device->device_list, &netcp_devices); - /* Probe & attach any modules already registered */ - mutex_lock(&netcp_modules_lock); - 
for_each_netcp_module(module) { - ret = netcp_module_probe(netcp_device, module); - if (ret < 0) - dev_err(dev, "module(%s) probe failed\n", module->name); - } - mutex_unlock(&netcp_modules_lock); return 0; probe_quit_interface: @@ -2141,7 +2148,6 @@ MODULE_DEVICE_TABLE(of, of_match); static struct platform_driver netcp_driver = { .driver = { .name = "netcp-1.0", - .owner = THIS_MODULE, .of_match_table = of_match, }, .probe = netcp_probe, diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 1974a8ae7..4e70e7586 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -77,6 +77,7 @@ #define GBENU_ALE_OFFSET 0x1e000 #define GBENU_HOST_PORT_NUM 0 #define GBENU_NUM_ALE_ENTRIES 1024 +#define GBENU_SGMII_MODULE_SIZE 0x100 /* 10G Ethernet SS defines */ #define XGBE_MODULE_NAME "netcp-xgbe" @@ -149,8 +150,8 @@ #define XGBE_STATS2_MODULE 2 /* s: 0-based slave_port */ -#define SGMII_BASE(s) \ - (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) +#define SGMII_BASE(d, s) \ + (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs) #define GBE_TX_QUEUE 648 #define GBE_TXHOOK_ORDER 0 @@ -295,8 +296,6 @@ struct xgbe_hw_stats { u32 rx_dma_overruns; }; -#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32)) - struct gbenu_ss_regs { u32 id_ver; u32 synce_count; /* NU */ @@ -480,7 +479,6 @@ struct gbenu_hw_stats { u32 tx_pri7_drop_bcnt; }; -#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32)) #define GBENU_HW_STATS_REG_MAP_SZ 0x200 struct gbe_ss_regs { @@ -615,7 +613,6 @@ struct gbe_hw_stats { u32 rx_dma_overruns; }; -#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32)) #define GBE_MAX_HW_STAT_MODS 9 #define GBE_HW_STATS_REG_MAP_SZ 0x100 @@ -646,6 +643,7 @@ struct gbe_priv { bool enable_ale; u8 max_num_slaves; u8 max_num_ports; /* max_num_slaves + 1 */ + u8 num_stats_mods; struct netcp_tx_pipe tx_pipe; int host_port; @@ -675,6 +673,7 @@ struct gbe_priv { struct net_device *dummy_ndev; u64 *hw_stats; + u32 *hw_stats_prev; const struct netcp_ethtool_stat *et_stats; int num_et_stats; /* Lock for updating the hwstats */ @@ -874,7 +873,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = { }; /* This is the size of entries in GBENU_STATS_HOST */ -#define GBENU_ET_STATS_HOST_SIZE 33 +#define GBENU_ET_STATS_HOST_SIZE 52 #define GBENU_STATS_HOST(field) \ { \ @@ -883,8 +882,8 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = { offsetof(struct gbenu_hw_stats, field) \ } -/* This is the size of entries in GBENU_STATS_HOST */ -#define GBENU_ET_STATS_PORT_SIZE 46 +/* This is the size of entries in GBENU_STATS_PORT */ +#define GBENU_ET_STATS_PORT_SIZE 65 #define GBENU_STATS_P1(field) \ { \ @@ -976,7 +975,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_HOST(ale_unknown_mcast_bytes), GBENU_STATS_HOST(ale_unknown_bcast), GBENU_STATS_HOST(ale_unknown_bcast_bytes), + GBENU_STATS_HOST(ale_pol_match), + GBENU_STATS_HOST(ale_pol_match_red), + GBENU_STATS_HOST(ale_pol_match_yellow), GBENU_STATS_HOST(tx_mem_protect_err), + GBENU_STATS_HOST(tx_pri0_drop), + GBENU_STATS_HOST(tx_pri1_drop), + GBENU_STATS_HOST(tx_pri2_drop), + GBENU_STATS_HOST(tx_pri3_drop), + GBENU_STATS_HOST(tx_pri4_drop), + GBENU_STATS_HOST(tx_pri5_drop), + GBENU_STATS_HOST(tx_pri6_drop), + GBENU_STATS_HOST(tx_pri7_drop), + GBENU_STATS_HOST(tx_pri0_drop_bcnt), + GBENU_STATS_HOST(tx_pri1_drop_bcnt), + GBENU_STATS_HOST(tx_pri2_drop_bcnt), + 
GBENU_STATS_HOST(tx_pri3_drop_bcnt), + GBENU_STATS_HOST(tx_pri4_drop_bcnt), + GBENU_STATS_HOST(tx_pri5_drop_bcnt), + GBENU_STATS_HOST(tx_pri6_drop_bcnt), + GBENU_STATS_HOST(tx_pri7_drop_bcnt), /* GBENU Module 1 */ GBENU_STATS_P1(rx_good_frames), GBENU_STATS_P1(rx_broadcast_frames), @@ -1023,7 +1041,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P1(ale_unknown_mcast_bytes), GBENU_STATS_P1(ale_unknown_bcast), GBENU_STATS_P1(ale_unknown_bcast_bytes), + GBENU_STATS_P1(ale_pol_match), + GBENU_STATS_P1(ale_pol_match_red), + GBENU_STATS_P1(ale_pol_match_yellow), GBENU_STATS_P1(tx_mem_protect_err), + GBENU_STATS_P1(tx_pri0_drop), + GBENU_STATS_P1(tx_pri1_drop), + GBENU_STATS_P1(tx_pri2_drop), + GBENU_STATS_P1(tx_pri3_drop), + GBENU_STATS_P1(tx_pri4_drop), + GBENU_STATS_P1(tx_pri5_drop), + GBENU_STATS_P1(tx_pri6_drop), + GBENU_STATS_P1(tx_pri7_drop), + GBENU_STATS_P1(tx_pri0_drop_bcnt), + GBENU_STATS_P1(tx_pri1_drop_bcnt), + GBENU_STATS_P1(tx_pri2_drop_bcnt), + GBENU_STATS_P1(tx_pri3_drop_bcnt), + GBENU_STATS_P1(tx_pri4_drop_bcnt), + GBENU_STATS_P1(tx_pri5_drop_bcnt), + GBENU_STATS_P1(tx_pri6_drop_bcnt), + GBENU_STATS_P1(tx_pri7_drop_bcnt), /* GBENU Module 2 */ GBENU_STATS_P2(rx_good_frames), GBENU_STATS_P2(rx_broadcast_frames), @@ -1070,7 +1107,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P2(ale_unknown_mcast_bytes), GBENU_STATS_P2(ale_unknown_bcast), GBENU_STATS_P2(ale_unknown_bcast_bytes), + GBENU_STATS_P2(ale_pol_match), + GBENU_STATS_P2(ale_pol_match_red), + GBENU_STATS_P2(ale_pol_match_yellow), GBENU_STATS_P2(tx_mem_protect_err), + GBENU_STATS_P2(tx_pri0_drop), + GBENU_STATS_P2(tx_pri1_drop), + GBENU_STATS_P2(tx_pri2_drop), + GBENU_STATS_P2(tx_pri3_drop), + GBENU_STATS_P2(tx_pri4_drop), + GBENU_STATS_P2(tx_pri5_drop), + GBENU_STATS_P2(tx_pri6_drop), + GBENU_STATS_P2(tx_pri7_drop), + GBENU_STATS_P2(tx_pri0_drop_bcnt), + GBENU_STATS_P2(tx_pri1_drop_bcnt), + GBENU_STATS_P2(tx_pri2_drop_bcnt), + GBENU_STATS_P2(tx_pri3_drop_bcnt), + GBENU_STATS_P2(tx_pri4_drop_bcnt), + GBENU_STATS_P2(tx_pri5_drop_bcnt), + GBENU_STATS_P2(tx_pri6_drop_bcnt), + GBENU_STATS_P2(tx_pri7_drop_bcnt), /* GBENU Module 3 */ GBENU_STATS_P3(rx_good_frames), GBENU_STATS_P3(rx_broadcast_frames), @@ -1117,7 +1173,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P3(ale_unknown_mcast_bytes), GBENU_STATS_P3(ale_unknown_bcast), GBENU_STATS_P3(ale_unknown_bcast_bytes), + GBENU_STATS_P3(ale_pol_match), + GBENU_STATS_P3(ale_pol_match_red), + GBENU_STATS_P3(ale_pol_match_yellow), GBENU_STATS_P3(tx_mem_protect_err), + GBENU_STATS_P3(tx_pri0_drop), + GBENU_STATS_P3(tx_pri1_drop), + GBENU_STATS_P3(tx_pri2_drop), + GBENU_STATS_P3(tx_pri3_drop), + GBENU_STATS_P3(tx_pri4_drop), + GBENU_STATS_P3(tx_pri5_drop), + GBENU_STATS_P3(tx_pri6_drop), + GBENU_STATS_P3(tx_pri7_drop), + GBENU_STATS_P3(tx_pri0_drop_bcnt), + GBENU_STATS_P3(tx_pri1_drop_bcnt), + GBENU_STATS_P3(tx_pri2_drop_bcnt), + GBENU_STATS_P3(tx_pri3_drop_bcnt), + GBENU_STATS_P3(tx_pri4_drop_bcnt), + GBENU_STATS_P3(tx_pri5_drop_bcnt), + GBENU_STATS_P3(tx_pri6_drop_bcnt), + GBENU_STATS_P3(tx_pri7_drop_bcnt), /* GBENU Module 4 */ GBENU_STATS_P4(rx_good_frames), GBENU_STATS_P4(rx_broadcast_frames), @@ -1164,7 +1239,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P4(ale_unknown_mcast_bytes), GBENU_STATS_P4(ale_unknown_bcast), GBENU_STATS_P4(ale_unknown_bcast_bytes), + GBENU_STATS_P4(ale_pol_match), + GBENU_STATS_P4(ale_pol_match_red), + 
GBENU_STATS_P4(ale_pol_match_yellow), GBENU_STATS_P4(tx_mem_protect_err), + GBENU_STATS_P4(tx_pri0_drop), + GBENU_STATS_P4(tx_pri1_drop), + GBENU_STATS_P4(tx_pri2_drop), + GBENU_STATS_P4(tx_pri3_drop), + GBENU_STATS_P4(tx_pri4_drop), + GBENU_STATS_P4(tx_pri5_drop), + GBENU_STATS_P4(tx_pri6_drop), + GBENU_STATS_P4(tx_pri7_drop), + GBENU_STATS_P4(tx_pri0_drop_bcnt), + GBENU_STATS_P4(tx_pri1_drop_bcnt), + GBENU_STATS_P4(tx_pri2_drop_bcnt), + GBENU_STATS_P4(tx_pri3_drop_bcnt), + GBENU_STATS_P4(tx_pri4_drop_bcnt), + GBENU_STATS_P4(tx_pri5_drop_bcnt), + GBENU_STATS_P4(tx_pri6_drop_bcnt), + GBENU_STATS_P4(tx_pri7_drop_bcnt), /* GBENU Module 5 */ GBENU_STATS_P5(rx_good_frames), GBENU_STATS_P5(rx_broadcast_frames), @@ -1211,7 +1305,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P5(ale_unknown_mcast_bytes), GBENU_STATS_P5(ale_unknown_bcast), GBENU_STATS_P5(ale_unknown_bcast_bytes), + GBENU_STATS_P5(ale_pol_match), + GBENU_STATS_P5(ale_pol_match_red), + GBENU_STATS_P5(ale_pol_match_yellow), GBENU_STATS_P5(tx_mem_protect_err), + GBENU_STATS_P5(tx_pri0_drop), + GBENU_STATS_P5(tx_pri1_drop), + GBENU_STATS_P5(tx_pri2_drop), + GBENU_STATS_P5(tx_pri3_drop), + GBENU_STATS_P5(tx_pri4_drop), + GBENU_STATS_P5(tx_pri5_drop), + GBENU_STATS_P5(tx_pri6_drop), + GBENU_STATS_P5(tx_pri7_drop), + GBENU_STATS_P5(tx_pri0_drop_bcnt), + GBENU_STATS_P5(tx_pri1_drop_bcnt), + GBENU_STATS_P5(tx_pri2_drop_bcnt), + GBENU_STATS_P5(tx_pri3_drop_bcnt), + GBENU_STATS_P5(tx_pri4_drop_bcnt), + GBENU_STATS_P5(tx_pri5_drop_bcnt), + GBENU_STATS_P5(tx_pri6_drop_bcnt), + GBENU_STATS_P5(tx_pri7_drop_bcnt), /* GBENU Module 6 */ GBENU_STATS_P6(rx_good_frames), GBENU_STATS_P6(rx_broadcast_frames), @@ -1258,7 +1371,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P6(ale_unknown_mcast_bytes), GBENU_STATS_P6(ale_unknown_bcast), GBENU_STATS_P6(ale_unknown_bcast_bytes), + GBENU_STATS_P6(ale_pol_match), + GBENU_STATS_P6(ale_pol_match_red), + GBENU_STATS_P6(ale_pol_match_yellow), GBENU_STATS_P6(tx_mem_protect_err), + GBENU_STATS_P6(tx_pri0_drop), + GBENU_STATS_P6(tx_pri1_drop), + GBENU_STATS_P6(tx_pri2_drop), + GBENU_STATS_P6(tx_pri3_drop), + GBENU_STATS_P6(tx_pri4_drop), + GBENU_STATS_P6(tx_pri5_drop), + GBENU_STATS_P6(tx_pri6_drop), + GBENU_STATS_P6(tx_pri7_drop), + GBENU_STATS_P6(tx_pri0_drop_bcnt), + GBENU_STATS_P6(tx_pri1_drop_bcnt), + GBENU_STATS_P6(tx_pri2_drop_bcnt), + GBENU_STATS_P6(tx_pri3_drop_bcnt), + GBENU_STATS_P6(tx_pri4_drop_bcnt), + GBENU_STATS_P6(tx_pri5_drop_bcnt), + GBENU_STATS_P6(tx_pri6_drop_bcnt), + GBENU_STATS_P6(tx_pri7_drop_bcnt), /* GBENU Module 7 */ GBENU_STATS_P7(rx_good_frames), GBENU_STATS_P7(rx_broadcast_frames), @@ -1305,7 +1437,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P7(ale_unknown_mcast_bytes), GBENU_STATS_P7(ale_unknown_bcast), GBENU_STATS_P7(ale_unknown_bcast_bytes), + GBENU_STATS_P7(ale_pol_match), + GBENU_STATS_P7(ale_pol_match_red), + GBENU_STATS_P7(ale_pol_match_yellow), GBENU_STATS_P7(tx_mem_protect_err), + GBENU_STATS_P7(tx_pri0_drop), + GBENU_STATS_P7(tx_pri1_drop), + GBENU_STATS_P7(tx_pri2_drop), + GBENU_STATS_P7(tx_pri3_drop), + GBENU_STATS_P7(tx_pri4_drop), + GBENU_STATS_P7(tx_pri5_drop), + GBENU_STATS_P7(tx_pri6_drop), + GBENU_STATS_P7(tx_pri7_drop), + GBENU_STATS_P7(tx_pri0_drop_bcnt), + GBENU_STATS_P7(tx_pri1_drop_bcnt), + GBENU_STATS_P7(tx_pri2_drop_bcnt), + GBENU_STATS_P7(tx_pri3_drop_bcnt), + GBENU_STATS_P7(tx_pri4_drop_bcnt), + GBENU_STATS_P7(tx_pri5_drop_bcnt), + GBENU_STATS_P7(tx_pri6_drop_bcnt), + 
GBENU_STATS_P7(tx_pri7_drop_bcnt), /* GBENU Module 8 */ GBENU_STATS_P8(rx_good_frames), GBENU_STATS_P8(rx_broadcast_frames), @@ -1352,7 +1503,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P8(ale_unknown_mcast_bytes), GBENU_STATS_P8(ale_unknown_bcast), GBENU_STATS_P8(ale_unknown_bcast_bytes), + GBENU_STATS_P8(ale_pol_match), + GBENU_STATS_P8(ale_pol_match_red), + GBENU_STATS_P8(ale_pol_match_yellow), GBENU_STATS_P8(tx_mem_protect_err), + GBENU_STATS_P8(tx_pri0_drop), + GBENU_STATS_P8(tx_pri1_drop), + GBENU_STATS_P8(tx_pri2_drop), + GBENU_STATS_P8(tx_pri3_drop), + GBENU_STATS_P8(tx_pri4_drop), + GBENU_STATS_P8(tx_pri5_drop), + GBENU_STATS_P8(tx_pri6_drop), + GBENU_STATS_P8(tx_pri7_drop), + GBENU_STATS_P8(tx_pri0_drop_bcnt), + GBENU_STATS_P8(tx_pri1_drop_bcnt), + GBENU_STATS_P8(tx_pri2_drop_bcnt), + GBENU_STATS_P8(tx_pri3_drop_bcnt), + GBENU_STATS_P8(tx_pri4_drop_bcnt), + GBENU_STATS_P8(tx_pri5_drop_bcnt), + GBENU_STATS_P8(tx_pri6_drop_bcnt), + GBENU_STATS_P8(tx_pri7_drop_bcnt), }; #define XGBE_STATS0_INFO(field) \ @@ -1554,70 +1724,97 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset) } } -static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) +static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod) +{ + void __iomem *base = gbe_dev->hw_stats_regs[stats_mod]; + u32 __iomem *p_stats_entry; + int i; + + for (i = 0; i < gbe_dev->num_et_stats; i++) { + if (gbe_dev->et_stats[i].type == stats_mod) { + p_stats_entry = base + gbe_dev->et_stats[i].offset; + gbe_dev->hw_stats[i] = 0; + gbe_dev->hw_stats_prev[i] = readl(p_stats_entry); + } + } +} + +static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev, + int et_stats_entry) { void __iomem *base = NULL; - u32 __iomem *p; - u32 tmp = 0; + u32 __iomem *p_stats_entry; + u32 curr, delta; + + /* The hw_stats_regs pointers are already + * properly set to point to the right base: + */ + base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type]; + p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset; + curr = readl(p_stats_entry); + delta = curr - gbe_dev->hw_stats_prev[et_stats_entry]; + gbe_dev->hw_stats_prev[et_stats_entry] = curr; + gbe_dev->hw_stats[et_stats_entry] += delta; +} + +static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) +{ int i; for (i = 0; i < gbe_dev->num_et_stats; i++) { - base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type]; - p = base + gbe_dev->et_stats[i].offset; - tmp = readl(p); - gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp; + gbe_update_hw_stats_entry(gbe_dev, i); + if (data) data[i] = gbe_dev->hw_stats[i]; - /* write-to-decrement: - * new register value = old register value - write value - */ - writel(tmp, p); } } -static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) +static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev, + int stats_mod) { - void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0]; - void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1]; - u64 *hw_stats = &gbe_dev->hw_stats[0]; - void __iomem *base = NULL; - u32 __iomem *p; - u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2); - int i, j, pair; + u32 val; - for (pair = 0; pair < 2; pair++) { - val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); + val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); - if (pair == 0) - val &= ~GBE_STATS_CD_SEL; - else - val |= GBE_STATS_CD_SEL; + switch (stats_mod) { + case GBE_STATSA_MODULE: + case 
GBE_STATSB_MODULE: + val &= ~GBE_STATS_CD_SEL; + break; + case GBE_STATSC_MODULE: + case GBE_STATSD_MODULE: + val |= GBE_STATS_CD_SEL; + break; + default: + return; + } - /* make the stat modules visible */ - writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); + /* make the stat module visible */ + writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); +} - for (i = 0; i < pair_size; i++) { - j = pair * pair_size + i; - switch (gbe_dev->et_stats[j].type) { - case GBE_STATSA_MODULE: - case GBE_STATSC_MODULE: - base = gbe_statsa; - break; - case GBE_STATSB_MODULE: - case GBE_STATSD_MODULE: - base = gbe_statsb; - break; - } +static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod) +{ + gbe_stats_mod_visible_ver14(gbe_dev, stats_mod); + gbe_reset_mod_stats(gbe_dev, stats_mod); +} + +static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) +{ + u32 half_num_et_stats = (gbe_dev->num_et_stats / 2); + int et_entry, j, pair; + + for (pair = 0; pair < 2; pair++) { + gbe_stats_mod_visible_ver14(gbe_dev, (pair ? + GBE_STATSC_MODULE : + GBE_STATSA_MODULE)); + + for (j = 0; j < half_num_et_stats; j++) { + et_entry = pair * half_num_et_stats + j; + gbe_update_hw_stats_entry(gbe_dev, et_entry); - p = base + gbe_dev->et_stats[j].offset; - tmp = readl(p); - hw_stats[j] += tmp; if (data) - data[j] = hw_stats[j]; - /* write-to-decrement: - * new register value = old register value - write value - */ - writel(tmp, p); + data[et_entry] = gbe_dev->hw_stats[et_entry]; } } } @@ -1801,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, return; if (!SLAVE_LINK_IS_XGMII(slave)) { - if (gbe_dev->ss_version == GBE_SS_VERSION_14) - sgmii_link_state = - netcp_sgmii_get_port_link(SGMII_BASE(sp), sp); - else - sgmii_link_state = - netcp_sgmii_get_port_link( - gbe_dev->sgmii_port_regs, sp); + sgmii_link_state = + netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp); } phy_link_state = gbe_phy_link_status(slave); @@ -1904,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, static void gbe_sgmii_rtreset(struct gbe_priv *priv, struct gbe_slave *slave, bool set) { - void __iomem *sgmii_port_regs; - if (SLAVE_LINK_IS_XGMII(slave)) return; - if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) - sgmii_port_regs = priv->sgmii_port34_regs; - else - sgmii_port_regs = priv->sgmii_port_regs; - - netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set); + netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num), + slave->slave_num, set); } static void gbe_slave_stop(struct gbe_intf *intf) @@ -1940,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf) static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) { - void __iomem *sgmii_port_regs; - - sgmii_port_regs = priv->sgmii_port_regs; - if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) - sgmii_port_regs = priv->sgmii_port34_regs; + if (SLAVE_LINK_IS_XGMII(slave)) + return; - if (!SLAVE_LINK_IS_XGMII(slave)) { - netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); - netcp_sgmii_config(sgmii_port_regs, slave->slave_num, - slave->link_interface); - } + netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num); + netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num, + slave->link_interface); } static int gbe_slave_open(struct gbe_intf *gbe_intf) @@ -2207,14 +2388,15 @@ static void netcp_ethss_timer(unsigned long arg) 
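[Editor's sketch, not part of the patch: the statistics rework above replaces the hardware's write-to-decrement scheme with gbe_update_hw_stats_entry(), which snapshots each 32-bit counter and accumulates the delta into a 64-bit software total. The core wrap-safe accumulation, in isolation and with hypothetical names:]

#include <linux/types.h>

struct example_stat {
	u32 prev;	/* last raw hardware reading */
	u64 total;	/* monotonically accumulated value */
};

/* Unsigned 32-bit subtraction yields the correct delta even when the
 * hardware counter wraps between readings, provided it wraps at most
 * once per polling interval.
 */
static void example_accumulate(struct example_stat *s, u32 curr)
{
	u32 delta = curr - s->prev;

	s->prev = curr;
	s->total += delta;
}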
netcp_ethss_update_link_state(gbe_dev, slave, NULL); } - spin_lock_bh(&gbe_dev->hw_stats_lock); + /* A timer runs as a BH, no need to block them */ + spin_lock(&gbe_dev->hw_stats_lock); if (gbe_dev->ss_version == GBE_SS_VERSION_14) gbe_update_stats_ver14(gbe_dev, NULL); else gbe_update_stats(gbe_dev, NULL); - spin_unlock_bh(&gbe_dev->hw_stats_lock); + spin_unlock(&gbe_dev->hw_stats_lock); gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; add_timer(&gbe_dev->timer); @@ -2455,8 +2637,10 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, mac_phy_link = true; slave->open = true; - if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) { + of_node_put(port); break; + } } /* of_phy_connect() is needed only for MAC-PHY interface */ @@ -2571,15 +2755,28 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, } gbe_dev->xgbe_serdes_regs = regs; + gbe_dev->num_stats_mods = gbe_dev->max_num_ports; + gbe_dev->et_stats = xgbe10_et_stats; + gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - XGBE10_NUM_STAT_ENTRIES * - (gbe_dev->max_num_ports) * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + gbe_dev->ss_version = XGBE_SS_VERSION_10; gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + XGBE10_SGMII_MODULE_OFFSET; @@ -2593,8 +2790,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = XGBE10_HOST_PORT_NUM; gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES; - gbe_dev->et_stats = xgbe10_et_stats; - gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; /* Subsystem registers */ @@ -2679,30 +2874,45 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, } gbe_dev->switch_regs = regs; + gbe_dev->num_stats_mods = gbe_dev->max_num_slaves; + gbe_dev->et_stats = gbe13_et_stats; + gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - GBE13_NUM_HW_STAT_ENTRIES * - gbe_dev->max_num_slaves * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET; gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET; + /* K2HK has only 2 hw stats modules visible at a time, so + * module 0 & 2 points to one base and + * module 1 & 3 points to the other base + */ for (i = 0; i < gbe_dev->max_num_slaves; i++) { gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET + - (GBE_HW_STATS_REG_MAP_SZ * i); + (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1)); } gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET; gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBE13_HOST_PORT_NUM; 
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; - gbe_dev->et_stats = gbe13_et_stats; - gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL; /* Subsystem registers */ @@ -2729,15 +2939,34 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, void __iomem *regs; int i, ret; + gbe_dev->num_stats_mods = gbe_dev->max_num_ports; + gbe_dev->et_stats = gbenu_et_stats; + + if (IS_SS_ID_NU(gbe_dev)) + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); + else + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + GBENU_ET_STATS_PORT_SIZE; + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - GBENU_NUM_HW_STAT_ENTRIES * - (gbe_dev->max_num_ports) * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, @@ -2755,6 +2984,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->switch_regs = regs; gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; + + /* Although sgmii modules are mem mapped to one contiguous + * region on GBENU devices, setting sgmii_port34_regs allows + * consistent code when accessing sgmii api + */ + gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs + + (2 * GBENU_SGMII_MODULE_SIZE); + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; for (i = 0; i < (gbe_dev->max_num_ports); i++) @@ -2765,16 +3002,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBENU_HOST_PORT_NUM; gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; - gbe_dev->et_stats = gbenu_et_stats; gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; - if (IS_SS_ID_NU(gbe_dev)) - gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + - (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); - else - gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + - GBENU_ET_STATS_PORT_SIZE; - /* Subsystem registers */ GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); @@ -2804,7 +3033,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, struct cpsw_ale_params ale_params; struct gbe_priv *gbe_dev; u32 slave_num; - int ret = 0; + int i, ret = 0; if (!node) { dev_err(dev, "device tree info unavailable\n"); @@ -2910,8 +3139,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, continue; } gbe_dev->num_slaves++; - if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) { + of_node_put(interface); break; + } } of_node_put(interfaces); @@ -2951,6 +3182,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, /* initialize host port */ gbe_init_host_port(gbe_dev); + spin_lock_bh(&gbe_dev->hw_stats_lock); + for (i = 0; i < gbe_dev->num_stats_mods; i++) { + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + gbe_reset_mod_stats_ver14(gbe_dev, i); + else + gbe_reset_mod_stats(gbe_dev, i); + } + spin_unlock_bh(&gbe_dev->hw_stats_lock); + init_timer(&gbe_dev->timer); gbe_dev->timer.data = (unsigned long)gbe_dev; 
gbe_dev->timer.function = netcp_ethss_timer; diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index a3f761000..0a15acc07 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -2273,7 +2274,8 @@ static int __init tile_net_init_module(void) tile_net_dev_init(name, mac); if (!network_cpus_init()) - network_cpus_map = *cpu_online_mask; + cpumask_and(&network_cpus_map, housekeeping_cpumask(), + cpu_online_mask); return 0; } diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index a83263743..2b7550c43 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2134,10 +2134,11 @@ static int rhine_rx(struct net_device *dev, int limit) } skb_put(skb, pkt_len); - skb->protocol = eth_type_trans(skb, dev); rhine_rx_vlan_tag(skb, desc, data_size); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); u64_stats_update_begin(&rp->rx_stats.syncp); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 6008eee01..cf468c87c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -828,6 +828,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) if (!phydev) dev_info(dev, "MDIO of the phy is not registered yet\n"); + else + put_device(&phydev->dev); return 0; } diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h index 5924d4219..4ca2341d7 100644 --- a/drivers/net/fddi/skfp/h/hwmtm.h +++ b/drivers/net/fddi/skfp/h/hwmtm.h @@ -74,15 +74,6 @@ #define NULL 0 #endif -#ifdef LITTLE_ENDIAN -#define HWM_REVERSE(x) (x) -#else -#define HWM_REVERSE(x) ((((x)<<24L)&0xff000000L) + \ - (((x)<< 8L)&0x00ff0000L) + \ - (((x)>> 8L)&0x0000ff00L) + \ - (((x)>>24L)&0x000000ffL)) -#endif - #define C_INDIC (1L<<25) #define A_INDIC (1L<<26) #define RD_FS_LOCAL 0x80 diff --git a/drivers/net/fjes/Makefile b/drivers/net/fjes/Makefile new file mode 100644 index 000000000..523e3d7cf --- /dev/null +++ b/drivers/net/fjes/Makefile @@ -0,0 +1,30 @@ +################################################################################ +# +# FUJITSU Extended Socket Network Device driver +# Copyright (c) 2015 FUJITSU LIMITED +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". 
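[Editor's sketch, not part of the patch: the hwmtm.h hunk above deletes an open-coded HWM_REVERSE() byte-swap macro that was the identity on little-endian builds and a 32-bit byte reversal on big-endian ones. The kernel's standard helper covers the same ground; a hedged one-liner equivalence, under that assumption:]

#include <linux/types.h>
#include <linux/swab.h>

/* swab32() performs the same byte reversal the removed macro spelled
 * out as ((x)<<24 & 0xff000000) + ... ; cpu_to_le32()/le32_to_cpu()
 * express the endian-conditional form.
 */
static inline u32 example_reverse(u32 x)
{
	return swab32(x);
}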
+# +################################################################################ + + +# +# Makefile for the FUJITSU Extended Socket network device driver +# + +obj-$(CONFIG_FUJITSU_ES) += fjes.o + +fjes-objs := fjes_main.o fjes_hw.o fjes_ethtool.o diff --git a/drivers/net/fjes/fjes.h b/drivers/net/fjes/fjes.h new file mode 100644 index 000000000..a592fe21c --- /dev/null +++ b/drivers/net/fjes/fjes.h @@ -0,0 +1,77 @@ +/* + * FUJITSU Extended Socket Network Device driver + * Copyright (c) 2015 FUJITSU LIMITED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef FJES_H_ +#define FJES_H_ + +#include <linux/acpi.h> + +#include "fjes_hw.h" + +#define FJES_ACPI_SYMBOL "Extended Socket" +#define FJES_MAX_QUEUES 1 +#define FJES_TX_RETRY_INTERVAL (20 * HZ) +#define FJES_TX_RETRY_TIMEOUT (100) +#define FJES_TX_TX_STALL_TIMEOUT (FJES_TX_RETRY_INTERVAL / 2) +#define FJES_OPEN_ZONE_UPDATE_WAIT (300) /* msec */ +#define FJES_IRQ_WATCH_DELAY (HZ) + +/* board specific private data structure */ +struct fjes_adapter { + struct net_device *netdev; + struct platform_device *plat_dev; + + struct napi_struct napi; + struct rtnl_link_stats64 stats64; + + unsigned int tx_retry_count; + unsigned long tx_start_jiffies; + unsigned long rx_last_jiffies; + bool unset_rx_last; + + struct work_struct force_close_task; + bool force_reset; + bool open_guard; + + bool irq_registered; + + struct workqueue_struct *txrx_wq; + struct workqueue_struct *control_wq; + + struct work_struct tx_stall_task; + struct work_struct raise_intr_rxdata_task; + + struct work_struct unshare_watch_task; + unsigned long unshare_watch_bitmask; + + struct delayed_work interrupt_watch_task; + bool interrupt_watch_enable; + + struct fjes_hw hw; +}; + +extern char fjes_driver_name[]; +extern char fjes_driver_version[]; +extern const u32 fjes_support_mtu[]; + +void fjes_set_ethtool_ops(struct net_device *); + +#endif /* FJES_H_ */ diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c new file mode 100644 index 000000000..0119dd199 --- /dev/null +++ b/drivers/net/fjes/fjes_ethtool.c @@ -0,0 +1,137 @@ +/* + * FUJITSU Extended Socket Network Device driver + * Copyright (c) 2015 FUJITSU LIMITED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING".
+ * + */ + +/* ethtool support for fjes */ + +#include <linux/vmalloc.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/platform_device.h> + +#include "fjes.h" + +struct fjes_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define FJES_STAT(name, stat) { \ + .stat_string = name, \ + .sizeof_stat = FIELD_SIZEOF(struct fjes_adapter, stat), \ + .stat_offset = offsetof(struct fjes_adapter, stat) \ +} + +static const struct fjes_stats fjes_gstrings_stats[] = { + FJES_STAT("rx_packets", stats64.rx_packets), + FJES_STAT("tx_packets", stats64.tx_packets), + FJES_STAT("rx_bytes", stats64.rx_bytes), + FJES_STAT("tx_bytes", stats64.tx_bytes), + FJES_STAT("rx_dropped", stats64.rx_dropped), + FJES_STAT("tx_dropped", stats64.tx_dropped), +}; + +static void fjes_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct fjes_adapter *adapter = netdev_priv(netdev); + char *p; + int i; + + for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) { + p = (char *)adapter + fjes_gstrings_stats[i].stat_offset; + data[i] = (fjes_gstrings_stats[i].sizeof_stat == sizeof(u64)) + ? *(u64 *)p : *(u32 *)p; + } +} + +static void fjes_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) { + memcpy(p, fjes_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int fjes_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(fjes_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static void fjes_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct fjes_adapter *adapter = netdev_priv(netdev); + struct platform_device *plat_dev; + + plat_dev = adapter->plat_dev; + + strlcpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, fjes_driver_version, + sizeof(drvinfo->version)); + + strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version)); + snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info), + "platform:%s", plat_dev->name); + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static int fjes_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + ecmd->supported = 0; + ecmd->advertising = 0; + ecmd->duplex = DUPLEX_FULL; + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_DUMMY1; + ecmd->port = PORT_NONE; + ethtool_cmd_speed_set(ecmd, 20000); /* 20Gb/s */ + + return 0; +} + +static const struct ethtool_ops fjes_ethtool_ops = { + .get_settings = fjes_get_settings, + .get_drvinfo = fjes_get_drvinfo, + .get_ethtool_stats = fjes_get_ethtool_stats, + .get_strings = fjes_get_strings, + .get_sset_count = fjes_get_sset_count, +}; + +void fjes_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &fjes_ethtool_ops; +} diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c new file mode 100644 index 000000000..2d3848c9d --- /dev/null +++ b/drivers/net/fjes/fjes_hw.c @@ -0,0 +1,1125 @@ +/* + * FUJITSU Extended Socket Network Device driver + * Copyright (c) 2015 FUJITSU LIMITED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "fjes_hw.h" +#include "fjes.h" + +static void fjes_hw_update_zone_task(struct work_struct *); +static void fjes_hw_epstop_task(struct work_struct *); + +/* supported MTU list */ +const u32 fjes_support_mtu[] = { + FJES_MTU_DEFINE(8 * 1024), + FJES_MTU_DEFINE(16 * 1024), + FJES_MTU_DEFINE(32 * 1024), + FJES_MTU_DEFINE(64 * 1024), + 0 +}; + +u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg) +{ + u8 *base = hw->base; + u32 value = 0; + + value = readl(&base[reg]); + + return value; +} + +static u8 *fjes_hw_iomap(struct fjes_hw *hw) +{ + u8 *base; + + if (!request_mem_region(hw->hw_res.start, hw->hw_res.size, + fjes_driver_name)) { + pr_err("request_mem_region failed\n"); + return NULL; + } + + base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size); + + return base; +} + +static void fjes_hw_iounmap(struct fjes_hw *hw) +{ + iounmap(hw->base); + release_mem_region(hw->hw_res.start, hw->hw_res.size); +} + +int fjes_hw_reset(struct fjes_hw *hw) +{ + union REG_DCTL dctl; + int timeout; + + dctl.reg = 0; + dctl.bits.reset = 1; + wr32(XSCT_DCTL, dctl.reg); + + timeout = FJES_DEVICE_RESET_TIMEOUT * 1000; + dctl.reg = rd32(XSCT_DCTL); + while ((dctl.bits.reset == 1) && (timeout > 0)) { + msleep(1000); + dctl.reg = rd32(XSCT_DCTL); + timeout -= 1000; + } + + return timeout > 0 ?
0 : -EIO; +} + +static int fjes_hw_get_max_epid(struct fjes_hw *hw) +{ + union REG_MAX_EP info; + + info.reg = rd32(XSCT_MAX_EP); + + return info.bits.maxep; +} + +static int fjes_hw_get_my_epid(struct fjes_hw *hw) +{ + union REG_OWNER_EPID info; + + info.reg = rd32(XSCT_OWNER_EPID); + + return info.bits.epid; +} + +static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw) +{ + size_t size; + + size = sizeof(struct fjes_device_shared_info) + + (sizeof(u8) * hw->max_epid); + hw->hw_info.share = kzalloc(size, GFP_KERNEL); + if (!hw->hw_info.share) + return -ENOMEM; + + hw->hw_info.share->epnum = hw->max_epid; + + return 0; +} + +static void fjes_hw_free_shared_status_region(struct fjes_hw *hw) +{ + kfree(hw->hw_info.share); + hw->hw_info.share = NULL; +} + +static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh) +{ + void *mem; + + mem = vzalloc(EP_BUFFER_SIZE); + if (!mem) + return -ENOMEM; + + epbh->buffer = mem; + epbh->size = EP_BUFFER_SIZE; + + epbh->info = (union ep_buffer_info *)mem; + epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info)); + + return 0; +} + +static void fjes_hw_free_epbuf(struct epbuf_handler *epbh) +{ + if (epbh->buffer) + vfree(epbh->buffer); + + epbh->buffer = NULL; + epbh->size = 0; + + epbh->info = NULL; + epbh->ring = NULL; +} + +void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu) +{ + union ep_buffer_info *info = epbh->info; + u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX]; + int i; + + for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) + vlan_id[i] = info->v1i.vlan_id[i]; + + memset(info, 0, sizeof(union ep_buffer_info)); + + info->v1i.version = 0; /* version 0 */ + + for (i = 0; i < ETH_ALEN; i++) + info->v1i.mac_addr[i] = mac_addr[i]; + + info->v1i.head = 0; + info->v1i.tail = 1; + + info->v1i.info_size = sizeof(union ep_buffer_info); + info->v1i.buffer_size = epbh->size - info->v1i.info_size; + + info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu); + info->v1i.count_max = + EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max); + + for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) + info->v1i.vlan_id[i] = vlan_id[i]; +} + +void +fjes_hw_init_command_registers(struct fjes_hw *hw, + struct fjes_device_command_param *param) +{ + /* Request Buffer length */ + wr32(XSCT_REQBL, (__le32)(param->req_len)); + /* Response Buffer Length */ + wr32(XSCT_RESPBL, (__le32)(param->res_len)); + + /* Request Buffer Address */ + wr32(XSCT_REQBAL, + (__le32)(param->req_start & GENMASK_ULL(31, 0))); + wr32(XSCT_REQBAH, + (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32)); + + /* Response Buffer Address */ + wr32(XSCT_RESPBAL, + (__le32)(param->res_start & GENMASK_ULL(31, 0))); + wr32(XSCT_RESPBAH, + (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32)); + + /* Share status address */ + wr32(XSCT_SHSTSAL, + (__le32)(param->share_start & GENMASK_ULL(31, 0))); + wr32(XSCT_SHSTSAH, + (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32)); +} + +static int fjes_hw_setup(struct fjes_hw *hw) +{ + u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + struct fjes_device_command_param param; + struct ep_share_mem_info *buf_pair; + size_t mem_size; + int result; + int epidx; + void *buf; + + hw->hw_info.max_epid = &hw->max_epid; + hw->hw_info.my_epid = &hw->my_epid; + + buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info), + GFP_KERNEL); + if (!buf) + return -ENOMEM; + + hw->ep_shm_info = (struct ep_share_mem_info *)buf; + + mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid); + hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL); + if 
(!(hw->hw_info.req_buf)) + return -ENOMEM; + + hw->hw_info.req_buf_size = mem_size; + + mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid); + hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL); + if (!(hw->hw_info.res_buf)) + return -ENOMEM; + + hw->hw_info.res_buf_size = mem_size; + + result = fjes_hw_alloc_shared_status_region(hw); + if (result) + return result; + + hw->hw_info.buffer_share_bit = 0; + hw->hw_info.buffer_unshare_reserve_bit = 0; + + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx != hw->my_epid) { + buf_pair = &hw->ep_shm_info[epidx]; + + result = fjes_hw_alloc_epbuf(&buf_pair->tx); + if (result) + return result; + + result = fjes_hw_alloc_epbuf(&buf_pair->rx); + if (result) + return result; + + fjes_hw_setup_epbuf(&buf_pair->tx, mac, + fjes_support_mtu[0]); + fjes_hw_setup_epbuf(&buf_pair->rx, mac, + fjes_support_mtu[0]); + } + } + + memset(&param, 0, sizeof(param)); + + param.req_len = hw->hw_info.req_buf_size; + param.req_start = __pa(hw->hw_info.req_buf); + param.res_len = hw->hw_info.res_buf_size; + param.res_start = __pa(hw->hw_info.res_buf); + + param.share_start = __pa(hw->hw_info.share->ep_status); + + fjes_hw_init_command_registers(hw, &param); + + return 0; +} + +static void fjes_hw_cleanup(struct fjes_hw *hw) +{ + int epidx; + + if (!hw->ep_shm_info) + return; + + fjes_hw_free_shared_status_region(hw); + + kfree(hw->hw_info.req_buf); + hw->hw_info.req_buf = NULL; + + kfree(hw->hw_info.res_buf); + hw->hw_info.res_buf = NULL; + + for (epidx = 0; epidx < hw->max_epid ; epidx++) { + if (epidx == hw->my_epid) + continue; + fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx); + fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx); + } + + kfree(hw->ep_shm_info); + hw->ep_shm_info = NULL; +} + +int fjes_hw_init(struct fjes_hw *hw) +{ + int ret; + + hw->base = fjes_hw_iomap(hw); + if (!hw->base) + return -EIO; + + ret = fjes_hw_reset(hw); + if (ret) + return ret; + + fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true); + + INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task); + INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task); + + mutex_init(&hw->hw_info.lock); + + hw->max_epid = fjes_hw_get_max_epid(hw); + hw->my_epid = fjes_hw_get_my_epid(hw); + + if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid)) + return -ENXIO; + + ret = fjes_hw_setup(hw); + + return ret; +} + +void fjes_hw_exit(struct fjes_hw *hw) +{ + int ret; + + if (hw->base) { + ret = fjes_hw_reset(hw); + if (ret) + pr_err("%s: reset error", __func__); + + fjes_hw_iounmap(hw); + hw->base = NULL; + } + + fjes_hw_cleanup(hw); + + cancel_work_sync(&hw->update_zone_task); + cancel_work_sync(&hw->epstop_task); +} + +static enum fjes_dev_command_response_e +fjes_hw_issue_request_command(struct fjes_hw *hw, + enum fjes_dev_command_request_type type) +{ + enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN; + union REG_CR cr; + union REG_CS cs; + int timeout; + + cr.reg = 0; + cr.bits.req_start = 1; + cr.bits.req_code = type; + wr32(XSCT_CR, cr.reg); + cr.reg = rd32(XSCT_CR); + + if (cr.bits.error == 0) { + timeout = FJES_COMMAND_REQ_TIMEOUT * 1000; + cs.reg = rd32(XSCT_CS); + + while ((cs.bits.complete != 1) && timeout > 0) { + msleep(1000); + cs.reg = rd32(XSCT_CS); + timeout -= 1000; + } + + if (cs.bits.complete == 1) + ret = FJES_CMD_STATUS_NORMAL; + else if (timeout <= 0) + ret = FJES_CMD_STATUS_TIMEOUT; + + } else { + switch (cr.bits.err_info) { + case FJES_CMD_REQ_ERR_INFO_PARAM: + ret = FJES_CMD_STATUS_ERROR_PARAM; + break; + case FJES_CMD_REQ_ERR_INFO_STATUS: + ret =
FJES_CMD_STATUS_ERROR_STATUS; + break; + default: + ret = FJES_CMD_STATUS_UNKNOWN; + break; + } + } + + return ret; +} + +int fjes_hw_request_info(struct fjes_hw *hw) +{ + union fjes_device_command_req *req_buf = hw->hw_info.req_buf; + union fjes_device_command_res *res_buf = hw->hw_info.res_buf; + enum fjes_dev_command_response_e ret; + int result; + + memset(req_buf, 0, hw->hw_info.req_buf_size); + memset(res_buf, 0, hw->hw_info.res_buf_size); + + req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN; + + res_buf->info.length = 0; + res_buf->info.code = 0; + + ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO); + + result = 0; + + if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) != + res_buf->info.length) { + result = -ENOMSG; + } else if (ret == FJES_CMD_STATUS_NORMAL) { + switch (res_buf->info.code) { + case FJES_CMD_REQ_RES_CODE_NORMAL: + result = 0; + break; + default: + result = -EPERM; + break; + } + } else { + switch (ret) { + case FJES_CMD_STATUS_UNKNOWN: + result = -EPERM; + break; + case FJES_CMD_STATUS_TIMEOUT: + result = -EBUSY; + break; + case FJES_CMD_STATUS_ERROR_PARAM: + result = -EPERM; + break; + case FJES_CMD_STATUS_ERROR_STATUS: + result = -EPERM; + break; + default: + result = -EPERM; + break; + } + } + + return result; +} + +int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid, + struct ep_share_mem_info *buf_pair) +{ + union fjes_device_command_req *req_buf = hw->hw_info.req_buf; + union fjes_device_command_res *res_buf = hw->hw_info.res_buf; + enum fjes_dev_command_response_e ret; + int page_count; + int timeout; + int i, idx; + void *addr; + int result; + + if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit)) + return 0; + + memset(req_buf, 0, hw->hw_info.req_buf_size); + memset(res_buf, 0, hw->hw_info.res_buf_size); + + req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN( + buf_pair->tx.size, + buf_pair->rx.size); + req_buf->share_buffer.epid = dest_epid; + + idx = 0; + req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size; + page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE; + for (i = 0; i < page_count; i++) { + addr = ((u8 *)(buf_pair->tx.buffer)) + + (i * EP_BUFFER_INFO_SIZE); + req_buf->share_buffer.buffer[idx++] = + (__le64)(page_to_phys(vmalloc_to_page(addr)) + + offset_in_page(addr)); + } + + req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size; + page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE; + for (i = 0; i < page_count; i++) { + addr = ((u8 *)(buf_pair->rx.buffer)) + + (i * EP_BUFFER_INFO_SIZE); + req_buf->share_buffer.buffer[idx++] = + (__le64)(page_to_phys(vmalloc_to_page(addr)) + + offset_in_page(addr)); + } + + res_buf->share_buffer.length = 0; + res_buf->share_buffer.code = 0; + + ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER); + + timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000; + while ((ret == FJES_CMD_STATUS_NORMAL) && + (res_buf->share_buffer.length == + FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) && + (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) && + (timeout > 0)) { + msleep(200 + hw->my_epid * 20); + timeout -= (200 + hw->my_epid * 20); + + res_buf->share_buffer.length = 0; + res_buf->share_buffer.code = 0; + + ret = fjes_hw_issue_request_command( + hw, FJES_CMD_REQ_SHARE_BUFFER); + } + + result = 0; + + if (res_buf->share_buffer.length != + FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) + result = -ENOMSG; + else if (ret == FJES_CMD_STATUS_NORMAL) { + switch (res_buf->share_buffer.code) { + case FJES_CMD_REQ_RES_CODE_NORMAL: + result = 0; + 
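+ /* Mark this EP's buffers as shared; the test_bit() check + * at the top of this function makes a repeat registration + * for the same EP a no-op. + */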
set_bit(dest_epid, &hw->hw_info.buffer_share_bit); + break; + case FJES_CMD_REQ_RES_CODE_BUSY: + result = -EBUSY; + break; + default: + result = -EPERM; + break; + } + } else { + switch (ret) { + case FJES_CMD_STATUS_UNKNOWN: + result = -EPERM; + break; + case FJES_CMD_STATUS_TIMEOUT: + result = -EBUSY; + break; + case FJES_CMD_STATUS_ERROR_PARAM: + case FJES_CMD_STATUS_ERROR_STATUS: + default: + result = -EPERM; + break; + } + } + + return result; +} + +int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid) +{ + union fjes_device_command_req *req_buf = hw->hw_info.req_buf; + union fjes_device_command_res *res_buf = hw->hw_info.res_buf; + struct fjes_device_shared_info *share = hw->hw_info.share; + enum fjes_dev_command_response_e ret; + int timeout; + int result; + + if (!hw->base) + return -EPERM; + + if (!req_buf || !res_buf || !share) + return -EPERM; + + if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit)) + return 0; + + memset(req_buf, 0, hw->hw_info.req_buf_size); + memset(res_buf, 0, hw->hw_info.res_buf_size); + + req_buf->unshare_buffer.length = + FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN; + req_buf->unshare_buffer.epid = dest_epid; + + res_buf->unshare_buffer.length = 0; + res_buf->unshare_buffer.code = 0; + + ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER); + + timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000; + while ((ret == FJES_CMD_STATUS_NORMAL) && + (res_buf->unshare_buffer.length == + FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) && + (res_buf->unshare_buffer.code == + FJES_CMD_REQ_RES_CODE_BUSY) && + (timeout > 0)) { + msleep(200 + hw->my_epid * 20); + timeout -= (200 + hw->my_epid * 20); + + res_buf->unshare_buffer.length = 0; + res_buf->unshare_buffer.code = 0; + + ret = + fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER); + } + + result = 0; + + if (res_buf->unshare_buffer.length != + FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) { + result = -ENOMSG; + } else if (ret == FJES_CMD_STATUS_NORMAL) { + switch (res_buf->unshare_buffer.code) { + case FJES_CMD_REQ_RES_CODE_NORMAL: + result = 0; + clear_bit(dest_epid, &hw->hw_info.buffer_share_bit); + break; + case FJES_CMD_REQ_RES_CODE_BUSY: + result = -EBUSY; + break; + default: + result = -EPERM; + break; + } + } else { + switch (ret) { + case FJES_CMD_STATUS_UNKNOWN: + result = -EPERM; + break; + case FJES_CMD_STATUS_TIMEOUT: + result = -EBUSY; + break; + case FJES_CMD_STATUS_ERROR_PARAM: + case FJES_CMD_STATUS_ERROR_STATUS: + default: + result = -EPERM; + break; + } + } + + return result; +} + +int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid, + enum REG_ICTL_MASK mask) +{ + u32 ig = mask | dest_epid; + + wr32(XSCT_IG, cpu_to_le32(ig)); + + return 0; +} + +u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw) +{ + u32 cur_is; + + cur_is = rd32(XSCT_IS); + + return cur_is; +} + +void fjes_hw_set_irqmask(struct fjes_hw *hw, + enum REG_ICTL_MASK intr_mask, bool mask) +{ + if (mask) + wr32(XSCT_IMS, intr_mask); + else + wr32(XSCT_IMC, intr_mask); +} + +bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid) +{ + if (epid >= hw->max_epid) + return false; + + if ((hw->ep_shm_info[epid].es_status != + FJES_ZONING_STATUS_ENABLE) || + (hw->ep_shm_info[hw->my_epid].zone == + FJES_ZONING_ZONE_TYPE_NONE)) + return false; + else + return (hw->ep_shm_info[epid].zone == + hw->ep_shm_info[hw->my_epid].zone); +} + +int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share, + int dest_epid) +{ + int value = false; + + if (dest_epid < share->epnum) + value = 
share->ep_status[dest_epid]; + + return value; +} + +static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid) +{ + return test_bit(src_epid, &hw->txrx_stop_req_bit); +} + +static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid) +{ + return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status & + FJES_RX_STOP_REQ_DONE); +} + +enum ep_partner_status +fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid) +{ + enum ep_partner_status status; + + if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) { + if (fjes_hw_epid_is_stop_requested(hw, epid)) { + status = EP_PARTNER_WAITING; + } else { + if (fjes_hw_epid_is_stop_process_done(hw, epid)) + status = EP_PARTNER_COMPLETE; + else + status = EP_PARTNER_SHARED; + } + } else { + status = EP_PARTNER_UNSHARE; + } + + return status; +} + +void fjes_hw_raise_epstop(struct fjes_hw *hw) +{ + enum ep_partner_status status; + int epidx; + + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + + status = fjes_hw_get_partner_ep_status(hw, epidx); + switch (status) { + case EP_PARTNER_SHARED: + fjes_hw_raise_interrupt(hw, epidx, + REG_ICTL_MASK_TXRX_STOP_REQ); + break; + default: + break; + } + + set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit); + set_bit(epidx, &hw->txrx_stop_req_bit); + + hw->ep_shm_info[epidx].tx.info->v1i.rx_status |= + FJES_RX_STOP_REQ_REQUEST; + } +} + +int fjes_hw_wait_epstop(struct fjes_hw *hw) +{ + enum ep_partner_status status; + union ep_buffer_info *info; + int wait_time = 0; + int epidx; + + while (hw->hw_info.buffer_unshare_reserve_bit && + (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) { + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + status = fjes_hw_epid_is_shared(hw->hw_info.share, + epidx); + info = hw->ep_shm_info[epidx].rx.info; + if ((!status || + (info->v1i.rx_status & + FJES_RX_STOP_REQ_DONE)) && + test_bit(epidx, + &hw->hw_info.buffer_unshare_reserve_bit)) { + clear_bit(epidx, + &hw->hw_info.buffer_unshare_reserve_bit); + } + } + + msleep(100); + wait_time += 100; + } + + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit)) + clear_bit(epidx, + &hw->hw_info.buffer_unshare_reserve_bit); + } + + return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000) + ? 
0 : -EBUSY; +} + +bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version) +{ + union ep_buffer_info *info = epbh->info; + + return (info->common.version == version); +} + +bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu) +{ + union ep_buffer_info *info = epbh->info; + + return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)); +} + +bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id) +{ + union ep_buffer_info *info = epbh->info; + bool ret = false; + int i; + + if (vlan_id == 0) { + ret = true; + } else { + for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) { + if (vlan_id == info->v1i.vlan_id[i]) { + ret = true; + break; + } + } + } + return ret; +} + +bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id) +{ + union ep_buffer_info *info = epbh->info; + int i; + + for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) { + if (info->v1i.vlan_id[i] == 0) { + info->v1i.vlan_id[i] = vlan_id; + return true; + } + } + return false; +} + +void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id) +{ + union ep_buffer_info *info = epbh->info; + int i; + + if (0 != vlan_id) { + for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) { + if (vlan_id == info->v1i.vlan_id[i]) + info->v1i.vlan_id[i] = 0; + } + } +} + +bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh) +{ + union ep_buffer_info *info = epbh->info; + + if (info->v1i.count_max == 0) + return true; + + return EP_RING_EMPTY(info->v1i.head, info->v1i.tail, + info->v1i.count_max); +} + +void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh, + size_t *psize) +{ + union ep_buffer_info *info = epbh->info; + struct esmem_frame *ring_frame; + void *frame; + + ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX + (info->v1i.head, + info->v1i.count_max) * + info->v1i.frame_max]); + + *psize = (size_t)ring_frame->frame_size; + + frame = ring_frame->frame_data; + + return frame; +} + +void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh) +{ + union ep_buffer_info *info = epbh->info; + + if (fjes_hw_epbuf_rx_is_empty(epbh)) + return; + + EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max); +} + +int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh, + void *frame, size_t size) +{ + union ep_buffer_info *info = epbh->info; + struct esmem_frame *ring_frame; + + if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max)) + return -ENOBUFS; + + ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX + (info->v1i.tail - 1, + info->v1i.count_max) * + info->v1i.frame_max]); + + ring_frame->frame_size = size; + memcpy((void *)(ring_frame->frame_data), (void *)frame, size); + + EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max); + + return 0; +} + +static void fjes_hw_update_zone_task(struct work_struct *work) +{ + struct fjes_hw *hw = container_of(work, + struct fjes_hw, update_zone_task); + + struct my_s {u8 es_status; u8 zone; } *info; + union fjes_device_command_res *res_buf; + enum ep_partner_status pstatus; + + struct fjes_adapter *adapter; + struct net_device *netdev; + + ulong unshare_bit = 0; + ulong share_bit = 0; + ulong irq_bit = 0; + + int epidx; + int ret; + + adapter = (struct fjes_adapter *)hw->back; + netdev = adapter->netdev; + res_buf = hw->hw_info.res_buf; + info = (struct my_s *)&res_buf->info.info; + + mutex_lock(&hw->hw_info.lock); + + ret = fjes_hw_request_info(hw); + switch (ret) { + case -ENOMSG: + case -EBUSY: + default: + if (!work_pending(&adapter->force_close_task)) { + adapter->force_reset = true; + 
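+ /* The zone info request failed; leave force_reset set so + * the force_close worker, which dev_close()s the netdev + * from process context, ends up resetting the device. + */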
schedule_work(&adapter->force_close_task); + } + break; + + case 0: + + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) { + hw->ep_shm_info[epidx].es_status = + info[epidx].es_status; + hw->ep_shm_info[epidx].zone = + info[epidx].zone; + continue; + } + + pstatus = fjes_hw_get_partner_ep_status(hw, epidx); + switch (pstatus) { + case EP_PARTNER_UNSHARE: + default: + if ((info[epidx].zone != + FJES_ZONING_ZONE_TYPE_NONE) && + (info[epidx].es_status == + FJES_ZONING_STATUS_ENABLE) && + (info[epidx].zone == + info[hw->my_epid].zone)) + set_bit(epidx, &share_bit); + else + set_bit(epidx, &unshare_bit); + break; + + case EP_PARTNER_COMPLETE: + case EP_PARTNER_WAITING: + if ((info[epidx].zone == + FJES_ZONING_ZONE_TYPE_NONE) || + (info[epidx].es_status != + FJES_ZONING_STATUS_ENABLE) || + (info[epidx].zone != + info[hw->my_epid].zone)) { + set_bit(epidx, + &adapter->unshare_watch_bitmask); + set_bit(epidx, + &hw->hw_info.buffer_unshare_reserve_bit); + } + break; + + case EP_PARTNER_SHARED: + if ((info[epidx].zone == + FJES_ZONING_ZONE_TYPE_NONE) || + (info[epidx].es_status != + FJES_ZONING_STATUS_ENABLE) || + (info[epidx].zone != + info[hw->my_epid].zone)) + set_bit(epidx, &irq_bit); + break; + } + + hw->ep_shm_info[epidx].es_status = + info[epidx].es_status; + hw->ep_shm_info[epidx].zone = info[epidx].zone; + } + break; + } + + mutex_unlock(&hw->hw_info.lock); + + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + + if (test_bit(epidx, &share_bit)) { + fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx, + netdev->dev_addr, netdev->mtu); + + mutex_lock(&hw->hw_info.lock); + + ret = fjes_hw_register_buff_addr( + hw, epidx, &hw->ep_shm_info[epidx]); + + switch (ret) { + case 0: + break; + case -ENOMSG: + case -EBUSY: + default: + if (!work_pending(&adapter->force_close_task)) { + adapter->force_reset = true; + schedule_work( + &adapter->force_close_task); + } + break; + } + mutex_unlock(&hw->hw_info.lock); + } + + if (test_bit(epidx, &unshare_bit)) { + mutex_lock(&hw->hw_info.lock); + + ret = fjes_hw_unregister_buff_addr(hw, epidx); + + switch (ret) { + case 0: + break; + case -ENOMSG: + case -EBUSY: + default: + if (!work_pending(&adapter->force_close_task)) { + adapter->force_reset = true; + schedule_work( + &adapter->force_close_task); + } + break; + } + + mutex_unlock(&hw->hw_info.lock); + + if (ret == 0) + fjes_hw_setup_epbuf( + &hw->ep_shm_info[epidx].tx, + netdev->dev_addr, netdev->mtu); + } + + if (test_bit(epidx, &irq_bit)) { + fjes_hw_raise_interrupt(hw, epidx, + REG_ICTL_MASK_TXRX_STOP_REQ); + + set_bit(epidx, &hw->txrx_stop_req_bit); + hw->ep_shm_info[epidx].tx. + info->v1i.rx_status |= + FJES_RX_STOP_REQ_REQUEST; + set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit); + } + } + + if (irq_bit || adapter->unshare_watch_bitmask) { + if (!work_pending(&adapter->unshare_watch_task)) + queue_work(adapter->control_wq, + &adapter->unshare_watch_task); + } +} + +static void fjes_hw_epstop_task(struct work_struct *work) +{ + struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task); + struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back; + + ulong remain_bit; + int epid_bit; + + while ((remain_bit = hw->epstop_req_bit)) { + for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) { + if (remain_bit & 1) { + hw->ep_shm_info[epid_bit]. 
+ tx.info->v1i.rx_status |= + FJES_RX_STOP_REQ_DONE; + + clear_bit(epid_bit, &hw->epstop_req_bit); + set_bit(epid_bit, + &adapter->unshare_watch_bitmask); + + if (!work_pending(&adapter->unshare_watch_task)) + queue_work( + adapter->control_wq, + &adapter->unshare_watch_task); + } + } + } +} diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h new file mode 100644 index 000000000..6d57b89a0 --- /dev/null +++ b/drivers/net/fjes/fjes_hw.h @@ -0,0 +1,334 @@ +/* + * FUJITSU Extended Socket Network Device driver + * Copyright (c) 2015 FUJITSU LIMITED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef FJES_HW_H_ +#define FJES_HW_H_ + +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/if_ether.h> + +#include "fjes_regs.h" + +struct fjes_hw; + +#define EP_BUFFER_SUPPORT_VLAN_MAX 4 +#define EP_BUFFER_INFO_SIZE 4096 + +#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3) /* sec */ +#define FJES_COMMAND_REQ_TIMEOUT (5 + 1) /* sec */ +#define FJES_COMMAND_REQ_BUFF_TIMEOUT (8 * 3) /* sec */ +#define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT (1) /* sec */ + +#define FJES_CMD_REQ_ERR_INFO_PARAM (0x0001) +#define FJES_CMD_REQ_ERR_INFO_STATUS (0x0002) + +#define FJES_CMD_REQ_RES_CODE_NORMAL (0) +#define FJES_CMD_REQ_RES_CODE_BUSY (1) + +#define FJES_ZONING_STATUS_DISABLE (0x00) +#define FJES_ZONING_STATUS_ENABLE (0x01) +#define FJES_ZONING_STATUS_INVALID (0xFF) + +#define FJES_ZONING_ZONE_TYPE_NONE (0xFF) + +#define FJES_TX_DELAY_SEND_NONE (0) +#define FJES_TX_DELAY_SEND_PENDING (1) + +#define FJES_RX_STOP_REQ_NONE (0x0) +#define FJES_RX_STOP_REQ_DONE (0x1) +#define FJES_RX_STOP_REQ_REQUEST (0x2) +#define FJES_RX_POLL_WORK (0x4) + +#define EP_BUFFER_SIZE \ + (((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \ + / EP_BUFFER_INFO_SIZE) * EP_BUFFER_INFO_SIZE) + +#define EP_RING_NUM(buffer_size, frame_size) \ + (u32)((buffer_size) / (frame_size)) +#define EP_RING_INDEX(_num, _max) (((_num) + (_max)) % (_max)) +#define EP_RING_INDEX_INC(_num, _max) \ + ((_num) = EP_RING_INDEX((_num) + 1, (_max))) +#define EP_RING_FULL(_head, _tail, _max) \ + (0 == EP_RING_INDEX(((_tail) - (_head)), (_max))) +#define EP_RING_EMPTY(_head, _tail, _max) \ + (1 == EP_RING_INDEX(((_tail) - (_head)), (_max))) + +#define FJES_MTU_TO_BUFFER_SIZE(mtu) \ + (ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN) +#define FJES_MTU_TO_FRAME_SIZE(mtu) \ + (sizeof(struct esmem_frame) + FJES_MTU_TO_BUFFER_SIZE(mtu)) +#define FJES_MTU_DEFINE(size) \ + ((size) - sizeof(struct esmem_frame) - \ + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) + +#define FJES_DEV_COMMAND_INFO_REQ_LEN (4) +#define FJES_DEV_COMMAND_INFO_RES_LEN(epnum) (8 + 2 * (epnum)) +#define FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(txb, rxb) \ + (24 + (8 * ((txb) / EP_BUFFER_INFO_SIZE + (rxb) / EP_BUFFER_INFO_SIZE))) +#define FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN (8) +#define FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN (8) +#define FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN (8) + +#define
FJES_DEV_REQ_BUF_SIZE(maxep) \ + FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(EP_BUFFER_SIZE, EP_BUFFER_SIZE) +#define FJES_DEV_RES_BUF_SIZE(maxep) \ + FJES_DEV_COMMAND_INFO_RES_LEN(maxep) + +/* Frame & MTU */ +struct esmem_frame { + __le32 frame_size; + u8 frame_data[]; +}; + +/* EP partner status */ +enum ep_partner_status { + EP_PARTNER_UNSHARE, + EP_PARTNER_SHARED, + EP_PARTNER_WAITING, + EP_PARTNER_COMPLETE, + EP_PARTNER_STATUS_MAX, +}; + +/* shared status region */ +struct fjes_device_shared_info { + int epnum; + u8 ep_status[]; +}; + +/* structures for command control request data*/ +union fjes_device_command_req { + struct { + __le32 length; + } info; + struct { + __le32 length; + __le32 epid; + __le64 buffer[]; + } share_buffer; + struct { + __le32 length; + __le32 epid; + } unshare_buffer; + struct { + __le32 length; + __le32 mode; + __le64 buffer_len; + __le64 buffer[]; + } start_trace; + struct { + __le32 length; + } stop_trace; +}; + +/* structures for command control response data */ +union fjes_device_command_res { + struct { + __le32 length; + __le32 code; + struct { + u8 es_status; + u8 zone; + } info[]; + } info; + struct { + __le32 length; + __le32 code; + } share_buffer; + struct { + __le32 length; + __le32 code; + } unshare_buffer; + struct { + __le32 length; + __le32 code; + } start_trace; + struct { + __le32 length; + __le32 code; + } stop_trace; +}; + +/* request command type */ +enum fjes_dev_command_request_type { + FJES_CMD_REQ_INFO = 0x0001, + FJES_CMD_REQ_SHARE_BUFFER = 0x0002, + FJES_CMD_REQ_UNSHARE_BUFFER = 0x0004, +}; + +/* parameter for command control */ +struct fjes_device_command_param { + u32 req_len; + phys_addr_t req_start; + u32 res_len; + phys_addr_t res_start; + phys_addr_t share_start; +}; + +/* error code for command control */ +enum fjes_dev_command_response_e { + FJES_CMD_STATUS_UNKNOWN, + FJES_CMD_STATUS_NORMAL, + FJES_CMD_STATUS_TIMEOUT, + FJES_CMD_STATUS_ERROR_PARAM, + FJES_CMD_STATUS_ERROR_STATUS, +}; + +/* EP buffer information */ +union ep_buffer_info { + u8 raw[EP_BUFFER_INFO_SIZE]; + + struct _ep_buffer_info_common_t { + u32 version; + } common; + + struct _ep_buffer_info_v1_t { + u32 version; + u32 info_size; + + u32 buffer_size; + u16 count_max; + + u16 _rsv_1; + + u32 frame_max; + u8 mac_addr[ETH_ALEN]; + + u16 _rsv_2; + u32 _rsv_3; + + u16 tx_status; + u16 rx_status; + + u32 head; + u32 tail; + + u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX]; + + } v1i; + +}; + +/* buffer pair for Extended Partition */ +struct ep_share_mem_info { + struct epbuf_handler { + void *buffer; + size_t size; + union ep_buffer_info *info; + u8 *ring; + } tx, rx; + + struct rtnl_link_stats64 net_stats; + + u16 tx_status_work; + + u8 es_status; + u8 zone; +}; + +struct es_device_trace { + u32 record_num; + u32 current_record; + u32 status_flag; + u32 _rsv; + + struct { + u16 epid; + u16 dir_offset; + u32 data; + u64 tsc; + } record[]; +}; + +struct fjes_hw_info { + struct fjes_device_shared_info *share; + union fjes_device_command_req *req_buf; + u64 req_buf_size; + union fjes_device_command_res *res_buf; + u64 res_buf_size; + + int *my_epid; + int *max_epid; + + struct es_device_trace *trace; + u64 trace_size; + + struct mutex lock; /* buffer lock*/ + + unsigned long buffer_share_bit; + unsigned long buffer_unshare_reserve_bit; +}; + +struct fjes_hw { + void *back; + + unsigned long txrx_stop_req_bit; + unsigned long epstop_req_bit; + struct work_struct update_zone_task; + struct work_struct epstop_task; + + int my_epid; + int max_epid; + + struct ep_share_mem_info 
*ep_shm_info; + + struct fjes_hw_resource { + u64 start; + u64 size; + int irq; + } hw_res; + + u8 *base; + + struct fjes_hw_info hw_info; +}; + +int fjes_hw_init(struct fjes_hw *); +void fjes_hw_exit(struct fjes_hw *); +int fjes_hw_reset(struct fjes_hw *); +int fjes_hw_request_info(struct fjes_hw *); +int fjes_hw_register_buff_addr(struct fjes_hw *, int, + struct ep_share_mem_info *); +int fjes_hw_unregister_buff_addr(struct fjes_hw *, int); +void fjes_hw_init_command_registers(struct fjes_hw *, + struct fjes_device_command_param *); +void fjes_hw_setup_epbuf(struct epbuf_handler *, u8 *, u32); +int fjes_hw_raise_interrupt(struct fjes_hw *, int, enum REG_ICTL_MASK); +void fjes_hw_set_irqmask(struct fjes_hw *, enum REG_ICTL_MASK, bool); +u32 fjes_hw_capture_interrupt_status(struct fjes_hw *); +void fjes_hw_raise_epstop(struct fjes_hw *); +int fjes_hw_wait_epstop(struct fjes_hw *); +enum ep_partner_status + fjes_hw_get_partner_ep_status(struct fjes_hw *, int); + +bool fjes_hw_epid_is_same_zone(struct fjes_hw *, int); +int fjes_hw_epid_is_shared(struct fjes_device_shared_info *, int); +bool fjes_hw_check_epbuf_version(struct epbuf_handler *, u32); +bool fjes_hw_check_mtu(struct epbuf_handler *, u32); +bool fjes_hw_check_vlan_id(struct epbuf_handler *, u16); +bool fjes_hw_set_vlan_id(struct epbuf_handler *, u16); +void fjes_hw_del_vlan_id(struct epbuf_handler *, u16); +bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *); +void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *, size_t *); +void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *); +int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *, void *, size_t); + +#endif /* FJES_HW_H_ */ diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c new file mode 100644 index 000000000..0ddb54fe3 --- /dev/null +++ b/drivers/net/fjes/fjes_main.c @@ -0,0 +1,1383 @@ +/* + * FUJITSU Extended Socket Network Device driver + * Copyright (c) 2015 FUJITSU LIMITED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include +#include +#include +#include +#include +#include + +#include "fjes.h" + +#define MAJ 1 +#define MIN 0 +#define DRV_VERSION __stringify(MAJ) "." 
__stringify(MIN) +#define DRV_NAME "fjes" +char fjes_driver_name[] = DRV_NAME; +char fjes_driver_version[] = DRV_VERSION; +static const char fjes_driver_string[] = + "FUJITSU Extended Socket Network Device Driver"; +static const char fjes_copyright[] = + "Copyright (c) 2015 FUJITSU LIMITED"; + +MODULE_AUTHOR("Taku Izumi "); +MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static int fjes_request_irq(struct fjes_adapter *); +static void fjes_free_irq(struct fjes_adapter *); + +static int fjes_open(struct net_device *); +static int fjes_close(struct net_device *); +static int fjes_setup_resources(struct fjes_adapter *); +static void fjes_free_resources(struct fjes_adapter *); +static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *); +static void fjes_raise_intr_rxdata_task(struct work_struct *); +static void fjes_tx_stall_task(struct work_struct *); +static void fjes_force_close_task(struct work_struct *); +static irqreturn_t fjes_intr(int, void*); +static struct rtnl_link_stats64 * +fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *); +static int fjes_change_mtu(struct net_device *, int); +static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16); +static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16); +static void fjes_tx_retry(struct net_device *); + +static int fjes_acpi_add(struct acpi_device *); +static int fjes_acpi_remove(struct acpi_device *); +static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*); + +static int fjes_probe(struct platform_device *); +static int fjes_remove(struct platform_device *); + +static int fjes_sw_init(struct fjes_adapter *); +static void fjes_netdev_setup(struct net_device *); +static void fjes_irq_watch_task(struct work_struct *); +static void fjes_watch_unshare_task(struct work_struct *); +static void fjes_rx_irq(struct fjes_adapter *, int); +static int fjes_poll(struct napi_struct *, int); + +static const struct acpi_device_id fjes_acpi_ids[] = { + {"PNP0C02", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); + +static struct acpi_driver fjes_acpi_driver = { + .name = DRV_NAME, + .class = DRV_NAME, + .owner = THIS_MODULE, + .ids = fjes_acpi_ids, + .ops = { + .add = fjes_acpi_add, + .remove = fjes_acpi_remove, + }, +}; + +static struct platform_driver fjes_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = fjes_probe, + .remove = fjes_remove, +}; + +static struct resource fjes_resource[] = { + { + .flags = IORESOURCE_MEM, + .start = 0, + .end = 0, + }, + { + .flags = IORESOURCE_IRQ, + .start = 0, + .end = 0, + }, +}; + +static int fjes_acpi_add(struct acpi_device *device) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; + struct platform_device *plat_dev; + union acpi_object *str; + acpi_status status; + int result; + + status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -ENODEV; + + str = buffer.pointer; + result = utf16s_to_utf8s((wchar_t *)str->string.pointer, + str->string.length, UTF16_LITTLE_ENDIAN, + str_buf, sizeof(str_buf) - 1); + str_buf[result] = 0; + + if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { + kfree(buffer.pointer); + return -ENODEV; + } + kfree(buffer.pointer); + + status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, + fjes_get_acpi_resource, fjes_resource); + if 
(ACPI_FAILURE(status)) + return -ENODEV; + + /* create platform_device */ + plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource, + ARRAY_SIZE(fjes_resource)); + device->driver_data = plat_dev; + + return 0; +} + +static int fjes_acpi_remove(struct acpi_device *device) +{ + struct platform_device *plat_dev; + + plat_dev = (struct platform_device *)acpi_driver_data(device); + platform_device_unregister(plat_dev); + + return 0; +} + +static acpi_status +fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data) +{ + struct acpi_resource_address32 *addr; + struct acpi_resource_irq *irq; + struct resource *res = data; + + switch (acpi_res->type) { + case ACPI_RESOURCE_TYPE_ADDRESS32: + addr = &acpi_res->data.address32; + res[0].start = addr->address.minimum; + res[0].end = addr->address.minimum + + addr->address.address_length - 1; + break; + + case ACPI_RESOURCE_TYPE_IRQ: + irq = &acpi_res->data.irq; + if (irq->interrupt_count != 1) + return AE_ERROR; + res[1].start = irq->interrupts[0]; + res[1].end = irq->interrupts[0]; + break; + + default: + break; + } + + return AE_OK; +} + +static int fjes_request_irq(struct fjes_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int result = -1; + + adapter->interrupt_watch_enable = true; + if (!delayed_work_pending(&adapter->interrupt_watch_task)) { + queue_delayed_work(adapter->control_wq, + &adapter->interrupt_watch_task, + FJES_IRQ_WATCH_DELAY); + } + + if (!adapter->irq_registered) { + result = request_irq(adapter->hw.hw_res.irq, fjes_intr, + IRQF_SHARED, netdev->name, adapter); + if (result) + adapter->irq_registered = false; + else + adapter->irq_registered = true; + } + + return result; +} + +static void fjes_free_irq(struct fjes_adapter *adapter) +{ + struct fjes_hw *hw = &adapter->hw; + + adapter->interrupt_watch_enable = false; + cancel_delayed_work_sync(&adapter->interrupt_watch_task); + + fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true); + + if (adapter->irq_registered) { + free_irq(adapter->hw.hw_res.irq, adapter); + adapter->irq_registered = false; + } +} + +static const struct net_device_ops fjes_netdev_ops = { + .ndo_open = fjes_open, + .ndo_stop = fjes_close, + .ndo_start_xmit = fjes_xmit_frame, + .ndo_get_stats64 = fjes_get_stats64, + .ndo_change_mtu = fjes_change_mtu, + .ndo_tx_timeout = fjes_tx_retry, + .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid, +}; + +/* fjes_open - Called when a network interface is made active */ +static int fjes_open(struct net_device *netdev) +{ + struct fjes_adapter *adapter = netdev_priv(netdev); + struct fjes_hw *hw = &adapter->hw; + int result; + + if (adapter->open_guard) + return -ENXIO; + + result = fjes_setup_resources(adapter); + if (result) + goto err_setup_res; + + hw->txrx_stop_req_bit = 0; + hw->epstop_req_bit = 0; + + napi_enable(&adapter->napi); + + fjes_hw_capture_interrupt_status(hw); + + result = fjes_request_irq(adapter); + if (result) + goto err_req_irq; + + fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false); + + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + + return 0; + +err_req_irq: + fjes_free_irq(adapter); + napi_disable(&adapter->napi); + +err_setup_res: + fjes_free_resources(adapter); + return result; +} + +/* fjes_close - Disables a network interface */ +static int fjes_close(struct net_device *netdev) +{ + struct fjes_adapter *adapter = netdev_priv(netdev); + struct fjes_hw *hw = &adapter->hw; + int epidx; + + netif_tx_stop_all_queues(netdev); + 
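+ /* Quiesce our own TX queues first; fjes_hw_raise_epstop() + * below then asks every peer EP to stop TX/RX before the + * shared buffers are torn down. + */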
netif_carrier_off(netdev); + + fjes_hw_raise_epstop(hw); + + napi_disable(&adapter->napi); + + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + + adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &= + ~FJES_RX_POLL_WORK; + } + + fjes_free_irq(adapter); + + cancel_delayed_work_sync(&adapter->interrupt_watch_task); + cancel_work_sync(&adapter->unshare_watch_task); + adapter->unshare_watch_bitmask = 0; + cancel_work_sync(&adapter->raise_intr_rxdata_task); + cancel_work_sync(&adapter->tx_stall_task); + + cancel_work_sync(&hw->update_zone_task); + cancel_work_sync(&hw->epstop_task); + + fjes_hw_wait_epstop(hw); + + fjes_free_resources(adapter); + + return 0; +} + +static int fjes_setup_resources(struct fjes_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ep_share_mem_info *buf_pair; + struct fjes_hw *hw = &adapter->hw; + int result; + int epidx; + + mutex_lock(&hw->hw_info.lock); + result = fjes_hw_request_info(hw); + switch (result) { + case 0: + for (epidx = 0; epidx < hw->max_epid; epidx++) { + hw->ep_shm_info[epidx].es_status = + hw->hw_info.res_buf->info.info[epidx].es_status; + hw->ep_shm_info[epidx].zone = + hw->hw_info.res_buf->info.info[epidx].zone; + } + break; + default: + case -ENOMSG: + case -EBUSY: + adapter->force_reset = true; + + mutex_unlock(&hw->hw_info.lock); + return result; + } + mutex_unlock(&hw->hw_info.lock); + + for (epidx = 0; epidx < (hw->max_epid); epidx++) { + if ((epidx != hw->my_epid) && + (hw->ep_shm_info[epidx].es_status == + FJES_ZONING_STATUS_ENABLE)) { + fjes_hw_raise_interrupt(hw, epidx, + REG_ICTL_MASK_INFO_UPDATE); + } + } + + msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid); + + for (epidx = 0; epidx < (hw->max_epid); epidx++) { + if (epidx == hw->my_epid) + continue; + + buf_pair = &hw->ep_shm_info[epidx]; + + fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr, + netdev->mtu); + + if (fjes_hw_epid_is_same_zone(hw, epidx)) { + mutex_lock(&hw->hw_info.lock); + result = + fjes_hw_register_buff_addr(hw, epidx, buf_pair); + mutex_unlock(&hw->hw_info.lock); + + switch (result) { + case 0: + break; + case -ENOMSG: + case -EBUSY: + default: + adapter->force_reset = true; + return result; + } + } + } + + return 0; +} + +static void fjes_free_resources(struct fjes_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct fjes_device_command_param param; + struct ep_share_mem_info *buf_pair; + struct fjes_hw *hw = &adapter->hw; + bool reset_flag = false; + int result; + int epidx; + + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + + mutex_lock(&hw->hw_info.lock); + result = fjes_hw_unregister_buff_addr(hw, epidx); + mutex_unlock(&hw->hw_info.lock); + + if (result) + reset_flag = true; + + buf_pair = &hw->ep_shm_info[epidx]; + + fjes_hw_setup_epbuf(&buf_pair->tx, + netdev->dev_addr, netdev->mtu); + + clear_bit(epidx, &hw->txrx_stop_req_bit); + } + + if (reset_flag || adapter->force_reset) { + result = fjes_hw_reset(hw); + + adapter->force_reset = false; + + if (result) + adapter->open_guard = true; + + hw->hw_info.buffer_share_bit = 0; + + memset((void *)&param, 0, sizeof(param)); + + param.req_len = hw->hw_info.req_buf_size; + param.req_start = __pa(hw->hw_info.req_buf); + param.res_len = hw->hw_info.res_buf_size; + param.res_start = __pa(hw->hw_info.res_buf); + param.share_start = __pa(hw->hw_info.share->ep_status); + + fjes_hw_init_command_registers(hw, &param); + } +} + +static void fjes_tx_stall_task(struct work_struct *work) +{ +
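+ /* Runs on txrx_wq after TX was stopped because a peer's + * receive ring was full: wake the queue once every sendable + * EP ring has room again, or once the stall has lasted + * longer than FJES_TX_TX_STALL_TIMEOUT. + */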
struct fjes_adapter *adapter = container_of(work, + struct fjes_adapter, tx_stall_task); + struct net_device *netdev = adapter->netdev; + struct fjes_hw *hw = &adapter->hw; + int all_queue_available, sendable; + enum ep_partner_status pstatus; + int max_epid, my_epid, epid; + union ep_buffer_info *info; + int i; + + if (((long)jiffies - + (long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) { + netif_wake_queue(netdev); + return; + } + + my_epid = hw->my_epid; + max_epid = hw->max_epid; + + for (i = 0; i < 5; i++) { + all_queue_available = 1; + + for (epid = 0; epid < max_epid; epid++) { + if (my_epid == epid) + continue; + + pstatus = fjes_hw_get_partner_ep_status(hw, epid); + sendable = (pstatus == EP_PARTNER_SHARED); + if (!sendable) + continue; + + info = adapter->hw.ep_shm_info[epid].tx.info; + + if (EP_RING_FULL(info->v1i.head, info->v1i.tail, + info->v1i.count_max)) { + all_queue_available = 0; + break; + } + } + + if (all_queue_available) { + netif_wake_queue(netdev); + return; + } + } + + usleep_range(50, 100); + + queue_work(adapter->txrx_wq, &adapter->tx_stall_task); +} + +static void fjes_force_close_task(struct work_struct *work) +{ + struct fjes_adapter *adapter = container_of(work, + struct fjes_adapter, force_close_task); + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + dev_close(netdev); + rtnl_unlock(); +} + +static void fjes_raise_intr_rxdata_task(struct work_struct *work) +{ + struct fjes_adapter *adapter = container_of(work, + struct fjes_adapter, raise_intr_rxdata_task); + struct fjes_hw *hw = &adapter->hw; + enum ep_partner_status pstatus; + int max_epid, my_epid, epid; + + my_epid = hw->my_epid; + max_epid = hw->max_epid; + + for (epid = 0; epid < max_epid; epid++) + hw->ep_shm_info[epid].tx_status_work = 0; + + for (epid = 0; epid < max_epid; epid++) { + if (epid == my_epid) + continue; + + pstatus = fjes_hw_get_partner_ep_status(hw, epid); + if (pstatus == EP_PARTNER_SHARED) { + hw->ep_shm_info[epid].tx_status_work = + hw->ep_shm_info[epid].tx.info->v1i.tx_status; + + if (hw->ep_shm_info[epid].tx_status_work == + FJES_TX_DELAY_SEND_PENDING) { + hw->ep_shm_info[epid].tx.info->v1i.tx_status = + FJES_TX_DELAY_SEND_NONE; + } + } + } + + for (epid = 0; epid < max_epid; epid++) { + if (epid == my_epid) + continue; + + pstatus = fjes_hw_get_partner_ep_status(hw, epid); + if ((hw->ep_shm_info[epid].tx_status_work == + FJES_TX_DELAY_SEND_PENDING) && + (pstatus == EP_PARTNER_SHARED) && + !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) { + fjes_hw_raise_interrupt(hw, epid, + REG_ICTL_MASK_RX_DATA); + } + } + + usleep_range(500, 1000); +} + +static int fjes_tx_send(struct fjes_adapter *adapter, int dest, + void *data, size_t len) +{ + int retval; + + retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx, + data, len); + if (retval) + return retval; + + adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status = + FJES_TX_DELAY_SEND_PENDING; + if (!work_pending(&adapter->raise_intr_rxdata_task)) + queue_work(adapter->txrx_wq, + &adapter->raise_intr_rxdata_task); + + retval = 0; + return retval; +} + +static netdev_tx_t +fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct fjes_adapter *adapter = netdev_priv(netdev); + struct fjes_hw *hw = &adapter->hw; + + int max_epid, my_epid, dest_epid; + enum ep_partner_status pstatus; + struct netdev_queue *cur_queue; + char shortpkt[VLAN_ETH_HLEN]; + bool is_multi, vlan; + struct ethhdr *eth; + u16 queue_no = 0; + u16 vlan_id = 0; + netdev_tx_t ret; + char *data; + int len; + + ret = 
NETDEV_TX_OK; + is_multi = false; + cur_queue = netdev_get_tx_queue(netdev, queue_no); + + eth = (struct ethhdr *)skb->data; + my_epid = hw->my_epid; + + vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false; + + data = skb->data; + len = skb->len; + + if (is_multicast_ether_addr(eth->h_dest)) { + dest_epid = 0; + max_epid = hw->max_epid; + is_multi = true; + } else if (is_local_ether_addr(eth->h_dest)) { + dest_epid = eth->h_dest[ETH_ALEN - 1]; + max_epid = dest_epid + 1; + + if ((eth->h_dest[0] == 0x02) && + (0x00 == (eth->h_dest[1] | eth->h_dest[2] | + eth->h_dest[3] | eth->h_dest[4])) && + (dest_epid < hw->max_epid)) { + ; + } else { + dest_epid = 0; + max_epid = 0; + ret = NETDEV_TX_OK; + + adapter->stats64.tx_packets += 1; + hw->ep_shm_info[my_epid].net_stats.tx_packets += 1; + adapter->stats64.tx_bytes += len; + hw->ep_shm_info[my_epid].net_stats.tx_bytes += len; + } + } else { + dest_epid = 0; + max_epid = 0; + ret = NETDEV_TX_OK; + + adapter->stats64.tx_packets += 1; + hw->ep_shm_info[my_epid].net_stats.tx_packets += 1; + adapter->stats64.tx_bytes += len; + hw->ep_shm_info[my_epid].net_stats.tx_bytes += len; + } + + for (; dest_epid < max_epid; dest_epid++) { + if (my_epid == dest_epid) + continue; + + pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid); + if (pstatus != EP_PARTNER_SHARED) { + ret = NETDEV_TX_OK; + } else if (!fjes_hw_check_epbuf_version( + &adapter->hw.ep_shm_info[dest_epid].rx, 0)) { + /* version is NOT 0 */ + adapter->stats64.tx_carrier_errors += 1; + hw->ep_shm_info[my_epid].net_stats + .tx_carrier_errors += 1; + + ret = NETDEV_TX_OK; + } else if (!fjes_hw_check_mtu( + &adapter->hw.ep_shm_info[dest_epid].rx, + netdev->mtu)) { + adapter->stats64.tx_dropped += 1; + hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1; + adapter->stats64.tx_errors += 1; + hw->ep_shm_info[my_epid].net_stats.tx_errors += 1; + + ret = NETDEV_TX_OK; + } else if (vlan && + !fjes_hw_check_vlan_id( + &adapter->hw.ep_shm_info[dest_epid].rx, + vlan_id)) { + ret = NETDEV_TX_OK; + } else { + if (len < VLAN_ETH_HLEN) { + memset(shortpkt, 0, VLAN_ETH_HLEN); + memcpy(shortpkt, skb->data, skb->len); + len = VLAN_ETH_HLEN; + data = shortpkt; + } + + if (adapter->tx_retry_count == 0) { + adapter->tx_start_jiffies = jiffies; + adapter->tx_retry_count = 1; + } else { + adapter->tx_retry_count++; + } + + if (fjes_tx_send(adapter, dest_epid, data, len)) { + if (is_multi) { + ret = NETDEV_TX_OK; + } else if ( + ((long)jiffies - + (long)adapter->tx_start_jiffies) >= + FJES_TX_RETRY_TIMEOUT) { + adapter->stats64.tx_fifo_errors += 1; + hw->ep_shm_info[my_epid].net_stats + .tx_fifo_errors += 1; + adapter->stats64.tx_errors += 1; + hw->ep_shm_info[my_epid].net_stats + .tx_errors += 1; + + ret = NETDEV_TX_OK; + } else { + netdev->trans_start = jiffies; + netif_tx_stop_queue(cur_queue); + + if (!work_pending(&adapter->tx_stall_task)) + queue_work(adapter->txrx_wq, + &adapter->tx_stall_task); + + ret = NETDEV_TX_BUSY; + } + } else { + if (!is_multi) { + adapter->stats64.tx_packets += 1; + hw->ep_shm_info[my_epid].net_stats + .tx_packets += 1; + adapter->stats64.tx_bytes += len; + hw->ep_shm_info[my_epid].net_stats + .tx_bytes += len; + } + + adapter->tx_retry_count = 0; + ret = NETDEV_TX_OK; + } + } + } + + if (ret == NETDEV_TX_OK) { + dev_kfree_skb(skb); + if (is_multi) { + adapter->stats64.tx_packets += 1; + hw->ep_shm_info[my_epid].net_stats.tx_packets += 1; + adapter->stats64.tx_bytes += 1; + hw->ep_shm_info[my_epid].net_stats.tx_bytes += len; + } + } + + return ret; +} + +static void 
fjes_tx_retry(struct net_device *netdev) +{ + struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0); + + netif_tx_wake_queue(queue); +} + +static struct rtnl_link_stats64 * +fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct fjes_adapter *adapter = netdev_priv(netdev); + + memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64)); + + return stats; +} + +static int fjes_change_mtu(struct net_device *netdev, int new_mtu) +{ + bool running = netif_running(netdev); + int ret = 0; + int idx; + + for (idx = 0; fjes_support_mtu[idx] != 0; idx++) { + if (new_mtu <= fjes_support_mtu[idx]) { + new_mtu = fjes_support_mtu[idx]; + if (new_mtu == netdev->mtu) + return 0; + + if (running) + fjes_close(netdev); + + netdev->mtu = new_mtu; + + if (running) + ret = fjes_open(netdev); + + return ret; + } + } + + return -EINVAL; +} + +static int fjes_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct fjes_adapter *adapter = netdev_priv(netdev); + bool ret = true; + int epid; + + for (epid = 0; epid < adapter->hw.max_epid; epid++) { + if (epid == adapter->hw.my_epid) + continue; + + if (!fjes_hw_check_vlan_id( + &adapter->hw.ep_shm_info[epid].tx, vid)) + ret = fjes_hw_set_vlan_id( + &adapter->hw.ep_shm_info[epid].tx, vid); + } + + return ret ? 0 : -ENOSPC; +} + +static int fjes_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct fjes_adapter *adapter = netdev_priv(netdev); + int epid; + + for (epid = 0; epid < adapter->hw.max_epid; epid++) { + if (epid == adapter->hw.my_epid) + continue; + + fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid); + } + + return 0; +} + +static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter, + int src_epid) +{ + struct fjes_hw *hw = &adapter->hw; + enum ep_partner_status status; + + status = fjes_hw_get_partner_ep_status(hw, src_epid); + switch (status) { + case EP_PARTNER_UNSHARE: + case EP_PARTNER_COMPLETE: + default: + break; + case EP_PARTNER_WAITING: + if (src_epid < hw->my_epid) { + hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |= + FJES_RX_STOP_REQ_DONE; + + clear_bit(src_epid, &hw->txrx_stop_req_bit); + set_bit(src_epid, &adapter->unshare_watch_bitmask); + + if (!work_pending(&adapter->unshare_watch_task)) + queue_work(adapter->control_wq, + &adapter->unshare_watch_task); + } + break; + case EP_PARTNER_SHARED: + if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status & + FJES_RX_STOP_REQ_REQUEST) { + set_bit(src_epid, &hw->epstop_req_bit); + if (!work_pending(&hw->epstop_task)) + queue_work(adapter->control_wq, + &hw->epstop_task); + } + break; + } +} + +static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid) +{ + struct fjes_hw *hw = &adapter->hw; + enum ep_partner_status status; + + set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit); + + status = fjes_hw_get_partner_ep_status(hw, src_epid); + switch (status) { + case EP_PARTNER_WAITING: + hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |= + FJES_RX_STOP_REQ_DONE; + clear_bit(src_epid, &hw->txrx_stop_req_bit); + /* fall through */ + case EP_PARTNER_UNSHARE: + case EP_PARTNER_COMPLETE: + default: + set_bit(src_epid, &adapter->unshare_watch_bitmask); + if (!work_pending(&adapter->unshare_watch_task)) + queue_work(adapter->control_wq, + &adapter->unshare_watch_task); + break; + case EP_PARTNER_SHARED: + set_bit(src_epid, &hw->epstop_req_bit); + + if (!work_pending(&hw->epstop_task)) + queue_work(adapter->control_wq, &hw->epstop_task); + break; + } +} + +static void 
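/* INFO_UPDATE interrupt: partner zone information changed; the actual processing is deferred to hw->update_zone_task */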
fjes_update_zone_irq(struct fjes_adapter *adapter, + int src_epid) +{ + struct fjes_hw *hw = &adapter->hw; + + if (!work_pending(&hw->update_zone_task)) + queue_work(adapter->control_wq, &hw->update_zone_task); +} + +static irqreturn_t fjes_intr(int irq, void *data) +{ + struct fjes_adapter *adapter = data; + struct fjes_hw *hw = &adapter->hw; + irqreturn_t ret; + u32 icr; + + icr = fjes_hw_capture_interrupt_status(hw); + + if (icr & REG_IS_MASK_IS_ASSERT) { + if (icr & REG_ICTL_MASK_RX_DATA) + fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID); + + if (icr & REG_ICTL_MASK_DEV_STOP_REQ) + fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID); + + if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) + fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID); + + if (icr & REG_ICTL_MASK_TXRX_STOP_DONE) + fjes_hw_set_irqmask(hw, + REG_ICTL_MASK_TXRX_STOP_DONE, true); + + if (icr & REG_ICTL_MASK_INFO_UPDATE) + fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID); + + ret = IRQ_HANDLED; + } else { + ret = IRQ_NONE; + } + + return ret; +} + +static int fjes_rxframe_search_exist(struct fjes_adapter *adapter, + int start_epid) +{ + struct fjes_hw *hw = &adapter->hw; + enum ep_partner_status pstatus; + int max_epid, cur_epid; + int i; + + max_epid = hw->max_epid; + start_epid = (start_epid + 1 + max_epid) % max_epid; + + for (i = 0; i < max_epid; i++) { + cur_epid = (start_epid + i) % max_epid; + if (cur_epid == hw->my_epid) + continue; + + pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid); + if (pstatus == EP_PARTNER_SHARED) { + if (!fjes_hw_epbuf_rx_is_empty( + &hw->ep_shm_info[cur_epid].rx)) + return cur_epid; + } + } + return -1; +} + +static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize, + int *cur_epid) +{ + void *frame; + + *cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid); + if (*cur_epid < 0) + return NULL; + + frame = + fjes_hw_epbuf_rx_curpkt_get_addr( + &adapter->hw.ep_shm_info[*cur_epid].rx, psize); + + return frame; +} + +static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid) +{ + fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx); +} + +static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid) +{ + struct fjes_hw *hw = &adapter->hw; + + fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true); + + adapter->unset_rx_last = true; + napi_schedule(&adapter->napi); +} + +static int fjes_poll(struct napi_struct *napi, int budget) +{ + struct fjes_adapter *adapter = + container_of(napi, struct fjes_adapter, napi); + struct net_device *netdev = napi->dev; + struct fjes_hw *hw = &adapter->hw; + struct sk_buff *skb; + int work_done = 0; + int cur_epid = 0; + int epidx; + size_t frame_len; + void *frame; + + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + + adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |= + FJES_RX_POLL_WORK; + } + + while (work_done < budget) { + prefetch(&adapter->hw); + frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid); + + if (frame) { + skb = napi_alloc_skb(napi, frame_len); + if (!skb) { + adapter->stats64.rx_dropped += 1; + hw->ep_shm_info[cur_epid].net_stats + .rx_dropped += 1; + adapter->stats64.rx_errors += 1; + hw->ep_shm_info[cur_epid].net_stats + .rx_errors += 1; + } else { + memcpy(skb_put(skb, frame_len), + frame, frame_len); + skb->protocol = eth_type_trans(skb, netdev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netif_receive_skb(skb); + + work_done++; + + adapter->stats64.rx_packets += 1; + hw->ep_shm_info[cur_epid].net_stats + .rx_packets += 1; + 
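/* accounting is kept twice: the netdev-wide stats64 and the per-EP net_stats (the latter presumably feeding the per-EP ethtool statistics) */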
adapter->stats64.rx_bytes += frame_len; + hw->ep_shm_info[cur_epid].net_stats + .rx_bytes += frame_len; + + if (is_multicast_ether_addr( + ((struct ethhdr *)frame)->h_dest)) { + adapter->stats64.multicast += 1; + hw->ep_shm_info[cur_epid].net_stats + .multicast += 1; + } + } + + fjes_rxframe_release(adapter, cur_epid); + adapter->unset_rx_last = true; + } else { + break; + } + } + + if (work_done < budget) { + napi_complete(napi); + + if (adapter->unset_rx_last) { + adapter->rx_last_jiffies = jiffies; + adapter->unset_rx_last = false; + } + + if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) { + napi_reschedule(napi); + } else { + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + adapter->hw.ep_shm_info[epidx] + .tx.info->v1i.rx_status &= + ~FJES_RX_POLL_WORK; + } + + fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false); + } + } + + return work_done; +} + +/* fjes_probe - Device Initialization Routine */ +static int fjes_probe(struct platform_device *plat_dev) +{ + struct fjes_adapter *adapter; + struct net_device *netdev; + struct resource *res; + struct fjes_hw *hw; + int err; + + err = -ENOMEM; + netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d", + NET_NAME_UNKNOWN, fjes_netdev_setup, + FJES_MAX_QUEUES); + + if (!netdev) + goto err_out; + + SET_NETDEV_DEV(netdev, &plat_dev->dev); + + dev_set_drvdata(&plat_dev->dev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->plat_dev = plat_dev; + hw = &adapter->hw; + hw->back = adapter; + + /* setup the private structure */ + err = fjes_sw_init(adapter); + if (err) + goto err_free_netdev; + + INIT_WORK(&adapter->force_close_task, fjes_force_close_task); + adapter->force_reset = false; + adapter->open_guard = false; + + adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx"); + adapter->control_wq = create_workqueue(DRV_NAME "/control"); + + INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task); + INIT_WORK(&adapter->raise_intr_rxdata_task, + fjes_raise_intr_rxdata_task); + INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task); + adapter->unshare_watch_bitmask = 0; + + INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task); + adapter->interrupt_watch_enable = false; + + res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0); + hw->hw_res.start = res->start; + hw->hw_res.size = res->end - res->start + 1; + hw->hw_res.irq = platform_get_irq(plat_dev, 0); + err = fjes_hw_init(&adapter->hw); + if (err) + goto err_free_netdev; + + /* setup MAC address (02:00:00:00:00:[epid])*/ + netdev->dev_addr[0] = 2; + netdev->dev_addr[1] = 0; + netdev->dev_addr[2] = 0; + netdev->dev_addr[3] = 0; + netdev->dev_addr[4] = 0; + netdev->dev_addr[5] = hw->my_epid; /* EPID */ + + err = register_netdev(netdev); + if (err) + goto err_hw_exit; + + netif_carrier_off(netdev); + + return 0; + +err_hw_exit: + fjes_hw_exit(&adapter->hw); +err_free_netdev: + free_netdev(netdev); +err_out: + return err; +} + +/* fjes_remove - Device Removal Routine */ +static int fjes_remove(struct platform_device *plat_dev) +{ + struct net_device *netdev = dev_get_drvdata(&plat_dev->dev); + struct fjes_adapter *adapter = netdev_priv(netdev); + struct fjes_hw *hw = &adapter->hw; + + cancel_delayed_work_sync(&adapter->interrupt_watch_task); + cancel_work_sync(&adapter->unshare_watch_task); + cancel_work_sync(&adapter->raise_intr_rxdata_task); + cancel_work_sync(&adapter->tx_stall_task); + if (adapter->control_wq) + destroy_workqueue(adapter->control_wq); + if (adapter->txrx_wq) + 
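/* fjes_probe() never checks the create_workqueue() return values, so either workqueue pointer may legitimately be NULL here */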
destroy_workqueue(adapter->txrx_wq); + + unregister_netdev(netdev); + + fjes_hw_exit(hw); + + netif_napi_del(&adapter->napi); + + free_netdev(netdev); + + return 0; +} + +static int fjes_sw_init(struct fjes_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + netif_napi_add(netdev, &adapter->napi, fjes_poll, 64); + + return 0; +} + +/* fjes_netdev_setup - netdevice initialization routine */ +static void fjes_netdev_setup(struct net_device *netdev) +{ + ether_setup(netdev); + + netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL; + netdev->netdev_ops = &fjes_netdev_ops; + fjes_set_ethtool_ops(netdev); + netdev->mtu = fjes_support_mtu[0]; + netdev->flags |= IFF_BROADCAST; + netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; +} + +static void fjes_irq_watch_task(struct work_struct *work) +{ + struct fjes_adapter *adapter = container_of(to_delayed_work(work), + struct fjes_adapter, interrupt_watch_task); + + local_irq_disable(); + fjes_intr(adapter->hw.hw_res.irq, adapter); + local_irq_enable(); + + if (fjes_rxframe_search_exist(adapter, 0) >= 0) + napi_schedule(&adapter->napi); + + if (adapter->interrupt_watch_enable) { + if (!delayed_work_pending(&adapter->interrupt_watch_task)) + queue_delayed_work(adapter->control_wq, + &adapter->interrupt_watch_task, + FJES_IRQ_WATCH_DELAY); + } +} + +static void fjes_watch_unshare_task(struct work_struct *work) +{ + struct fjes_adapter *adapter = + container_of(work, struct fjes_adapter, unshare_watch_task); + + struct net_device *netdev = adapter->netdev; + struct fjes_hw *hw = &adapter->hw; + + int unshare_watch, unshare_reserve; + int max_epid, my_epid, epidx; + int stop_req, stop_req_done; + ulong unshare_watch_bitmask; + int wait_time = 0; + int is_shared; + int ret; + + my_epid = hw->my_epid; + max_epid = hw->max_epid; + + unshare_watch_bitmask = adapter->unshare_watch_bitmask; + adapter->unshare_watch_bitmask = 0; + + while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) && + (wait_time < 3000)) { + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + + is_shared = fjes_hw_epid_is_shared(hw->hw_info.share, + epidx); + + stop_req = test_bit(epidx, &hw->txrx_stop_req_bit); + + stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status & + FJES_RX_STOP_REQ_DONE; + + unshare_watch = test_bit(epidx, &unshare_watch_bitmask); + + unshare_reserve = test_bit(epidx, + &hw->hw_info.buffer_unshare_reserve_bit); + + if ((!stop_req || + (is_shared && (!is_shared || !stop_req_done))) && + (is_shared || !unshare_watch || !unshare_reserve)) + continue; + + mutex_lock(&hw->hw_info.lock); + ret = fjes_hw_unregister_buff_addr(hw, epidx); + switch (ret) { + case 0: + break; + case -ENOMSG: + case -EBUSY: + default: + if (!work_pending( + &adapter->force_close_task)) { + adapter->force_reset = true; + schedule_work( + &adapter->force_close_task); + } + break; + } + mutex_unlock(&hw->hw_info.lock); + + fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx, + netdev->dev_addr, netdev->mtu); + + clear_bit(epidx, &hw->txrx_stop_req_bit); + clear_bit(epidx, &unshare_watch_bitmask); + clear_bit(epidx, + &hw->hw_info.buffer_unshare_reserve_bit); + } + + msleep(100); + wait_time += 100; + } + + if (hw->hw_info.buffer_unshare_reserve_bit) { + for (epidx = 0; epidx < hw->max_epid; epidx++) { + if (epidx == hw->my_epid) + continue; + + if (test_bit(epidx, + &hw->hw_info.buffer_unshare_reserve_bit)) { + mutex_lock(&hw->hw_info.lock); + + ret = fjes_hw_unregister_buff_addr(hw, epidx); + switch (ret) { + case 0: + 
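/* buffer unregistered cleanly */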
break; + case -ENOMSG: + case -EBUSY: + default: + if (!work_pending( + &adapter->force_close_task)) { + adapter->force_reset = true; + schedule_work( + &adapter->force_close_task); + } + break; + } + mutex_unlock(&hw->hw_info.lock); + + fjes_hw_setup_epbuf( + &hw->ep_shm_info[epidx].tx, + netdev->dev_addr, netdev->mtu); + + clear_bit(epidx, &hw->txrx_stop_req_bit); + clear_bit(epidx, &unshare_watch_bitmask); + clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit); + } + + if (test_bit(epidx, &unshare_watch_bitmask)) { + hw->ep_shm_info[epidx].tx.info->v1i.rx_status &= + ~FJES_RX_STOP_REQ_DONE; + } + } + } +} + +/* fjes_init_module - Driver Registration Routine */ +static int __init fjes_init_module(void) +{ + int result; + + pr_info("%s - version %s - %s\n", + fjes_driver_string, fjes_driver_version, fjes_copyright); + + result = platform_driver_register(&fjes_driver); + if (result < 0) + return result; + + result = acpi_bus_register_driver(&fjes_acpi_driver); + if (result < 0) + goto fail_acpi_driver; + + return 0; + +fail_acpi_driver: + platform_driver_unregister(&fjes_driver); + return result; +} + +module_init(fjes_init_module); + +/* fjes_exit_module - Driver Exit Cleanup Routine */ +static void __exit fjes_exit_module(void) +{ + acpi_bus_unregister_driver(&fjes_acpi_driver); + platform_driver_unregister(&fjes_driver); +} + +module_exit(fjes_exit_module); diff --git a/drivers/net/fjes/fjes_regs.h b/drivers/net/fjes/fjes_regs.h new file mode 100644 index 000000000..029c924dc --- /dev/null +++ b/drivers/net/fjes/fjes_regs.h @@ -0,0 +1,142 @@ +/* + * FUJITSU Extended Socket Network Device driver + * Copyright (c) 2015 FUJITSU LIMITED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING".
+ * + */ + +#ifndef FJES_REGS_H_ +#define FJES_REGS_H_ + +#include + +#define XSCT_DEVICE_REGISTER_SIZE 0x1000 + +/* register offset */ +/* Information registers */ +#define XSCT_OWNER_EPID 0x0000 /* Owner EPID */ +#define XSCT_MAX_EP 0x0004 /* Maximum EP */ + +/* Device Control registers */ +#define XSCT_DCTL 0x0010 /* Device Control */ + +/* Command Control registers */ +#define XSCT_CR 0x0020 /* Command request */ +#define XSCT_CS 0x0024 /* Command status */ +#define XSCT_SHSTSAL 0x0028 /* Share status address Low */ +#define XSCT_SHSTSAH 0x002C /* Share status address High */ + +#define XSCT_REQBL 0x0034 /* Request Buffer length */ +#define XSCT_REQBAL 0x0038 /* Request Buffer Address Low */ +#define XSCT_REQBAH 0x003C /* Request Buffer Address High */ + +#define XSCT_RESPBL 0x0044 /* Response Buffer Length */ +#define XSCT_RESPBAL 0x0048 /* Response Buffer Address Low */ +#define XSCT_RESPBAH 0x004C /* Response Buffer Address High */ + +/* Interrupt Control registers */ +#define XSCT_IS 0x0080 /* Interrupt status */ +#define XSCT_IMS 0x0084 /* Interrupt mask set */ +#define XSCT_IMC 0x0088 /* Interrupt mask clear */ +#define XSCT_IG 0x008C /* Interrupt generator */ +#define XSCT_ICTL 0x0090 /* Interrupt control */ + +/* register structure */ +/* Information registers */ +union REG_OWNER_EPID { + struct { + __le32 epid:16; + __le32:16; + } bits; + __le32 reg; +}; + +union REG_MAX_EP { + struct { + __le32 maxep:16; + __le32:16; + } bits; + __le32 reg; +}; + +/* Device Control registers */ +union REG_DCTL { + struct { + __le32 reset:1; + __le32 rsv0:15; + __le32 rsv1:16; + } bits; + __le32 reg; +}; + +/* Command Control registers */ +union REG_CR { + struct { + __le32 req_code:16; + __le32 err_info:14; + __le32 error:1; + __le32 req_start:1; + } bits; + __le32 reg; +}; + +union REG_CS { + struct { + __le32 req_code:16; + __le32 rsv0:14; + __le32 busy:1; + __le32 complete:1; + } bits; + __le32 reg; +}; + +/* Interrupt Control registers */ +union REG_ICTL { + struct { + __le32 automak:1; + __le32 rsv0:31; + } bits; + __le32 reg; +}; + +enum REG_ICTL_MASK { + REG_ICTL_MASK_INFO_UPDATE = 1 << 20, + REG_ICTL_MASK_DEV_STOP_REQ = 1 << 19, + REG_ICTL_MASK_TXRX_STOP_REQ = 1 << 18, + REG_ICTL_MASK_TXRX_STOP_DONE = 1 << 17, + REG_ICTL_MASK_RX_DATA = 1 << 16, + REG_ICTL_MASK_ALL = GENMASK(20, 16), +}; + +enum REG_IS_MASK { + REG_IS_MASK_IS_ASSERT = 1 << 31, + REG_IS_MASK_EPID = GENMASK(15, 0), +}; + +struct fjes_hw; + +u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg); + +#define wr32(reg, val) \ +do { \ + u8 *base = hw->base; \ + writel((val), &base[(reg)]); \ +} while (0) + +#define rd32(reg) (fjes_hw_rd32(hw, reg)) + +#endif /* FJES_REGS_H_ */ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 78d49d186..445071c16 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -15,8 +15,11 @@ #include #include #include +#include +#include #include #include +#include #define GENEVE_NETDEV_VER "0.6" @@ -32,12 +35,17 @@ static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); +#define GENEVE_VER 0 +#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr)) + /* per-network namespace private data for this module */ struct geneve_net { - struct list_head geneve_list; - struct hlist_head vni_list[VNI_HASH_SIZE]; + struct list_head geneve_list; + struct list_head sock_list; }; +static int geneve_net_id; + /* Pseudo network device */ struct geneve_dev { struct hlist_node hlist; /* vni 
hash table */ @@ -49,9 +57,20 @@ struct geneve_dev { u8 tos; /* TOS override */ struct sockaddr_in remote; /* IPv4 address for link partner */ struct list_head next; /* geneve's per namespace list */ + __be16 dst_port; + bool collect_md; + struct gro_cells gro_cells; }; -static int geneve_net_id; +struct geneve_sock { + bool collect_md; + struct list_head list; + struct socket *sock; + struct rcu_head rcu; + int refcnt; + struct udp_offload udp_offloads; + struct hlist_head vni_list[VNI_HASH_SIZE]; +}; static inline __u32 geneve_net_vni_hash(u8 vni[3]) { @@ -61,53 +80,105 @@ static inline __u32 geneve_net_vni_hash(u8 vni[3]) return hash_32(vnid, VNI_HASH_BITS); } +static __be64 vni_to_tunnel_id(const __u8 *vni) +{ +#ifdef __BIG_ENDIAN + return (vni[0] << 16) | (vni[1] << 8) | vni[2]; +#else + return (__force __be64)(((__force u64)vni[0] << 40) | + ((__force u64)vni[1] << 48) | + ((__force u64)vni[2] << 56)); +#endif +} + +static struct geneve_dev *geneve_lookup(struct geneve_sock *gs, + __be32 addr, u8 vni[]) +{ + struct hlist_head *vni_list_head; + struct geneve_dev *geneve; + __u32 hash; + + /* Find the device for this VNI */ + hash = geneve_net_vni_hash(vni); + vni_list_head = &gs->vni_list[hash]; + hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) { + if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) && + addr == geneve->remote.sin_addr.s_addr) + return geneve; + } + return NULL; +} + +static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb) +{ + return (struct genevehdr *)(udp_hdr(skb) + 1); +} + /* geneve receive/decap routine */ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) { struct genevehdr *gnvh = geneve_hdr(skb); - struct geneve_dev *dummy, *geneve = NULL; - struct geneve_net *gn; - struct iphdr *iph = NULL; + struct metadata_dst *tun_dst = NULL; + struct geneve_dev *geneve = NULL; struct pcpu_sw_netstats *stats; - struct hlist_head *vni_list_head; - int err = 0; - __u32 hash; + struct iphdr *iph; + u8 *vni; + __be32 addr; + int err; - iph = ip_hdr(skb); /* Still outer IP header... */ + iph = ip_hdr(skb); /* outer IP header... */ - gn = gs->rcv_data; + if (gs->collect_md) { + static u8 zero_vni[3]; - /* Find the device for this VNI */ - hash = geneve_net_vni_hash(gnvh->vni); - vni_list_head = &gn->vni_list[hash]; - hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) { - if (!memcmp(gnvh->vni, dummy->vni, sizeof(dummy->vni)) && - iph->saddr == dummy->remote.sin_addr.s_addr) { - geneve = dummy; - break; - } + vni = zero_vni; + addr = 0; + } else { + vni = gnvh->vni; + addr = iph->saddr; } + + geneve = geneve_lookup(gs, addr, vni); if (!geneve) goto drop; - /* Drop packets w/ critical options, - * since we don't support any... - */ - if (gnvh->critical) - goto drop; + if (ip_tunnel_collect_metadata() || gs->collect_md) { + __be16 flags; + + flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT | + (gnvh->oam ? TUNNEL_OAM : 0) | + (gnvh->critical ? TUNNEL_CRIT_OPT : 0); + + tun_dst = udp_tun_rx_dst(skb, AF_INET, flags, + vni_to_tunnel_id(gnvh->vni), + gnvh->opt_len * 4); + if (!tun_dst) + goto drop; + /* Update tunnel dst according to Geneve options. */ + ip_tunnel_info_opts_set(&tun_dst->u.tun_info, + gnvh->options, gnvh->opt_len * 4); + } else { + /* Drop packets w/ critical options, + * since we don't support any... 
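 * (the Geneve spec makes this mandatory: a receiver that cannot
 * interpret an option whose critical bit is set must drop the packet)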
+ */ + if (gnvh->critical) + goto drop; + } skb_reset_mac_header(skb); skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev))); skb->protocol = eth_type_trans(skb, geneve->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + if (tun_dst) + skb_dst_set(skb, &tun_dst->dst); + /* Ignore packet loops (and multicast echo) */ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) goto drop; skb_reset_network_header(skb); - iph = ip_hdr(skb); /* Now inner IP header... */ err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { @@ -127,8 +198,7 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); - netif_rx(skb); - + gro_cells_receive(&geneve->gro_cells, skb); return; drop: /* Consume bad packet */ @@ -138,32 +208,305 @@ drop: /* Setup stats when device is created */ static int geneve_init(struct net_device *dev) { + struct geneve_dev *geneve = netdev_priv(dev); + int err; + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; + err = gro_cells_init(&geneve->gro_cells, dev); + if (err) { + free_percpu(dev->tstats); + return err; + } + return 0; } static void geneve_uninit(struct net_device *dev) { + struct geneve_dev *geneve = netdev_priv(dev); + + gro_cells_destroy(&geneve->gro_cells); free_percpu(dev->tstats); } +/* Callback from net/ipv4/udp.c to receive packets */ +static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) +{ + struct genevehdr *geneveh; + struct geneve_sock *gs; + int opts_len; + + /* Need Geneve and inner Ethernet header to be present */ + if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) + goto error; + + /* Return packets with reserved bits set */ + geneveh = geneve_hdr(skb); + if (unlikely(geneveh->ver != GENEVE_VER)) + goto error; + + if (unlikely(geneveh->proto_type != htons(ETH_P_TEB))) + goto error; + + opts_len = geneveh->opt_len * 4; + if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, + htons(ETH_P_TEB))) + goto drop; + + gs = rcu_dereference_sk_user_data(sk); + if (!gs) + goto drop; + + geneve_rx(gs, skb); + return 0; + +drop: + /* Consume bad packet */ + kfree_skb(skb); + return 0; + +error: + /* Let the UDP layer deal with the skb */ + return 1; +} + +static struct socket *geneve_create_sock(struct net *net, bool ipv6, + __be16 port) +{ + struct socket *sock; + struct udp_port_cfg udp_conf; + int err; + + memset(&udp_conf, 0, sizeof(udp_conf)); + + if (ipv6) { + udp_conf.family = AF_INET6; + } else { + udp_conf.family = AF_INET; + udp_conf.local_ip.s_addr = htonl(INADDR_ANY); + } + + udp_conf.local_udp_port = port; + + /* Open UDP socket */ + err = udp_sock_create(net, &udp_conf, &sock); + if (err < 0) + return ERR_PTR(err); + + return sock; +} + +static void geneve_notify_add_rx_port(struct geneve_sock *gs) +{ + struct sock *sk = gs->sock->sk; + sa_family_t sa_family = sk->sk_family; + int err; + + if (sa_family == AF_INET) { + err = udp_add_offload(&gs->udp_offloads); + if (err) + pr_warn("geneve: udp_add_offload failed with status %d\n", + err); + } +} + +static int geneve_hlen(struct genevehdr *gh) +{ + return sizeof(*gh) + gh->opt_len * 4; +} + +static struct sk_buff **geneve_gro_receive(struct sk_buff **head, + struct sk_buff *skb, + struct udp_offload *uoff) +{ + struct sk_buff *p, **pp = NULL; + struct genevehdr *gh, *gh2; + unsigned int hlen, gh_len, off_gnv; + const struct packet_offload *ptype; + __be16 type; + int flush = 1; + + off_gnv = skb_gro_offset(skb); + hlen = 
off_gnv + sizeof(*gh); + gh = skb_gro_header_fast(skb, off_gnv); + if (skb_gro_header_hard(skb, hlen)) { + gh = skb_gro_header_slow(skb, hlen, off_gnv); + if (unlikely(!gh)) + goto out; + } + + if (gh->ver != GENEVE_VER || gh->oam) + goto out; + gh_len = geneve_hlen(gh); + + hlen = off_gnv + gh_len; + if (skb_gro_header_hard(skb, hlen)) { + gh = skb_gro_header_slow(skb, hlen, off_gnv); + if (unlikely(!gh)) + goto out; + } + + flush = 0; + + for (p = *head; p; p = p->next) { + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + gh2 = (struct genevehdr *)(p->data + off_gnv); + if (gh->opt_len != gh2->opt_len || + memcmp(gh, gh2, gh_len)) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + } + + type = gh->proto_type; + + rcu_read_lock(); + ptype = gro_find_receive_by_type(type); + if (!ptype) { + flush = 1; + goto out_unlock; + } + + skb_gro_pull(skb, gh_len); + skb_gro_postpull_rcsum(skb, gh, gh_len); + pp = ptype->callbacks.gro_receive(head, skb); + +out_unlock: + rcu_read_unlock(); +out: + NAPI_GRO_CB(skb)->flush |= flush; + + return pp; +} + +static int geneve_gro_complete(struct sk_buff *skb, int nhoff, + struct udp_offload *uoff) +{ + struct genevehdr *gh; + struct packet_offload *ptype; + __be16 type; + int gh_len; + int err = -ENOSYS; + + udp_tunnel_gro_complete(skb, nhoff); + + gh = (struct genevehdr *)(skb->data + nhoff); + gh_len = geneve_hlen(gh); + type = gh->proto_type; + + rcu_read_lock(); + ptype = gro_find_complete_by_type(type); + if (ptype) + err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); + + rcu_read_unlock(); + return err; +} + +/* Create new listen socket if needed */ +static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, + bool ipv6) +{ + struct geneve_net *gn = net_generic(net, geneve_net_id); + struct geneve_sock *gs; + struct socket *sock; + struct udp_tunnel_sock_cfg tunnel_cfg; + int h; + + gs = kzalloc(sizeof(*gs), GFP_KERNEL); + if (!gs) + return ERR_PTR(-ENOMEM); + + sock = geneve_create_sock(net, ipv6, port); + if (IS_ERR(sock)) { + kfree(gs); + return ERR_CAST(sock); + } + + gs->sock = sock; + gs->refcnt = 1; + for (h = 0; h < VNI_HASH_SIZE; ++h) + INIT_HLIST_HEAD(&gs->vni_list[h]); + + /* Initialize the geneve udp offloads structure */ + gs->udp_offloads.port = port; + gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive; + gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete; + geneve_notify_add_rx_port(gs); + + /* Mark socket as an encapsulation socket */ + tunnel_cfg.sk_user_data = gs; + tunnel_cfg.encap_type = 1; + tunnel_cfg.encap_rcv = geneve_udp_encap_recv; + tunnel_cfg.encap_destroy = NULL; + setup_udp_tunnel_sock(net, sock, &tunnel_cfg); + list_add(&gs->list, &gn->sock_list); + return gs; +} + +static void geneve_notify_del_rx_port(struct geneve_sock *gs) +{ + struct sock *sk = gs->sock->sk; + sa_family_t sa_family = sk->sk_family; + + if (sa_family == AF_INET) + udp_del_offload(&gs->udp_offloads); +} + +static void geneve_sock_release(struct geneve_sock *gs) +{ + if (--gs->refcnt) + return; + + list_del(&gs->list); + geneve_notify_del_rx_port(gs); + udp_tunnel_sock_release(gs->sock); + kfree_rcu(gs, rcu); +} + +static struct geneve_sock *geneve_find_sock(struct geneve_net *gn, + __be16 dst_port) +{ + struct geneve_sock *gs; + + list_for_each_entry(gs, &gn->sock_list, list) { + if (inet_sk(gs->sock->sk)->inet_sport == dst_port && + inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) { + return gs; + } + } + return NULL; +} + static int geneve_open(struct net_device *dev) { struct geneve_dev *geneve = 
netdev_priv(dev); struct net *net = geneve->net; - struct geneve_net *gn = net_generic(geneve->net, geneve_net_id); + struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; + __u32 hash; + + gs = geneve_find_sock(gn, geneve->dst_port); + if (gs) { + gs->refcnt++; + goto out; + } - gs = geneve_sock_add(net, htons(GENEVE_UDP_PORT), geneve_rx, gn, - false, false); + gs = geneve_socket_create(net, geneve->dst_port, false); if (IS_ERR(gs)) return PTR_ERR(gs); +out: + gs->collect_md = geneve->collect_md; geneve->sock = gs; + hash = geneve_net_vni_hash(geneve->vni); + hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]); return 0; } @@ -172,77 +515,219 @@ static int geneve_stop(struct net_device *dev) struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs = geneve->sock; + if (!hlist_unhashed(&geneve->hlist)) + hlist_del_rcu(&geneve->hlist); geneve_sock_release(gs); + return 0; +} +static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb, + __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt, + bool csum) +{ + struct genevehdr *gnvh; + int min_headroom; + int err; + + min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr); + err = skb_cow_head(skb, min_headroom); + if (unlikely(err)) { + kfree_skb(skb); + goto free_rt; + } + + skb = udp_tunnel_handle_offloads(skb, csum); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + goto free_rt; + } + + gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); + gnvh->ver = GENEVE_VER; + gnvh->opt_len = opt_len / 4; + gnvh->oam = !!(tun_flags & TUNNEL_OAM); + gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT); + gnvh->rsvd1 = 0; + memcpy(gnvh->vni, vni, 3); + gnvh->proto_type = htons(ETH_P_TEB); + gnvh->rsvd2 = 0; + memcpy(gnvh->options, opt, opt_len); + + skb_set_inner_protocol(skb, htons(ETH_P_TEB)); return 0; + +free_rt: + ip_rt_put(rt); + return err; +} + +static struct rtable *geneve_get_rt(struct sk_buff *skb, + struct net_device *dev, + struct flowi4 *fl4, + struct ip_tunnel_info *info) +{ + struct geneve_dev *geneve = netdev_priv(dev); + struct rtable *rt = NULL; + __u8 tos; + + memset(fl4, 0, sizeof(*fl4)); + fl4->flowi4_mark = skb->mark; + fl4->flowi4_proto = IPPROTO_UDP; + + if (info) { + fl4->daddr = info->key.u.ipv4.dst; + fl4->saddr = info->key.u.ipv4.src; + fl4->flowi4_tos = RT_TOS(info->key.tos); + } else { + tos = geneve->tos; + if (tos == 1) { + const struct iphdr *iip = ip_hdr(skb); + + tos = ip_tunnel_get_dsfield(iip, skb); + } + + fl4->flowi4_tos = RT_TOS(tos); + fl4->daddr = geneve->remote.sin_addr.s_addr; + } + + rt = ip_route_output_key(geneve->net, fl4); + if (IS_ERR(rt)) { + netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr); + return ERR_PTR(-ENETUNREACH); + } + if (rt->dst.dev == dev) { /* is this necessary? */ + netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr); + ip_rt_put(rt); + return ERR_PTR(-ELOOP); + } + return rt; +} + +/* Convert 64 bit tunnel ID to 24 bit VNI. 
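 * The VNI occupies the three low-order bytes of the big-endian ID, e.g.
 * tun_id cpu_to_be64(0xabcdef) <-> vni {0xab, 0xcd, 0xef}; the per-byte
 * shifts below are the exact inverse of vni_to_tunnel_id() above.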
*/ +static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) +{ +#ifdef __BIG_ENDIAN + vni[0] = (__force __u8)(tun_id >> 16); + vni[1] = (__force __u8)(tun_id >> 8); + vni[2] = (__force __u8)tun_id; +#else + vni[0] = (__force __u8)((__force u64)tun_id >> 40); + vni[1] = (__force __u8)((__force u64)tun_id >> 48); + vni[2] = (__force __u8)((__force u64)tun_id >> 56); +#endif } static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs = geneve->sock; + struct ip_tunnel_info *info = NULL; struct rtable *rt = NULL; const struct iphdr *iip; /* interior IP header */ + int err = -EINVAL; struct flowi4 fl4; - int err; - __be16 sport; __u8 tos, ttl; + __be16 sport; + bool udp_csum; + __be16 df; + + if (geneve->collect_md) { + info = skb_tunnel_info(skb); + if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) { + netdev_dbg(dev, "no tunnel metadata\n"); + goto tx_error; + } + if (info && ip_tunnel_info_af(info) != AF_INET) + goto tx_error; + } - iip = ip_hdr(skb); - - skb_reset_mac_header(skb); - - /* TODO: port min/max limits should be configurable */ - sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true); - - tos = geneve->tos; - if (tos == 1) - tos = ip_tunnel_get_dsfield(iip, skb); - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_tos = RT_TOS(tos); - fl4.daddr = geneve->remote.sin_addr.s_addr; - rt = ip_route_output_key(geneve->net, &fl4); + rt = geneve_get_rt(skb, dev, &fl4, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr); - dev->stats.tx_carrier_errors++; + err = PTR_ERR(rt); goto tx_error; } - if (rt->dst.dev == dev) { /* is this necessary? */ - netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr); - dev->stats.collisions++; - goto rt_tx_error; - } - tos = ip_tunnel_ecn_encap(tos, iip, skb); - - ttl = geneve->ttl; - if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) - ttl = 1; - - ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + skb_reset_mac_header(skb); - /* no need to handle local destination and encap bypass...yet... */ + iip = ip_hdr(skb); - err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr, - tos, ttl, 0, sport, htons(GENEVE_UDP_PORT), 0, - geneve->vni, 0, NULL, false, - !net_eq(geneve->net, dev_net(geneve->dev))); - if (err < 0) - ip_rt_put(rt); + if (info) { + const struct ip_tunnel_key *key = &info->key; + u8 *opts = NULL; + u8 vni[3]; + + tunnel_id_to_vni(key->tun_id, vni); + if (key->tun_flags & TUNNEL_GENEVE_OPT) + opts = ip_tunnel_info_opts(info); + + udp_csum = !!(key->tun_flags & TUNNEL_CSUM); + err = geneve_build_skb(rt, skb, key->tun_flags, vni, + info->options_len, opts, udp_csum); + if (unlikely(err)) + goto err; + + tos = ip_tunnel_ecn_encap(key->tos, iip, skb); + ttl = key->ttl; + df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; + } else { + udp_csum = false; + err = geneve_build_skb(rt, skb, 0, geneve->vni, + 0, NULL, udp_csum); + if (unlikely(err)) + goto err; + + tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); + ttl = geneve->ttl; + if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) + ttl = 1; + ttl = ttl ? 
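/* GNU "?:" shorthand: fall back to the route's default hop limit when ttl is still 0 */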
: ip4_dst_hoplimit(&rt->dst); + df = 0; + } + err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr, + tos, ttl, df, sport, geneve->dst_port, + !net_eq(geneve->net, dev_net(geneve->dev)), + !udp_csum); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); - return NETDEV_TX_OK; -rt_tx_error: - ip_rt_put(rt); tx_error: - dev->stats.tx_errors++; dev_kfree_skb(skb); +err: + if (err == -ELOOP) + dev->stats.collisions++; + else if (err == -ENETUNREACH) + dev->stats.tx_carrier_errors++; + else + dev->stats.tx_errors++; return NETDEV_TX_OK; } +static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) +{ + struct ip_tunnel_info *info = skb_tunnel_info(skb); + struct geneve_dev *geneve = netdev_priv(dev); + struct rtable *rt; + struct flowi4 fl4; + + if (ip_tunnel_info_af(info) != AF_INET) + return -EINVAL; + + rt = geneve_get_rt(skb, dev, &fl4, info); + if (IS_ERR(rt)) + return PTR_ERR(rt); + + ip_rt_put(rt); + info->key.u.ipv4.src = fl4.saddr; + info->key.tp_src = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); + info->key.tp_dst = geneve->dst_port; + return 0; +} + static const struct net_device_ops geneve_netdev_ops = { .ndo_init = geneve_init, .ndo_uninit = geneve_uninit, @@ -253,6 +738,7 @@ static const struct net_device_ops geneve_netdev_ops = { .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_fill_metadata_dst = geneve_fill_metadata_dst, }; static void geneve_get_drvinfo(struct net_device *dev, @@ -283,21 +769,17 @@ static void geneve_setup(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &geneve_type); - dev->tx_queue_len = 0; dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; - dev->vlan_features = dev->features; - dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; - dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; netif_keep_dst(dev); - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; + eth_hw_addr_random(dev); } static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { @@ -305,6 +787,8 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, + [IFLA_GENEVE_PORT] = { .type = NLA_U16 }, + [IFLA_GENEVE_COLLECT_METADATA] = { .type = NLA_FLAG }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -330,68 +814,117 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } -static int geneve_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) +static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, + __be16 dst_port, + __be32 rem_addr, + u8 vni[], + bool *tun_on_same_port, + bool *tun_collect_md) +{ + struct geneve_dev *geneve, *t; + + *tun_on_same_port = false; + *tun_collect_md = false; + t = NULL; + list_for_each_entry(geneve, &gn->geneve_list, next) { + if (geneve->dst_port == dst_port) { + *tun_collect_md = geneve->collect_md; + *tun_on_same_port = true; + } + if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) && + rem_addr == geneve->remote.sin_addr.s_addr && + dst_port == geneve->dst_port) + t = geneve; 
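/* exact (vni, remote, dst_port) match; the loop deliberately runs on so the per-port flags above are computed across every device */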
+ } + return t; +} + +static int geneve_configure(struct net *net, struct net_device *dev, + __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos, + __be16 dst_port, bool metadata) { struct geneve_net *gn = net_generic(net, geneve_net_id); - struct geneve_dev *dummy, *geneve = netdev_priv(dev); - struct hlist_head *vni_list_head; - struct sockaddr_in remote; /* IPv4 address for link partner */ - __u32 vni, hash; + struct geneve_dev *t, *geneve = netdev_priv(dev); + bool tun_collect_md, tun_on_same_port; int err; - if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE]) - return -EINVAL; + if (metadata) { + if (rem_addr || vni || tos || ttl) + return -EINVAL; + } geneve->net = net; geneve->dev = dev; - vni = nla_get_u32(data[IFLA_GENEVE_ID]); geneve->vni[0] = (vni & 0x00ff0000) >> 16; geneve->vni[1] = (vni & 0x0000ff00) >> 8; geneve->vni[2] = vni & 0x000000ff; - geneve->remote.sin_addr.s_addr = - nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); + geneve->remote.sin_addr.s_addr = rem_addr; if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr))) return -EINVAL; - remote = geneve->remote; - hash = geneve_net_vni_hash(geneve->vni); - vni_list_head = &gn->vni_list[hash]; - hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) { - if (!memcmp(geneve->vni, dummy->vni, sizeof(dummy->vni)) && - !memcmp(&remote, &dummy->remote, sizeof(dummy->remote))) - return -EBUSY; + geneve->ttl = ttl; + geneve->tos = tos; + geneve->dst_port = dst_port; + geneve->collect_md = metadata; + + t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni, + &tun_on_same_port, &tun_collect_md); + if (t) + return -EBUSY; + + if (metadata) { + if (tun_on_same_port) + return -EPERM; + } else { + if (tun_collect_md) + return -EPERM; } - if (tb[IFLA_ADDRESS] == NULL) - eth_hw_addr_random(dev); - err = register_netdevice(dev); if (err) return err; + list_add(&geneve->next, &gn->geneve_list); + return 0; +} + +static int geneve_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + __be16 dst_port = htons(GENEVE_UDP_PORT); + __u8 ttl = 0, tos = 0; + bool metadata = false; + __be32 rem_addr = 0; + __u32 vni = 0; + + if (data[IFLA_GENEVE_ID]) + vni = nla_get_u32(data[IFLA_GENEVE_ID]); + + if (data[IFLA_GENEVE_REMOTE]) + rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); + if (data[IFLA_GENEVE_TTL]) - geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); if (data[IFLA_GENEVE_TOS]) - geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + tos = nla_get_u8(data[IFLA_GENEVE_TOS]); - list_add(&geneve->next, &gn->geneve_list); + if (data[IFLA_GENEVE_PORT]) + dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]); - hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]); + if (data[IFLA_GENEVE_COLLECT_METADATA]) + metadata = true; - return 0; + return geneve_configure(net, dev, rem_addr, vni, + ttl, tos, dst_port, metadata); } static void geneve_dellink(struct net_device *dev, struct list_head *head) { struct geneve_dev *geneve = netdev_priv(dev); - if (!hlist_unhashed(&geneve->hlist)) - hlist_del_rcu(&geneve->hlist); - list_del(&geneve->next); unregister_netdevice_queue(dev, head); } @@ -402,6 +935,8 @@ static size_t geneve_get_size(const struct net_device *dev) nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ + nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ + nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ 0; } @@ -422,6 +957,14 @@ static 
int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) goto nla_put_failure; + if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port)) + goto nla_put_failure; + + if (geneve->collect_md) { + if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) + goto nla_put_failure; + } + return 0; nla_put_failure: @@ -441,16 +984,34 @@ static struct rtnl_link_ops geneve_link_ops __read_mostly = { .fill_info = geneve_fill_info, }; +struct net_device *geneve_dev_create_fb(struct net *net, const char *name, + u8 name_assign_type, u16 dst_port) +{ + struct nlattr *tb[IFLA_MAX + 1]; + struct net_device *dev; + int err; + + memset(tb, 0, sizeof(tb)); + dev = rtnl_create_link(net, name, name_assign_type, + &geneve_link_ops, tb); + if (IS_ERR(dev)) + return dev; + + err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true); + if (err) { + free_netdev(dev); + return ERR_PTR(err); + } + return dev; +} +EXPORT_SYMBOL_GPL(geneve_dev_create_fb); + static __net_init int geneve_init_net(struct net *net) { struct geneve_net *gn = net_generic(net, geneve_net_id); - unsigned int h; INIT_LIST_HEAD(&gn->geneve_list); - - for (h = 0; h < VNI_HASH_SIZE; ++h) - INIT_HLIST_HEAD(&gn->vni_list[h]); - + INIT_LIST_HEAD(&gn->sock_list); return 0; } diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 83c7cce0d..72c9f1f35 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -638,7 +638,7 @@ static int receive(struct net_device *dev, int cnt) #define GETTICK(x) \ ({ \ if (cpu_has_tsc) \ - rdtscl(x); \ + x = (unsigned int)rdtsc(); \ }) #else /* __i386__ */ #define GETTICK(x) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index dd4544085..5fa98f599 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -162,6 +162,7 @@ struct netvsc_device_info { bool link_state; /* 0 - link up, 1 - link down */ int ring_size; u32 max_num_vrss_chns; + u32 num_chn; }; enum rndis_device_state { @@ -541,6 +542,29 @@ union nvsp_2_message_uber { struct nvsp_2_free_rxbuf free_rxbuf; } __packed; +struct nvsp_4_send_vf_association { + /* 1: allocated, serial number is valid. 
0: not allocated */ + u32 allocated; + + /* Serial number of the VF to team with */ + u32 serial; +} __packed; + +enum nvsp_vm_datapath { + NVSP_DATAPATH_SYNTHETIC = 0, + NVSP_DATAPATH_VF, + NVSP_DATAPATH_MAX +}; + +struct nvsp_4_sw_datapath { + u32 active_datapath; /* active data path in VM */ +} __packed; + +union nvsp_4_message_uber { + struct nvsp_4_send_vf_association vf_assoc; + struct nvsp_4_sw_datapath active_dp; +} __packed; + enum nvsp_subchannel_operation { NVSP_SUBCHANNEL_NONE = 0, NVSP_SUBCHANNEL_ALLOCATE, @@ -578,6 +602,7 @@ union nvsp_all_messages { union nvsp_message_init_uber init_msg; union nvsp_1_message_uber v1_msg; union nvsp_2_message_uber v2_msg; + union nvsp_4_message_uber v4_msg; union nvsp_5_message_uber v5_msg; } __packed; @@ -589,6 +614,7 @@ struct nvsp_message { #define NETVSC_MTU 65536 +#define NETVSC_MTU_MIN 68 #define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ @@ -670,6 +696,8 @@ struct netvsc_device { u32 send_table[VRSS_SEND_TAB_SIZE]; u32 max_chn; u32 num_chn; + spinlock_t sc_lock; /* Protects num_sc_offered variable */ + u32 num_sc_offered; atomic_t queue_sends[NR_CPUS]; /* Holds rndis device info */ @@ -688,6 +716,11 @@ struct netvsc_device { /* The net device context */ struct net_device_context *nd_ctx; + + /* 1: allocated, serial number is valid. 0: not allocated */ + u32 vf_alloc; + /* Serial number of the VF to team with */ + u32 vf_serial; }; /* NdisInitialize message */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 23126a74f..51e4c0fd0 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -453,13 +453,16 @@ static int negotiate_nvsp_ver(struct hv_device *device, if (nvsp_ver == NVSP_PROTOCOL_VERSION_1) return 0; - /* NVSPv2 only: Send NDIS config */ + /* NVSPv2 or later: Send NDIS config */ memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG; init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu + ETH_HLEN; init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; + if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) + init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1; + ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, @@ -1064,11 +1067,10 @@ static void netvsc_receive(struct netvsc_device *net_device, static void netvsc_send_table(struct hv_device *hdev, - struct vmpacket_descriptor *vmpkt) + struct nvsp_message *nvmsg) { struct netvsc_device *nvscdev; struct net_device *ndev; - struct nvsp_message *nvmsg; int i; u32 count, *tab; @@ -1077,12 +1079,6 @@ static void netvsc_send_table(struct hv_device *hdev, return; ndev = nvscdev->ndev; - nvmsg = (struct nvsp_message *)((unsigned long)vmpkt + - (vmpkt->offset8 << 3)); - - if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE) - return; - count = nvmsg->msg.v5_msg.send_table.count; if (count != VRSS_SEND_TAB_SIZE) { netdev_err(ndev, "Received wrong send-table size:%u\n", count); @@ -1096,6 +1092,28 @@ static void netvsc_send_table(struct hv_device *hdev, nvscdev->send_table[i] = tab[i]; } +static void netvsc_send_vf(struct netvsc_device *nvdev, + struct nvsp_message *nvmsg) +{ + nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; + nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; +} + +static inline void netvsc_receive_inband(struct hv_device *hdev, + struct netvsc_device *nvdev, + struct nvsp_message 
*nvmsg) +{ + switch (nvmsg->hdr.msg_type) { + case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: + netvsc_send_table(hdev, nvmsg); + break; + + case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: + netvsc_send_vf(nvdev, nvmsg); + break; + } +} + void netvsc_channel_cb(void *context) { int ret; @@ -1108,6 +1126,7 @@ void netvsc_channel_cb(void *context) unsigned char *buffer; int bufferlen = NETVSC_PACKET_SIZE; struct net_device *ndev; + struct nvsp_message *nvmsg; if (channel->primary_channel != NULL) device = channel->primary_channel->device_obj; @@ -1126,6 +1145,8 @@ void netvsc_channel_cb(void *context) if (ret == 0) { if (bytes_recvd > 0) { desc = (struct vmpacket_descriptor *)buffer; + nvmsg = (struct nvsp_message *)((unsigned long) + desc + (desc->offset8 << 3)); switch (desc->type) { case VM_PKT_COMP: netvsc_send_completion(net_device, @@ -1138,7 +1159,9 @@ void netvsc_channel_cb(void *context) break; case VM_PKT_DATA_INBAND: - netvsc_send_table(device, desc); + netvsc_receive_inband(device, + net_device, + nvmsg); break; default: diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 358475ed9..409b48e1e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -106,7 +106,7 @@ static int netvsc_open(struct net_device *net) return ret; } - netif_tx_start_all_queues(net); + netif_tx_wake_all_queues(net); nvdev = hv_get_drvdata(device_obj); rdev = nvdev->extension; @@ -120,15 +120,56 @@ static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *device_obj = net_device_ctx->device_ctx; + struct netvsc_device *nvdev = hv_get_drvdata(device_obj); int ret; + u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; + struct vmbus_channel *chn; netif_tx_disable(net); /* Make sure netvsc_set_multicast_list doesn't re-enable filter! 
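 * (cancel_work_sync() both cancels a pending work item and waits for an
 * already-running instance to finish)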
*/ cancel_work_sync(&net_device_ctx->work); ret = rndis_filter_close(device_obj); - if (ret != 0) + if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); + return ret; + } + + /* Ensure pending bytes in ring are read */ + while (true) { + aread = 0; + for (i = 0; i < nvdev->num_chn; i++) { + chn = nvdev->chn_table[i]; + if (!chn) + continue; + + hv_get_ringbuffer_availbytes(&chn->inbound, &aread, + &awrite); + + if (aread) + break; + + hv_get_ringbuffer_availbytes(&chn->outbound, &aread, + &awrite); + + if (aread) + break; + } + + retry++; + if (retry > retry_max || aread == 0) + break; + + msleep(msec); + + if (msec < 1000) + msec *= 2; + } + + if (aread) { + netdev_err(net, "Ring buffer not empty after closing rndis\n"); + ret = -ETIMEDOUT; + } return ret; } @@ -198,7 +239,7 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) struct flow_keys flow; int data_len; - if (!skb_flow_dissect_flow_keys(skb, &flow) || + if (!skb_flow_dissect_flow_keys(skb, &flow, 0) || !(flow.basic.n_proto == htons(ETH_P_IP) || flow.basic.n_proto == htons(ETH_P_IPV6))) return false; @@ -729,6 +770,104 @@ static void netvsc_get_channels(struct net_device *net, } } +static int netvsc_set_channels(struct net_device *net, + struct ethtool_channels *channels) +{ + struct net_device_context *net_device_ctx = netdev_priv(net); + struct hv_device *dev = net_device_ctx->device_ctx; + struct netvsc_device *nvdev = hv_get_drvdata(dev); + struct netvsc_device_info device_info; + u32 num_chn; + u32 max_chn; + int ret = 0; + bool recovering = false; + + if (!nvdev || nvdev->destroy) + return -ENODEV; + + num_chn = nvdev->num_chn; + max_chn = min_t(u32, nvdev->max_chn, num_online_cpus()); + + if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) { + pr_info("vRSS unsupported before NVSP Version 5\n"); + return -EINVAL; + } + + /* We do not support rx, tx, or other */ + if (!channels || + channels->rx_count || + channels->tx_count || + channels->other_count || + (channels->combined_count < 1)) + return -EINVAL; + + if (channels->combined_count > max_chn) { + pr_info("combined channels too high, using %d\n", max_chn); + channels->combined_count = max_chn; + } + + ret = netvsc_close(net); + if (ret) + goto out; + + do_set: + nvdev->start_remove = true; + rndis_filter_device_remove(dev); + + nvdev->num_chn = channels->combined_count; + + net_device_ctx->device_ctx = dev; + hv_set_drvdata(dev, net); + + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */ + device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = max_num_vrss_chns; + + ret = rndis_filter_device_add(dev, &device_info); + if (ret) { + if (recovering) { + netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); + return ret; + } + goto recover; + } + + nvdev = hv_get_drvdata(dev); + + ret = netif_set_real_num_tx_queues(net, nvdev->num_chn); + if (ret) { + if (recovering) { + netdev_err(net, "could not set tx queue count (ret %d)\n", ret); + return ret; + } + goto recover; + } + + ret = netif_set_real_num_rx_queues(net, nvdev->num_chn); + if (ret) { + if (recovering) { + netdev_err(net, "could not set rx queue count (ret %d)\n", ret); + return ret; + } + goto recover; + } + + out: + netvsc_open(net); + + return ret; + + recover: + /* If the above failed, we attempt to recover through the same + * process but with the original number of channels. 
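 * Only one recovery pass is attempted: once "recovering" is set, a
 * further failure returns the error instead of looping again.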
+ */ + netdev_err(net, "could not set channels, recovering\n"); + recovering = true; + channels->combined_count = num_chn; + goto do_set; +} + static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); @@ -736,6 +875,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct netvsc_device *nvdev = hv_get_drvdata(hdev); struct netvsc_device_info device_info; int limit = ETH_DATA_LEN; + int ret = 0; if (nvdev == NULL || nvdev->destroy) return -ENODEV; @@ -743,25 +883,31 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) limit = NETVSC_MTU - ETH_HLEN; - /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */ - if (mtu < ETH_DATA_LEN || mtu > limit) + if (mtu < NETVSC_MTU_MIN || mtu > limit) return -EINVAL; + ret = netvsc_close(ndev); + if (ret) + goto out; + nvdev->start_remove = true; - cancel_work_sync(&ndevctx->work); - netif_tx_disable(ndev); rndis_filter_device_remove(hdev); ndev->mtu = mtu; ndevctx->device_ctx = hdev; hv_set_drvdata(hdev, ndev); + + memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; + device_info.num_chn = nvdev->num_chn; device_info.max_num_vrss_chns = max_num_vrss_chns; rndis_filter_device_add(hdev, &device_info); - netif_tx_wake_all_queues(ndev); - return 0; +out: + netvsc_open(ndev); + + return ret; } static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, @@ -844,6 +990,7 @@ static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, .get_channels = netvsc_get_channels, + .set_channels = netvsc_set_channels, }; static const struct net_device_ops device_ops = { @@ -977,6 +1124,7 @@ static int netvsc_probe(struct hv_device *dev, net->needed_headroom = max_needed_headroom; /* Notify the netvsc driver of the new device */ + memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.max_num_vrss_chns = max_num_vrss_chns; ret = rndis_filter_device_add(dev, &device_info); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 236aeb76e..5931a799a 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -984,9 +984,16 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) struct netvsc_device *nvscdev; u16 chn_index = new_sc->offermsg.offer.sub_channel_index; int ret; + unsigned long flags; nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj); + spin_lock_irqsave(&nvscdev->sc_lock, flags); + nvscdev->num_sc_offered--; + spin_unlock_irqrestore(&nvscdev->sc_lock, flags); + if (nvscdev->num_sc_offered == 0) + complete(&nvscdev->channel_init_wait); + if (chn_index >= nvscdev->num_chn) return; @@ -1015,8 +1022,10 @@ int rndis_filter_device_add(struct hv_device *dev, u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); u32 mtu, size; u32 num_rss_qs; + u32 sc_delta; const struct cpumask *node_cpu_mask; u32 num_possible_rss_qs; + unsigned long flags; rndis_device = get_rndis_device(); if (!rndis_device) @@ -1039,6 +1048,8 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->max_chn = 1; net_device->num_chn = 1; + spin_lock_init(&net_device->sc_lock); + net_device->extension = rndis_device; rndis_device->net_dev = net_device; @@ -1054,7 +1065,7 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_query_device(rndis_device, RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size); - if (ret == 0 
&& size == sizeof(u32)) + if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu) net_device->ndev->mtu = mtu; /* Get the mac address */ @@ -1114,7 +1125,15 @@ int rndis_filter_device_add(struct hv_device *dev, */ node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu)); num_possible_rss_qs = cpumask_weight(node_cpu_mask); - net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); + + /* We will use the given number of channels if available. */ + if (device_info->num_chn && device_info->num_chn < net_device->max_chn) + net_device->num_chn = device_info->num_chn; + else + net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); + + num_rss_qs = net_device->num_chn - 1; + net_device->num_sc_offered = num_rss_qs; if (net_device->num_chn == 1) goto out; @@ -1157,11 +1176,25 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); + /* + * Wait for the host to send us the sub-channel offers. + */ + spin_lock_irqsave(&net_device->sc_lock, flags); + sc_delta = num_rss_qs - (net_device->num_chn - 1); + net_device->num_sc_offered -= sc_delta; + spin_unlock_irqrestore(&net_device->sc_lock, flags); + + while (net_device->num_sc_offered != 0) { + t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ); + if (t == 0) + WARN(1, "Netvsc: Waiting for sub-channel processing"); + } out: if (ret) { net_device->max_chn = 1; net_device->num_chn = 1; } + return 0; /* return 0 because primary channel can be used alone */ err_dev_remv: diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index f7bd9f3dd..6422caac8 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -97,9 +97,7 @@ struct at86rf230_local { struct at86rf230_state_change irq; - bool tx_aret; unsigned long cal_timeout; - s8 max_frame_retries; bool is_tx; bool is_tx_from_off; u8 tx_retry; @@ -545,7 +543,9 @@ at86rf230_async_state_delay(void *context) } /* Default delay is 1us in the most cases */ - tim = ktime_set(0, NSEC_PER_USEC); + udelay(1); + at86rf230_async_state_timer(&ctx->timer); + return; change: hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL); @@ -649,7 +649,7 @@ at86rf230_tx_complete(void *context) enable_irq(ctx->irq); - ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret); + ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); } static void @@ -758,17 +758,10 @@ at86rf230_irq_trx_end(struct at86rf230_local *lp) { if (lp->is_tx) { lp->is_tx = 0; - - if (lp->tx_aret) - at86rf230_async_state_change(lp, &lp->irq, - STATE_FORCE_TX_ON, - at86rf230_tx_trac_status, - true); - else - at86rf230_async_state_change(lp, &lp->irq, - STATE_RX_AACK_ON, - at86rf230_tx_complete, - true); + at86rf230_async_state_change(lp, &lp->irq, + STATE_FORCE_TX_ON, + at86rf230_tx_trac_status, + true); } else { at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq, at86rf230_rx_trac_check, true); @@ -874,24 +867,16 @@ at86rf230_xmit_start(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - /* In ARET mode we need to go into STATE_TX_ARET_ON after we - * are in STATE_TX_ON. The pfad differs here, so we change - * the complete handler. 
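A short aside on the sub-channel accounting added in the hyperv hunk above: netvsc_sc_open() decrements a spinlock-protected counter as each offer arrives and fires a completion when it reaches zero, while rndis_filter_device_add() waits on it with a timeout so a silent host cannot hang probe forever. A minimal sketch of that shape, using hypothetical demo_* names rather than the driver's:

	struct demo_dev {
		spinlock_t lock;		/* spin_lock_init() at setup */
		u32 pending;			/* offers still expected */
		struct completion all_offered;	/* init_completion() at setup */
	};

	/* Producer side: called once per incoming offer. */
	static void demo_offer_arrived(struct demo_dev *d)
	{
		unsigned long flags;
		bool done;

		spin_lock_irqsave(&d->lock, flags);
		done = (--d->pending == 0);	/* decide under the lock */
		spin_unlock_irqrestore(&d->lock, flags);

		if (done)
			complete(&d->all_offered);
	}

	/* Consumer side: bounded wait, warn instead of hanging. */
	static void demo_wait_offers(struct demo_dev *d)
	{
		while (READ_ONCE(d->pending)) {
			if (!wait_for_completion_timeout(&d->all_offered,
							 10 * HZ))
				WARN(1, "demo: still waiting for offers\n");
		}
	}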
- */ - if (lp->tx_aret) { - if (lp->is_tx_from_off) { - lp->is_tx_from_off = false; - at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, - at86rf230_write_frame, - false); - } else { - at86rf230_async_state_change(lp, ctx, STATE_TX_ON, - at86rf230_xmit_tx_on, - false); - } + /* check if we change from off state */ + if (lp->is_tx_from_off) { + lp->is_tx_from_off = false; + at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, + at86rf230_write_frame, + false); } else { at86rf230_async_state_change(lp, ctx, STATE_TX_ON, - at86rf230_write_frame, false); + at86rf230_xmit_tx_on, + false); } } @@ -1265,15 +1250,8 @@ static int at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct at86rf230_local *lp = hw->priv; - int rc = 0; - - lp->tx_aret = retries >= 0; - lp->max_frame_retries = retries; - if (retries >= 0) - rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries); - - return rc; + return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries); } static int diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index b6fc29579..c5b54a15f 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -833,6 +833,7 @@ static int cc2520_get_platform_data(struct spi_device *spi, if (!spi_pdata) return -ENOENT; *pdata = *spi_pdata; + priv->fifo_pin = pdata->fifo; return 0; } @@ -1151,7 +1152,6 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids); static struct spi_driver cc2520_driver = { .driver = { .name = "cc2520", - .bus = &spi_bus_type, .owner = THIS_MODULE, .of_match_table = of_match_ptr(cc2520_of_ids), }, diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 2549760e0..997724b8e 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -812,7 +812,6 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids); static struct spi_driver mrf24j40_driver = { .driver = { .name = "mrf24j40", - .bus = &spi_bus_type, .owner = THIS_MODULE, }, .id_table = mrf24j40_ids, diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 94570aace..cc56fac3c 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -38,69 +38,68 @@ #include #define TX_Q_LIMIT 32 -struct ifb_private { +struct ifb_q_private { + struct net_device *dev; struct tasklet_struct ifb_tasklet; - int tasklet_pending; - - struct u64_stats_sync rsync; + int tasklet_pending; + int txqnum; struct sk_buff_head rq; - u64 rx_packets; - u64 rx_bytes; + u64 rx_packets; + u64 rx_bytes; + struct u64_stats_sync rsync; struct u64_stats_sync tsync; + u64 tx_packets; + u64 tx_bytes; struct sk_buff_head tq; - u64 tx_packets; - u64 tx_bytes; -}; +} ____cacheline_aligned_in_smp; -static int numifbs = 2; +struct ifb_dev_private { + struct ifb_q_private *tx_private; +}; -static void ri_tasklet(unsigned long dev); static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev); static int ifb_open(struct net_device *dev); static int ifb_close(struct net_device *dev); -static void ri_tasklet(unsigned long dev) +static void ifb_ri_tasklet(unsigned long _txp) { - struct net_device *_dev = (struct net_device *)dev; - struct ifb_private *dp = netdev_priv(_dev); + struct ifb_q_private *txp = (struct ifb_q_private *)_txp; struct netdev_queue *txq; struct sk_buff *skb; - txq = netdev_get_tx_queue(_dev, 0); - if ((skb = skb_peek(&dp->tq)) == NULL) { - if (__netif_tx_trylock(txq)) { - skb_queue_splice_tail_init(&dp->rq, &dp->tq); - __netif_tx_unlock(txq); - } else { - /* reschedule */ + txq = netdev_get_tx_queue(txp->dev, txp->txqnum); + skb 
= skb_peek(&txp->tq); + if (!skb) { + if (!__netif_tx_trylock(txq)) goto resched; - } + skb_queue_splice_tail_init(&txp->rq, &txp->tq); + __netif_tx_unlock(txq); } - while ((skb = __skb_dequeue(&dp->tq)) != NULL) { + while ((skb = __skb_dequeue(&txp->tq)) != NULL) { u32 from = G_TC_FROM(skb->tc_verd); skb->tc_verd = 0; skb->tc_verd = SET_TC_NCLS(skb->tc_verd); - u64_stats_update_begin(&dp->tsync); - dp->tx_packets++; - dp->tx_bytes += skb->len; - u64_stats_update_end(&dp->tsync); + u64_stats_update_begin(&txp->tsync); + txp->tx_packets++; + txp->tx_bytes += skb->len; + u64_stats_update_end(&txp->tsync); rcu_read_lock(); - skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif); + skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif); if (!skb->dev) { rcu_read_unlock(); dev_kfree_skb(skb); - _dev->stats.tx_dropped++; - if (skb_queue_len(&dp->tq) != 0) + txp->dev->stats.tx_dropped++; + if (skb_queue_len(&txp->tq) != 0) goto resched; break; } rcu_read_unlock(); - skb->skb_iif = _dev->ifindex; + skb->skb_iif = txp->dev->ifindex; if (from & AT_EGRESS) { dev_queue_xmit(skb); @@ -112,10 +111,11 @@ static void ri_tasklet(unsigned long dev) } if (__netif_tx_trylock(txq)) { - if ((skb = skb_peek(&dp->rq)) == NULL) { - dp->tasklet_pending = 0; - if (netif_queue_stopped(_dev)) - netif_wake_queue(_dev); + skb = skb_peek(&txp->rq); + if (!skb) { + txp->tasklet_pending = 0; + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); } else { __netif_tx_unlock(txq); goto resched; @@ -123,8 +123,8 @@ static void ri_tasklet(unsigned long dev) __netif_tx_unlock(txq); } else { resched: - dp->tasklet_pending = 1; - tasklet_schedule(&dp->ifb_tasklet); + txp->tasklet_pending = 1; + tasklet_schedule(&txp->ifb_tasklet); } } @@ -132,29 +132,58 @@ resched: static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct ifb_private *dp = netdev_priv(dev); + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp = dp->tx_private; unsigned int start; - - do { - start = u64_stats_fetch_begin_irq(&dp->rsync); - stats->rx_packets = dp->rx_packets; - stats->rx_bytes = dp->rx_bytes; - } while (u64_stats_fetch_retry_irq(&dp->rsync, start)); - - do { - start = u64_stats_fetch_begin_irq(&dp->tsync); - - stats->tx_packets = dp->tx_packets; - stats->tx_bytes = dp->tx_bytes; - - } while (u64_stats_fetch_retry_irq(&dp->tsync, start)); - + u64 packets, bytes; + int i; + + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + do { + start = u64_stats_fetch_begin_irq(&txp->rsync); + packets = txp->rx_packets; + bytes = txp->rx_bytes; + } while (u64_stats_fetch_retry_irq(&txp->rsync, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + + do { + start = u64_stats_fetch_begin_irq(&txp->tsync); + packets = txp->tx_packets; + bytes = txp->tx_bytes; + } while (u64_stats_fetch_retry_irq(&txp->tsync, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; return stats; } +static int ifb_dev_init(struct net_device *dev) +{ + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp; + int i; + + txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL); + if (!txp) + return -ENOMEM; + dp->tx_private = txp; + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + txp->txqnum = i; + txp->dev = dev; + __skb_queue_head_init(&txp->rq); + __skb_queue_head_init(&txp->tq); + u64_stats_init(&txp->rsync); + u64_stats_init(&txp->tsync); 
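/* Aside: the rsync/tsync members above follow the standard
 * u64_stats_sync protocol, so 64-bit counters stay consistent on
 * 32-bit SMP without a lock.  A minimal sketch of the writer/reader
 * pairing; demo_* names are illustrative and the sketch assumes
 * <linux/u64_stats_sync.h>:
 */
struct demo_q {
	struct u64_stats_sync sync;
	u64 packets;
	u64 bytes;
};

/* Writer: hot path, e.g. once per skb. */
static void demo_count(struct demo_q *q, unsigned int len)
{
	u64_stats_update_begin(&q->sync);
	q->packets++;
	q->bytes += len;
	u64_stats_update_end(&q->sync);
}

/* Reader: retries the snapshot if a writer raced with it. */
static void demo_snapshot(struct demo_q *q, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&q->sync);
		*packets = q->packets;
		*bytes = q->bytes;
	} while (u64_stats_fetch_retry_irq(&q->sync, start));
}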
+ tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet, + (unsigned long)txp); + netif_tx_start_queue(netdev_get_tx_queue(dev, i)); + } + return 0; +} static const struct net_device_ops ifb_netdev_ops = { .ndo_open = ifb_open, @@ -162,6 +191,7 @@ static const struct net_device_ops ifb_netdev_ops = { .ndo_get_stats64 = ifb_stats64, .ndo_start_xmit = ifb_xmit, .ndo_validate_addr = eth_validate_addr, + .ndo_init = ifb_dev_init, }; #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ @@ -169,10 +199,24 @@ static const struct net_device_ops ifb_netdev_ops = { NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_TX) +static void ifb_dev_free(struct net_device *dev) +{ + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp = dp->tx_private; + int i; + + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + tasklet_kill(&txp->ifb_tasklet); + __skb_queue_purge(&txp->rq); + __skb_queue_purge(&txp->tq); + } + kfree(dp->tx_private); + free_netdev(dev); +} + static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. */ - dev->destructor = free_netdev; dev->netdev_ops = &ifb_netdev_ops; /* Fill in device structure with ethernet-generic values. */ @@ -188,17 +232,19 @@ static void ifb_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); eth_hw_addr_random(dev); + dev->destructor = ifb_dev_free; } static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) { - struct ifb_private *dp = netdev_priv(dev); + struct ifb_dev_private *dp = netdev_priv(dev); u32 from = G_TC_FROM(skb->tc_verd); + struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb); - u64_stats_update_begin(&dp->rsync); - dp->rx_packets++; - dp->rx_bytes += skb->len; - u64_stats_update_end(&dp->rsync); + u64_stats_update_begin(&txp->rsync); + txp->rx_packets++; + txp->rx_bytes += skb->len; + u64_stats_update_end(&txp->rsync); if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) { dev_kfree_skb(skb); @@ -206,14 +252,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) { - netif_stop_queue(dev); - } + if (skb_queue_len(&txp->rq) >= dev->tx_queue_len) + netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum)); - __skb_queue_tail(&dp->rq, skb); - if (!dp->tasklet_pending) { - dp->tasklet_pending = 1; - tasklet_schedule(&dp->ifb_tasklet); + __skb_queue_tail(&txp->rq, skb); + if (!txp->tasklet_pending) { + txp->tasklet_pending = 1; + tasklet_schedule(&txp->ifb_tasklet); } return NETDEV_TX_OK; @@ -221,24 +266,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) static int ifb_close(struct net_device *dev) { - struct ifb_private *dp = netdev_priv(dev); - - tasklet_kill(&dp->ifb_tasklet); - netif_stop_queue(dev); - __skb_queue_purge(&dp->rq); - __skb_queue_purge(&dp->tq); + netif_tx_stop_all_queues(dev); return 0; } static int ifb_open(struct net_device *dev) { - struct ifb_private *dp = netdev_priv(dev); - - tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); - __skb_queue_head_init(&dp->rq); - __skb_queue_head_init(&dp->tq); - netif_start_queue(dev); - + netif_tx_start_all_queues(dev); return 0; } @@ -255,31 +289,30 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) static struct rtnl_link_ops ifb_link_ops __read_mostly = { .kind = "ifb", - .priv_size = sizeof(struct ifb_private), + .priv_size = sizeof(struct ifb_dev_private), .setup = ifb_setup, 
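/* Sketch: the teardown ordering in ifb_dev_free() above matters.
 * tasklet_kill() waits for a running tasklet to finish, so it must
 * precede freeing the per-queue memory; the queues are purged before
 * kfree(); free_netdev() comes last because it releases netdev_priv().
 * Condensed form with hypothetical demo_* names (assumes
 * <linux/interrupt.h> and <linux/netdevice.h>):
 */
struct demo_q {
	struct tasklet_struct tasklet;
	struct sk_buff_head rq;
};

struct demo_priv {
	struct demo_q *q;	/* allocated in .ndo_init */
};

static void demo_dev_free(struct net_device *dev)
{
	struct demo_priv *dp = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		tasklet_kill(&dp->q[i].tasklet);  /* wait out running work */
		__skb_queue_purge(&dp->q[i].rq);  /* drop queued skbs */
	}
	kfree(dp->q);
	free_netdev(dev);			  /* last: frees dev and priv */
}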
.validate = ifb_validate, }; -/* Number of ifb devices to be set up by this module. */ +/* Number of ifb devices to be set up by this module. + * Note that these legacy devices have one queue. + * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb + */ +static int numifbs = 2; module_param(numifbs, int, 0); MODULE_PARM_DESC(numifbs, "Number of ifb devices"); static int __init ifb_init_one(int index) { struct net_device *dev_ifb; - struct ifb_private *dp; int err; - dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d", + dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d", NET_NAME_UNKNOWN, ifb_setup); if (!dev_ifb) return -ENOMEM; - dp = netdev_priv(dev_ifb); - u64_stats_init(&dp->rsync); - u64_stats_init(&dp->tsync); - dev_ifb->rtnl_link_ops = &ifb_link_ops; err = register_netdevice(dev_ifb); if (err < 0) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 20b58bdec..a9268db4e 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -520,12 +520,11 @@ static void ipvlan_link_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); - dev->priv_flags |= IFF_UNICAST_FLT; + dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; dev->netdev_ops = &ipvlan_netdev_ops; dev->destructor = free_netdev; dev->header_ops = &ipvlan_header_ops; dev->ethtool_ops = &ipvlan_ethtool_ops; - dev->tx_queue_len = 0; } static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] = diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index 58ae11a14..64bb44d5d 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -1031,7 +1031,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) { struct ali_ircc_cb *self = priv; - unsigned long flags; int iobase; int fcr; /* FIFO control reg */ int lcr; /* Line control reg */ @@ -1061,8 +1060,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) /* Update accounting for new speed */ self->io.speed = speed; - spin_lock_irqsave(&self->lock, flags); - divisor = 115200/speed; fcr = UART_FCR_ENABLE_FIFO; @@ -1089,9 +1086,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) /* without this, the connection will be broken after come back from FIR speed, but with this, the SIR connection is harder to established */ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR); - - spin_unlock_irqrestore(&self->lock, flags); - } static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index c76283c2f..dc7d970bd 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -165,10 +165,9 @@ static void loopback_setup(struct net_device *dev) dev->mtu = 64 * 1024; dev->hard_header_len = ETH_HLEN; /* 14 */ dev->addr_len = ETH_ALEN; /* 6 */ - dev->tx_queue_len = 0; dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ dev->flags = IFF_LOOPBACK; - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; netif_keep_dst(dev); dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 9f59f17dc..47da43595 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1047,6 +1047,7 @@ static const struct net_device_ops 
macvlan_netdev_ops = { .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup, #endif .ndo_get_iflink = macvlan_dev_get_iflink, + .ndo_features_check = passthru_features_check, }; void macvlan_common_setup(struct net_device *dev) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 248478c6f..197c93937 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -137,7 +137,7 @@ static const struct proto_ops macvtap_socket_ops; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ NETIF_F_TSO6 | NETIF_F_UFO) #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) -#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) +#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST) static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev) { diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index 34924dfad..7b7c70e23 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -130,7 +130,7 @@ static const struct net_device_ops nlmon_ops = { static void nlmon_setup(struct net_device *dev) { dev->type = ARPHRD_NETLINK; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &nlmon_ops; dev->ethtool_ops = &nlmon_ethtool_ops; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index d8757bf9a..a9acf7156 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -61,11 +61,21 @@ MODULE_VERSION(NTB_NETDEV_VER); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel Corporation"); +/* Time in usecs for tx resource reaper */ +static unsigned int tx_time = 1; + +/* Number of descriptors to free before resuming tx */ +static unsigned int tx_start = 10; + +/* Number of descriptors still available before stop upper layer tx */ +static unsigned int tx_stop = 5; + struct ntb_netdev { struct list_head list; struct pci_dev *pdev; struct net_device *ndev; struct ntb_transport_qp *qp; + struct timer_list tx_timer; }; #define NTB_TX_TIMEOUT_MS 1000 @@ -136,11 +146,42 @@ enqueue_again: } } +static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev, + struct ntb_transport_qp *qp, int size) +{ + struct ntb_netdev *dev = netdev_priv(netdev); + + netif_stop_queue(netdev); + /* Make sure to see the latest value of ntb_transport_tx_free_entry() + * since the queue was last started. 
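/* Sketch: the smp_mb() that follows pairs with the one in the tx
 * completion handler.  Producer: stop the queue, barrier, re-read the
 * free count and undo the stop if completions raced in.  Consumer:
 * free entries, barrier, re-read the stopped flag and wake.  Without
 * the pairing either side can miss the other's update and the queue
 * stays stopped forever.  Condensed shape with hypothetical demo_*
 * names; demo_free_entries() stands in for
 * ntb_transport_tx_free_entry():
 */
struct demo_qp;
static int demo_free_entries(struct demo_qp *qp);

static void demo_maybe_stop(struct net_device *ndev,
			    struct demo_qp *qp, int low)
{
	netif_stop_queue(ndev);
	smp_mb();	/* stop must be visible before the re-read */
	if (demo_free_entries(qp) >= low)
		netif_start_queue(ndev);	/* raced: undo the stop */
}

static void demo_tx_done(struct net_device *ndev,
			 struct demo_qp *qp, int high)
{
	if (demo_free_entries(qp) < high)
		return;
	smp_mb();	/* freed entries visible before the stopped check */
	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}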
+ */ + smp_mb(); + + if (likely(ntb_transport_tx_free_entry(qp) < size)) { + mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); + return -EBUSY; + } + + netif_start_queue(netdev); + return 0; +} + +static int ntb_netdev_maybe_stop_tx(struct net_device *ndev, + struct ntb_transport_qp *qp, int size) +{ + if (netif_queue_stopped(ndev) || + (ntb_transport_tx_free_entry(qp) >= size)) + return 0; + + return __ntb_netdev_maybe_stop_tx(ndev, qp, size); +} + static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { struct net_device *ndev = qp_data; struct sk_buff *skb; + struct ntb_netdev *dev = netdev_priv(ndev); skb = data; if (!skb || !ndev) @@ -155,6 +196,15 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, } dev_kfree_skb(skb); + + if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) { + /* Make sure anybody stopping the queue after this sees the new + * value of ntb_transport_tx_free_entry() + */ + smp_mb(); + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + } } static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb, @@ -163,10 +213,15 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb, struct ntb_netdev *dev = netdev_priv(ndev); int rc; + ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop); + rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len); if (rc) goto err; + /* check for next submit */ + ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop); + return NETDEV_TX_OK; err: @@ -175,6 +230,23 @@ err: return NETDEV_TX_BUSY; } +static void ntb_netdev_tx_timer(unsigned long data) +{ + struct net_device *ndev = (struct net_device *)data; + struct ntb_netdev *dev = netdev_priv(ndev); + + if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { + mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time)); + } else { + /* Make sure anybody stopping the queue after this sees the new + * value of ntb_transport_tx_free_entry() + */ + smp_mb(); + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + } +} + static int ntb_netdev_open(struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); @@ -197,8 +269,11 @@ static int ntb_netdev_open(struct net_device *ndev) } } + setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev); + netif_carrier_off(ndev); ntb_transport_link_up(dev->qp); + netif_start_queue(ndev); return 0; @@ -219,6 +294,8 @@ static int ntb_netdev_close(struct net_device *ndev) while ((skb = ntb_transport_rx_remove(dev->qp, &len))) dev_kfree_skb(skb); + del_timer_sync(&dev->tx_timer); + return 0; } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index cb86d7a01..436972b2a 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -14,6 +14,11 @@ if PHYLIB comment "MII PHY device drivers" +config AQUANTIA_PHY + tristate "Drivers for the Aquantia PHYs" + ---help--- + Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 + config AT803X_PHY tristate "Drivers for Atheros AT803X PHYs" ---help--- @@ -54,6 +59,11 @@ config VITESSE_PHY ---help--- Currently supports the vsc8244 +config TERANETICS_PHY + tristate "Drivers for the Teranetics PHYs" + ---help--- + Currently supports the Teranetics TN2020 + config SMSC_PHY tristate "Drivers for SMSC PHYs" ---help--- @@ -112,11 +122,21 @@ config MICREL_PHY ---help--- Supports the KSZ9021, VSC8201, KS8001 PHYs. +config DP83848_PHY + tristate "Driver for Texas Instruments DP83848 PHY" + ---help--- + Supports the DP83848 PHY. 
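Each Kconfig entry added here pairs with a Makefile line and a small driver module, both of which appear elsewhere in this patch. For orientation, a condensed sketch of the minimal boilerplate such a PHY driver needs, written for a made-up DEMO device id and leaning on the genphy helpers (the real dp83848.c follows later in this patch):

	#include <linux/module.h>
	#include <linux/phy.h>

	#define DEMO_PHY_ID	0x00112230	/* hypothetical OUI/model */

	static struct phy_driver demo_driver[] = { {
		.phy_id		= DEMO_PHY_ID,
		.phy_id_mask	= 0xfffffff0,
		.name		= "Demo 10/100 PHY",
		.features	= PHY_BASIC_FEATURES,
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
		.driver		= { .owner = THIS_MODULE, },
	} };
	module_phy_driver(demo_driver);

	static struct mdio_device_id __maybe_unused demo_tbl[] = {
		{ DEMO_PHY_ID, 0xfffffff0 },
		{ }
	};
	MODULE_DEVICE_TABLE(mdio, demo_tbl);

	MODULE_DESCRIPTION("Demo PHY driver");
	MODULE_LICENSE("GPL");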
+ config DP83867_PHY tristate "Drivers for Texas Instruments DP83867 Gigabit PHY" ---help--- Currently supports the DP83867 PHY. +config MICROCHIP_PHY + tristate "Drivers for Microchip PHYs" + help + Supports the LAN88XX PHYs. + config FIXED_PHY tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" depends on PHYLIB @@ -145,15 +165,13 @@ config MDIO_GPIO will be called mdio-gpio. config MDIO_OCTEON - tristate "Support for MDIO buses on Octeon SOCs" - depends on CAVIUM_OCTEON_SOC - default y + tristate "Support for MDIO buses on Octeon and ThunderX SOCs" + depends on 64BIT help - This module provides a driver for the Octeon MDIO busses. - It is required by the Octeon Ethernet device drivers. - - If in doubt, say Y. + This module provides a driver for the Octeon and ThunderX MDIO + busses. It is required by the Octeon and ThunderX ethernet device + drivers. config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index fcc25a0c4..b74822463 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -3,12 +3,14 @@ libphy-objs := phy.o phy_device.o mdio_bus.o obj-$(CONFIG_PHYLIB) += libphy.o +obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o obj-$(CONFIG_MARVELL_PHY) += marvell.o obj-$(CONFIG_DAVICOM_PHY) += davicom.o obj-$(CONFIG_CICADA_PHY) += cicada.o obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_SMSC_PHY) += smsc.o +obj-$(CONFIG_TERANETICS_PHY) += teranetics.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_BROADCOM_PHY) += broadcom.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o @@ -22,6 +24,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_DP83640_PHY) += dp83640.o +obj-$(CONFIG_DP83848_PHY) += dp83848.o obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o @@ -35,3 +38,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o +obj-$(CONFIG_MICROCHIP_PHY) += microchip.o diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c new file mode 100644 index 000000000..d6111affb --- /dev/null +++ b/drivers/net/phy/aquantia.c @@ -0,0 +1,201 @@ +/* + * Driver for Aquantia PHY + * + * Author: Shaohui Xie + * + * Copyright 2015 Freescale Semiconductor, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define PHY_ID_AQ1202 0x03a1b445 +#define PHY_ID_AQ2104 0x03a1b460 +#define PHY_ID_AQR105 0x03a1b4a2 +#define PHY_ID_AQR405 0x03a1b4b0 + +#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ + SUPPORTED_1000baseT_Full | \ + SUPPORTED_100baseT_Full | \ + PHY_DEFAULT_FEATURES) + +static int aquantia_config_aneg(struct phy_device *phydev) +{ + phydev->supported = PHY_AQUANTIA_FEATURES; + phydev->advertising = phydev->supported; + + return 0; +} + +static int aquantia_aneg_done(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); +} + +static int aquantia_config_intr(struct phy_device *phydev) +{ + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001); + } else { + err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0); + } + + return err; +} + +static int aquantia_ack_interrupt(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01); + return (reg < 0) ? reg : 0; +} + +static int aquantia_read_status(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + if (reg & MDIO_STAT1_LSTATUS) + phydev->link = 1; + else + phydev->link = 0; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); + mdelay(10); + reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); + + switch (reg) { + case 0x9: + phydev->speed = SPEED_2500; + break; + case 0x5: + phydev->speed = SPEED_1000; + break; + case 0x3: + phydev->speed = SPEED_100; + break; + case 0x7: + default: + phydev->speed = SPEED_10000; + break; + } + phydev->duplex = DUPLEX_FULL; + + return 0; +} + +static struct phy_driver aquantia_driver[] = { +{ + .phy_id = PHY_ID_AQ1202, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQ1202", + .features = PHY_AQUANTIA_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .aneg_done = aquantia_aneg_done, + .config_aneg = aquantia_config_aneg, + .config_intr = aquantia_config_intr, + .ack_interrupt = aquantia_ack_interrupt, + .read_status = aquantia_read_status, + .driver = { .owner = THIS_MODULE,}, +}, +{ + .phy_id = PHY_ID_AQ2104, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQ2104", + .features = PHY_AQUANTIA_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .aneg_done = aquantia_aneg_done, + .config_aneg = aquantia_config_aneg, + .config_intr = aquantia_config_intr, + .ack_interrupt = aquantia_ack_interrupt, + .read_status = aquantia_read_status, + .driver = { .owner = THIS_MODULE,}, +}, +{ + .phy_id = PHY_ID_AQR105, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQR105", + .features = PHY_AQUANTIA_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .aneg_done = aquantia_aneg_done, + .config_aneg = aquantia_config_aneg, + .config_intr = aquantia_config_intr, + .ack_interrupt = aquantia_ack_interrupt, + .read_status = aquantia_read_status, + .driver = { .owner = THIS_MODULE,}, +}, +{ + .phy_id = PHY_ID_AQR405, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQR405", + .features = PHY_AQUANTIA_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .aneg_done = aquantia_aneg_done, + .config_aneg = aquantia_config_aneg, + .config_intr = aquantia_config_intr, + .ack_interrupt = aquantia_ack_interrupt, + .read_status = aquantia_read_status, + .driver = { .owner = THIS_MODULE,}, +}, +}; + +static int __init aquantia_init(void) +{ + return phy_drivers_register(aquantia_driver, + ARRAY_SIZE(aquantia_driver)); +} + +static void __exit aquantia_exit(void) +{ + return phy_drivers_unregister(aquantia_driver, + ARRAY_SIZE(aquantia_driver)); +} + +module_init(aquantia_init); +module_exit(aquantia_exit); + +static struct mdio_device_id __maybe_unused aquantia_tbl[] = { + { PHY_ID_AQ1202, 0xfffffff0 }, + { 
PHY_ID_AQ2104, 0xfffffff0 }, + { PHY_ID_AQR105, 0xfffffff0 }, + { PHY_ID_AQR405, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, aquantia_tbl); + +MODULE_DESCRIPTION("Aquantia PHY driver"); +MODULE_AUTHOR("Shaohui Xie "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 00cb41e71..185b03c08 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1449,17 +1449,9 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info) info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c new file mode 100644 index 000000000..5ce9bef54 --- /dev/null +++ b/drivers/net/phy/dp83848.c @@ -0,0 +1,99 @@ +/* + * Driver for the Texas Instruments DP83848 PHY + * + * Copyright (C) 2015 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#define DP83848_PHY_ID 0x20005c90 + +/* Registers */ +#define DP83848_MICR 0x11 +#define DP83848_MISR 0x12 + +/* MICR Register Fields */ +#define DP83848_MICR_INT_OE BIT(0) /* Interrupt Output Enable */ +#define DP83848_MICR_INTEN BIT(1) /* Interrupt Enable */ + +/* MISR Register Fields */ +#define DP83848_MISR_RHF_INT_EN BIT(0) /* Receive Error Counter */ +#define DP83848_MISR_FHF_INT_EN BIT(1) /* False Carrier Counter */ +#define DP83848_MISR_ANC_INT_EN BIT(2) /* Auto-negotiation complete */ +#define DP83848_MISR_DUP_INT_EN BIT(3) /* Duplex Status */ +#define DP83848_MISR_SPD_INT_EN BIT(4) /* Speed status */ +#define DP83848_MISR_LINK_INT_EN BIT(5) /* Link status */ +#define DP83848_MISR_ED_INT_EN BIT(6) /* Energy detect */ +#define DP83848_MISR_LQM_INT_EN BIT(7) /* Link Quality Monitor */ + +static int dp83848_ack_interrupt(struct phy_device *phydev) +{ + int err = phy_read(phydev, DP83848_MISR); + + return err < 0 ? 
err : 0; +} + +static int dp83848_config_intr(struct phy_device *phydev) +{ + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = phy_write(phydev, DP83848_MICR, + DP83848_MICR_INT_OE | + DP83848_MICR_INTEN); + if (err < 0) + return err; + + return phy_write(phydev, DP83848_MISR, + DP83848_MISR_ANC_INT_EN | + DP83848_MISR_DUP_INT_EN | + DP83848_MISR_SPD_INT_EN | + DP83848_MISR_LINK_INT_EN); + } + + return phy_write(phydev, DP83848_MICR, 0x0); +} + +static struct mdio_device_id __maybe_unused dp83848_tbl[] = { + { DP83848_PHY_ID, 0xfffffff0 }, + { } +}; +MODULE_DEVICE_TABLE(mdio, dp83848_tbl); + +static struct phy_driver dp83848_driver[] = { + { + .phy_id = DP83848_PHY_ID, + .phy_id_mask = 0xfffffff0, + .name = "TI DP83848", + .features = PHY_BASIC_FEATURES, + .flags = PHY_HAS_INTERRUPT, + + .soft_reset = genphy_soft_reset, + .config_init = genphy_config_init, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + + /* IRQ related */ + .ack_interrupt = dp83848_ack_interrupt, + .config_intr = dp83848_config_intr, + + .driver = { .owner = THIS_MODULE, }, + }, +}; +module_phy_driver(dp83848_driver); + +MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver"); +MODULE_AUTHOR("Andrew F. Davis fifo_depth); - if (ret) - return ret; - - return 0; } #else static int dp83867_of_init(struct phy_device *phydev) diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 99d9bc19c..e23bf5b90 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -22,6 +22,7 @@ #include #include #include +#include #define MII_REGS_NUM 29 @@ -38,6 +39,7 @@ struct fixed_phy { struct fixed_phy_status status; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; + int link_gpio; }; static struct platform_device *pdev; @@ -52,61 +54,87 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) u16 lpagb = 0; u16 lpa = 0; - if (!fp->status.link) - goto done; - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; + if (gpio_is_valid(fp->link_gpio)) + fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio); if (fp->status.duplex) { - bmcr |= BMCR_FULLDPLX; - switch (fp->status.speed) { case 1000: bmsr |= BMSR_ESTATEN; - bmcr |= BMCR_SPEED1000; - lpagb |= LPA_1000FULL; break; case 100: bmsr |= BMSR_100FULL; - bmcr |= BMCR_SPEED100; - lpa |= LPA_100FULL; break; case 10: bmsr |= BMSR_10FULL; - lpa |= LPA_10FULL; break; default: - pr_warn("fixed phy: unknown speed\n"); - return -EINVAL; + break; } } else { switch (fp->status.speed) { case 1000: bmsr |= BMSR_ESTATEN; - bmcr |= BMCR_SPEED1000; - lpagb |= LPA_1000HALF; break; case 100: bmsr |= BMSR_100HALF; - bmcr |= BMCR_SPEED100; - lpa |= LPA_100HALF; break; case 10: bmsr |= BMSR_10HALF; - lpa |= LPA_10HALF; break; default: - pr_warn("fixed phy: unknown speed\n"); - return -EINVAL; + break; } } - if (fp->status.pause) - lpa |= LPA_PAUSE_CAP; + if (fp->status.link) { + bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; + + if (fp->status.duplex) { + bmcr |= BMCR_FULLDPLX; + + switch (fp->status.speed) { + case 1000: + bmcr |= BMCR_SPEED1000; + lpagb |= LPA_1000FULL; + break; + case 100: + bmcr |= BMCR_SPEED100; + lpa |= LPA_100FULL; + break; + case 10: + lpa |= LPA_10FULL; + break; + default: + pr_warn("fixed phy: unknown speed\n"); + return -EINVAL; + } + } else { + switch (fp->status.speed) { + case 1000: + bmcr |= BMCR_SPEED1000; + lpagb |= LPA_1000HALF; + break; + case 100: + bmcr |= BMCR_SPEED100; + lpa |= LPA_100HALF; + 
break; + case 10: + lpa |= LPA_10HALF; + break; + default: + pr_warn("fixed phy: unknown speed\n"); + return -EINVAL; + } + } - if (fp->status.asym_pause) - lpa |= LPA_PAUSE_ASYM; + if (fp->status.pause) + lpa |= LPA_PAUSE_CAP; + + if (fp->status.asym_pause) + lpa |= LPA_PAUSE_ASYM; + } -done: fp->regs[MII_PHYSID1] = 0; fp->regs[MII_PHYSID2] = 0; @@ -192,7 +220,7 @@ int fixed_phy_update_state(struct phy_device *phydev, struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; - if (!phydev || !phydev->bus) + if (!phydev || phydev->bus != fmb->mii_bus) return -EINVAL; list_for_each_entry(fp, &fmb->phys, node) { @@ -215,7 +243,8 @@ int fixed_phy_update_state(struct phy_device *phydev, EXPORT_SYMBOL(fixed_phy_update_state); int fixed_phy_add(unsigned int irq, int phy_addr, - struct fixed_phy_status *status) + struct fixed_phy_status *status, + int link_gpio) { int ret; struct fixed_mdio_bus *fmb = &platform_fmb; @@ -231,15 +260,26 @@ int fixed_phy_add(unsigned int irq, int phy_addr, fp->addr = phy_addr; fp->status = *status; + fp->link_gpio = link_gpio; + + if (gpio_is_valid(fp->link_gpio)) { + ret = gpio_request_one(fp->link_gpio, GPIOF_DIR_IN, + "fixed-link-gpio-link"); + if (ret) + goto err_regs; + } ret = fixed_phy_update_regs(fp); if (ret) - goto err_regs; + goto err_gpio; list_add_tail(&fp->node, &fmb->phys); return 0; +err_gpio: + if (gpio_is_valid(fp->link_gpio)) + gpio_free(fp->link_gpio); err_regs: kfree(fp); return ret; @@ -254,6 +294,8 @@ void fixed_phy_del(int phy_addr) list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { if (fp->addr == phy_addr) { list_del(&fp->node); + if (gpio_is_valid(fp->link_gpio)) + gpio_free(fp->link_gpio); kfree(fp); return; } @@ -266,6 +308,7 @@ static DEFINE_SPINLOCK(phy_fixed_addr_lock); struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, + int link_gpio, struct device_node *np) { struct fixed_mdio_bus *fmb = &platform_fmb; @@ -282,7 +325,7 @@ struct phy_device *fixed_phy_register(unsigned int irq, phy_addr = phy_fixed_addr++; spin_unlock(&phy_fixed_addr_lock); - ret = fixed_phy_add(PHY_POLL, phy_addr, status); + ret = fixed_phy_add(irq, phy_addr, status, link_gpio); if (ret < 0) return ERR_PTR(ret); @@ -303,6 +346,19 @@ struct phy_device *fixed_phy_register(unsigned int irq, of_node_get(np); phy->dev.of_node = np; + phy->is_pseudo_fixed_link = true; + + switch (status->speed) { + case SPEED_1000: + phy->supported = PHY_1000BT_FEATURES; + break; + case SPEED_100: + phy->supported = PHY_100BT_FEATURES; + break; + case SPEED_10: + default: + phy->supported = PHY_10BT_FEATURES; + } ret = phy_device_register(phy); if (ret) { diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f721444c2..5de8d5827 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -48,8 +48,11 @@ #define MII_M1011_IMASK_CLEAR 0x0000 #define MII_M1011_PHY_SCR 0x10 +#define MII_M1011_PHY_SCR_MDI 0x0000 +#define MII_M1011_PHY_SCR_MDI_X 0x0020 #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 +#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16 #define MII_M1145_PHY_EXT_SR 0x1b #define MII_M1145_PHY_EXT_CR 0x14 #define MII_M1145_RGMII_RX_DELAY 0x0080 @@ -159,6 +162,43 @@ static int marvell_config_intr(struct phy_device *phydev) return err; } +static int marvell_set_polarity(struct phy_device *phydev, int polarity) +{ + int reg; + int err; + int val; + + /* get the current settings */ + reg = phy_read(phydev, MII_M1011_PHY_SCR); + if (reg < 0) + return reg; + + val = reg; + val &= ~MII_M1011_PHY_SCR_AUTO_CROSS; + switch 
(polarity) { + case ETH_TP_MDI: + val |= MII_M1011_PHY_SCR_MDI; + break; + case ETH_TP_MDI_X: + val |= MII_M1011_PHY_SCR_MDI_X; + break; + case ETH_TP_MDI_AUTO: + case ETH_TP_MDI_INVALID: + default: + val |= MII_M1011_PHY_SCR_AUTO_CROSS; + break; + } + + if (val != reg) { + /* Set the new polarity value in the register */ + err = phy_write(phydev, MII_M1011_PHY_SCR, val); + if (err) + return err; + } + + return 0; +} + static int marvell_config_aneg(struct phy_device *phydev) { int err; @@ -191,8 +231,7 @@ static int marvell_config_aneg(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix); if (err < 0) return err; @@ -514,6 +553,16 @@ static int m88e1111_config_init(struct phy_device *phydev) err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); if (err < 0) return err; + + /* make sure copper is selected */ + err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE); + if (err < 0) + return err; + + err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, + err & (~0xff)); + if (err < 0) + return err; } if (phydev->interface == PHY_INTERFACE_MODE_RTBI) { @@ -736,6 +785,7 @@ static int marvell_read_status(struct phy_device *phydev) int adv; int err; int lpa; + int lpagb; int status = 0; /* Update the link, but return if there @@ -753,10 +803,17 @@ static int marvell_read_status(struct phy_device *phydev) if (lpa < 0) return lpa; + lpagb = phy_read(phydev, MII_STAT1000); + if (lpagb < 0) + return lpagb; + adv = phy_read(phydev, MII_ADVERTISE); if (adv < 0) return adv; + phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) | + mii_lpa_to_ethtool_lpa_t(lpa); + lpa &= adv; if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) @@ -804,6 +861,7 @@ static int marvell_read_status(struct phy_device *phydev) phydev->speed = SPEED_10; phydev->pause = phydev->asym_pause = 0; + phydev->lp_advertising = 0; } return 0; diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 6a52a7f0f..4bde5e728 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -244,6 +244,7 @@ static const struct of_device_id unimac_mdio_ids[] = { { .compatible = "brcm,unimac-mdio", }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, unimac_mdio_ids); static struct platform_driver unimac_mdio_driver = { .driver = { diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 7dc21e56a..3bc9f0334 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -261,6 +261,7 @@ static const struct of_device_id mdio_gpio_of_match[] = { { .compatible = "virtual,mdio-gpio", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, mdio_gpio_of_match); static struct platform_driver mdio_gpio_driver = { .probe = mdio_gpio_probe, diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 2377c1341..7fde454fb 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -113,12 +113,14 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) if (!iprop || len != sizeof(uint32_t)) { dev_err(&pdev->dev, "mdio-mux child node %s is " "missing a 'reg' property\n", np2->full_name); + of_node_put(np2); return -ENODEV; } if (be32_to_cpup(iprop) & ~s->mask) { dev_err(&pdev->dev, "mdio-mux child node %s has " "a 'reg' value with unmasked bits\n", np2->full_name); + of_node_put(np2); return -ENODEV; } } diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 
4d4d25efc..908e8d486 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -113,18 +113,18 @@ int mdio_mux_init(struct device *dev, if (!parent_bus_node) return -ENODEV; - parent_bus = of_mdio_find_bus(parent_bus_node); - if (parent_bus == NULL) { - ret_val = -EPROBE_DEFER; - goto err_parent_bus; - } - pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); if (pb == NULL) { ret_val = -ENOMEM; goto err_parent_bus; } + parent_bus = of_mdio_find_bus(parent_bus_node); + if (parent_bus == NULL) { + ret_val = -EPROBE_DEFER; + goto err_parent_bus; + } + pb->switch_data = data; pb->switch_fn = switch_fn; pb->current_child = -1; @@ -144,6 +144,7 @@ int mdio_mux_init(struct device *dev, dev_err(dev, "Error: Failed to allocate memory for child\n"); ret_val = -ENOMEM; + of_node_put(child_bus_node); break; } cb->bus_number = v; @@ -173,6 +174,10 @@ int mdio_mux_init(struct device *dev, dev_info(dev, "Version " DRV_VERSION "\n"); return 0; } + + /* balance the reference of_mdio_find_bus() took */ + put_device(&pb->mii_bus->dev); + err_parent_bus: of_node_put(parent_bus_node); return ret_val; @@ -189,6 +194,9 @@ void mdio_mux_uninit(void *mux_handle) mdiobus_free(cb->mii_bus); cb = cb->next; } + + /* balance the reference of_mdio_find_bus() in mdio_mux_init() took */ + put_device(&pb->mii_bus->dev); } EXPORT_SYMBOL_GPL(mdio_mux_uninit); diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index c838ad615..fcf4e4df7 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -14,11 +15,12 @@ #include #include +#ifdef CONFIG_CAVIUM_OCTEON_SOC #include -#include +#endif -#define DRV_VERSION "1.0" -#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver" +#define DRV_VERSION "1.1" +#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver" #define SMI_CMD 0x0 #define SMI_WR_DAT 0x8 @@ -26,6 +28,79 @@ #define SMI_CLK 0x18 #define SMI_EN 0x20 +#ifdef __BIG_ENDIAN_BITFIELD +#define OCT_MDIO_BITFIELD_FIELD(field, more) \ + field; \ + more + +#else +#define OCT_MDIO_BITFIELD_FIELD(field, more) \ + more \ + field; + +#endif + +union cvmx_smix_clk { + u64 u64; + struct cvmx_smix_clk_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39, + OCT_MDIO_BITFIELD_FIELD(u64 mode:1, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3, + OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5, + OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1, + OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1, + OCT_MDIO_BITFIELD_FIELD(u64 preamble:1, + OCT_MDIO_BITFIELD_FIELD(u64 sample:4, + OCT_MDIO_BITFIELD_FIELD(u64 phase:8, + ;)))))))))) + } s; +}; + +union cvmx_smix_cmd { + u64 u64; + struct cvmx_smix_cmd_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3, + OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3, + OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5, + ;)))))) + } s; +}; + +union cvmx_smix_en { + u64 u64; + struct cvmx_smix_en_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63, + OCT_MDIO_BITFIELD_FIELD(u64 en:1, + ;)) + } s; +}; + +union cvmx_smix_rd_dat { + u64 u64; + struct cvmx_smix_rd_dat_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 pending:1, + OCT_MDIO_BITFIELD_FIELD(u64 val:1, + OCT_MDIO_BITFIELD_FIELD(u64 dat:16, + ;)))) + } s; +}; + +union cvmx_smix_wr_dat { + u64 u64; + struct cvmx_smix_wr_dat_s { + 
OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 pending:1, + OCT_MDIO_BITFIELD_FIELD(u64 val:1, + OCT_MDIO_BITFIELD_FIELD(u64 dat:16, + ;)))) + } s; +}; + enum octeon_mdiobus_mode { UNINIT = 0, C22, @@ -41,6 +116,21 @@ struct octeon_mdiobus { int phy_irq[PHY_MAX_ADDR]; }; +#ifdef CONFIG_CAVIUM_OCTEON_SOC +static void oct_mdio_writeq(u64 val, u64 addr) +{ + cvmx_write_csr(addr, val); +} + +static u64 oct_mdio_readq(u64 addr) +{ + return cvmx_read_csr(addr); +} +#else +#define oct_mdio_writeq(val, addr) writeq_relaxed(val, (void *)addr) +#define oct_mdio_readq(addr) readq_relaxed((void *)addr) +#endif + static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p, enum octeon_mdiobus_mode m) { @@ -49,10 +139,10 @@ static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p, if (m == p->mode) return; - smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK); + smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK); smi_clk.s.mode = (m == C45) ? 1 : 0; smi_clk.s.preamble = 1; - cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64); + oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK); p->mode = m; } @@ -67,7 +157,7 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p, smi_wr.u64 = 0; smi_wr.s.dat = regnum & 0xffff; - cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64); + oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); regnum = (regnum >> 16) & 0x1f; @@ -75,14 +165,14 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p, smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; - cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ __delay(1000); - smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT); + smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); } while (smi_wr.s.pending && --timeout); if (timeout <= 0) @@ -114,14 +204,14 @@ static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) smi_cmd.s.phy_op = op; smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; - cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ __delay(1000); - smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT); + smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT); } while (smi_rd.s.pending && --timeout); if (smi_rd.s.val) @@ -153,20 +243,20 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, smi_wr.u64 = 0; smi_wr.s.dat = val; - cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64); + oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); smi_cmd.u64 = 0; smi_cmd.s.phy_op = op; smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; - cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. 
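/* Sketch: this kick-then-poll shape recurs throughout the driver:
 * write a command register, then poll a pending bit with a fixed
 * inter-poll delay and a bounded iteration count, so a wedged bus
 * returns an error instead of spinning forever.  Distilled into a
 * hypothetical helper (reuses the union and accessor defined above):
 */
static int demo_smi_wait(u64 addr)
{
	union cvmx_smix_wr_dat val;
	int timeout = 1000;

	do {
		__delay(1000);	/* ~1000 clocks; don't saturate the RSL bus */
		val.u64 = oct_mdio_readq(addr);
	} while (val.s.pending && --timeout);

	return timeout > 0 ? 0 : -ETIMEDOUT;
}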
*/ __delay(1000); - smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT); + smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); } while (smi_wr.s.pending && --timeout); if (timeout <= 0) @@ -187,30 +277,34 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return -ENOMEM; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res_mem == NULL) { dev_err(&pdev->dev, "found no memory resource\n"); - err = -ENXIO; - goto fail; + return -ENXIO; } + bus->mdio_phys = res_mem->start; bus->regsize = resource_size(res_mem); + if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize, res_mem->name)) { dev_err(&pdev->dev, "request_mem_region failed\n"); - goto fail; + return -ENXIO; } + bus->register_base = (u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize); + if (!bus->register_base) { + dev_err(&pdev->dev, "dev_ioremap failed\n"); + return -ENOMEM; + } bus->mii_bus = mdiobus_alloc(); - if (!bus->mii_bus) goto fail; smi_en.u64 = 0; smi_en.s.en = 1; - cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); bus->mii_bus->priv = bus; bus->mii_bus->irq = bus->phy_irq; @@ -234,7 +328,7 @@ fail_register: mdiobus_free(bus->mii_bus); fail: smi_en.u64 = 0; - cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); return err; } @@ -248,7 +342,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev) mdiobus_unregister(bus->mii_bus); mdiobus_free(bus->mii_bus); smi_en.u64 = 0; - cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); return 0; } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 02a4615b6..12f44c53c 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -167,7 +167,9 @@ static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np) * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. * @mdio_bus_np: Pointer to the mii_bus. * - * Returns a pointer to the mii_bus, or NULL if none found. + * Returns a reference to the mii_bus, or NULL if none found. The + * embedded struct device will have its reference count incremented, + * and this must be put once the bus is finished with. * * Because the association of a device_node and mii_bus is made via * of_mdiobus_register(), the mii_bus cannot be found before it is @@ -234,15 +236,18 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, #endif /** - * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus + * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus * @bus: target mii_bus + * @owner: module containing bus accessor functions * * Description: Called by a bus driver to bring up all the PHYs - * on a given bus, and attach them to the bus. + * on a given bus, and attach them to the bus. Drivers should use + * mdiobus_register() rather than __mdiobus_register() unless they + * need to pass a specific owner module. * * Returns 0 on success or < 0 on error. 
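/* Sketch: the matching header change is not visible in this hunk, but
 * presumably it keeps existing callers working by capturing their
 * module at the call site, in the usual fashion:
 *
 *	#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
 *
 * Recording bus->owner this way is what lets phy_attach_direct(),
 * reworked later in this patch, pin the bus driver with
 * try_module_get(bus->owner) for as long as a PHY stays attached.
 */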
*/ -int mdiobus_register(struct mii_bus *bus) +int __mdiobus_register(struct mii_bus *bus, struct module *owner) { int i, err; @@ -253,6 +258,7 @@ int mdiobus_register(struct mii_bus *bus) BUG_ON(bus->state != MDIOBUS_ALLOCATED && bus->state != MDIOBUS_UNREGISTERED); + bus->owner = owner; bus->dev.parent = bus->parent; bus->dev.class = &mdio_bus_class; bus->dev.groups = NULL; @@ -288,13 +294,16 @@ int mdiobus_register(struct mii_bus *bus) error: while (--i >= 0) { - if (bus->phy_map[i]) - device_unregister(&bus->phy_map[i]->dev); + struct phy_device *phydev = bus->phy_map[i]; + if (phydev) { + phy_device_remove(phydev); + phy_device_free(phydev); + } } device_del(&bus->dev); return err; } -EXPORT_SYMBOL(mdiobus_register); +EXPORT_SYMBOL(__mdiobus_register); void mdiobus_unregister(struct mii_bus *bus) { @@ -304,9 +313,11 @@ void mdiobus_unregister(struct mii_bus *bus) bus->state = MDIOBUS_UNREGISTERED; for (i = 0; i < PHY_MAX_ADDR; i++) { - if (bus->phy_map[i]) - device_unregister(&bus->phy_map[i]->dev); - bus->phy_map[i] = NULL; + struct phy_device *phydev = bus->phy_map[i]; + if (phydev) { + phy_device_remove(phydev); + phy_device_free(phydev); + } } device_del(&bus->dev); } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 499185eaf..cf6312faf 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -514,6 +514,27 @@ static int ksz8873mll_read_status(struct phy_device *phydev) return 0; } +static int ksz9031_read_status(struct phy_device *phydev) +{ + int err; + int regval; + + err = genphy_read_status(phydev); + if (err) + return err; + + /* Make sure the PHY is not broken. Read idle error count, + * and reset the PHY if it is maxed out. + */ + regval = phy_read(phydev, MII_STAT1000); + if ((regval & 0xFF) == 0xFF) { + phy_init_hw(phydev); + phydev->link = 0; + } + + return 0; +} + static int ksz8873mll_config_aneg(struct phy_device *phydev) { return 0; @@ -772,7 +793,7 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz9021_type, .config_init = ksz9031_config_init, .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, + .read_status = ksz9031_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .suspend = genphy_suspend, diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c new file mode 100644 index 000000000..c0a20ebd0 --- /dev/null +++ b/drivers/net/phy/microchip.c @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2015 Microchip Technology + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
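The probe routine below identifies the internal PHY with phy_read_mmd_indirect(). For orientation: clause-45 MMD registers are reached through the two clause-22 access registers (13 = MMD access control, 14 = MMD access address/data), roughly as in this sketch. demo_read_mmd() is a hypothetical condensation, and a real caller should also check the phy_write() return values:

	/* Indirect MMD read via the clause-22 access registers. */
	static int demo_read_mmd(struct phy_device *phydev, int devad, u16 reg)
	{
		phy_write(phydev, 0x0d, devad);		 /* function = address */
		phy_write(phydev, 0x0e, reg);		 /* latch register number */
		phy_write(phydev, 0x0d, 0x4000 | devad); /* function = data */
		return phy_read(phydev, 0x0e);		 /* data word or < 0 */
	}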
+ */ +#include +#include +#include +#include +#include +#include + +#define DRIVER_AUTHOR "WOOJUNG HUH " +#define DRIVER_DESC "Microchip LAN88XX PHY driver" + +struct lan88xx_priv { + int chip_id; + int chip_rev; + __u32 wolopts; +}; + +static int lan88xx_phy_config_intr(struct phy_device *phydev) +{ + int rc; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* unmask all source and clear them before enable */ + rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF); + rc = phy_read(phydev, LAN88XX_INT_STS); + rc = phy_write(phydev, LAN88XX_INT_MASK, + LAN88XX_INT_MASK_MDINTPIN_EN_ | + LAN88XX_INT_MASK_LINK_CHANGE_); + } else { + rc = phy_write(phydev, LAN88XX_INT_MASK, 0); + } + + return rc < 0 ? rc : 0; +} + +static int lan88xx_phy_ack_interrupt(struct phy_device *phydev) +{ + int rc = phy_read(phydev, LAN88XX_INT_STS); + + return rc < 0 ? rc : 0; +} + +int lan88xx_suspend(struct phy_device *phydev) +{ + struct lan88xx_priv *priv = phydev->priv; + + /* do not power down PHY when WOL is enabled */ + if (!priv->wolopts) + genphy_suspend(phydev); + + return 0; +} + +static int lan88xx_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->dev; + struct lan88xx_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->wolopts = 0; + + /* these values can be used to identify internal PHY */ + priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID, + 3, phydev->addr); + priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV, + 3, phydev->addr); + + phydev->priv = priv; + + return 0; +} + +static void lan88xx_remove(struct phy_device *phydev) +{ + struct device *dev = &phydev->dev; + struct lan88xx_priv *priv = phydev->priv; + + if (priv) + devm_kfree(dev, priv); +} + +static int lan88xx_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + struct lan88xx_priv *priv = phydev->priv; + + priv->wolopts = wol->wolopts; + + return 0; +} + +static struct phy_driver microchip_phy_driver[] = { +{ + .phy_id = 0x0007c130, + .phy_id_mask = 0xfffffff0, + .name = "Microchip LAN88xx", + + .features = (PHY_GBIT_FEATURES | + SUPPORTED_Pause | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + + .probe = lan88xx_probe, + .remove = lan88xx_remove, + + .config_init = genphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + + .ack_interrupt = lan88xx_phy_ack_interrupt, + .config_intr = lan88xx_phy_config_intr, + + .suspend = lan88xx_suspend, + .resume = genphy_resume, + .set_wol = lan88xx_set_wol, + + .driver = { .owner = THIS_MODULE, } +} }; + +module_phy_driver(microchip_phy_driver); + +static struct mdio_device_id __maybe_unused microchip_tbl[] = { + { 0x0007c130, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, microchip_tbl); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 34fe339f4..adb48abaf 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -353,6 +353,8 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) phydev->duplex = cmd->duplex; + phydev->mdix = cmd->eth_tp_mdix_ctrl; + /* Restart the PHY */ phy_start_aneg(phydev); @@ -377,6 +379,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) cmd->transceiver = phy_is_internal(phydev) ? 
XCVR_INTERNAL : XCVR_EXTERNAL; cmd->autoneg = phydev->autoneg; + cmd->eth_tp_mdix_ctrl = phydev->mdix; return 0; } @@ -1037,7 +1040,7 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, struct phy_driver *phydrv = phydev->drv; int value = -1; - if (phydrv->read_mmd_indirect == NULL) { + if (!phydrv->read_mmd_indirect) { struct mii_bus *bus = phydev->bus; mutex_lock(&bus->mdio_lock); @@ -1074,7 +1077,7 @@ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, { struct phy_driver *phydrv = phydev->drv; - if (phydrv->write_mmd_indirect == NULL) { + if (!phydrv->write_mmd_indirect) { struct mii_bus *bus = phydev->bus; mutex_lock(&bus->mdio_lock); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 55f01788d..f761288ab 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -156,8 +156,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, /* We allocate the device, and initialize the default values */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (NULL == dev) - return (struct phy_device *)PTR_ERR((void *)-ENOMEM); + if (!dev) + return ERR_PTR(-ENOMEM); dev->dev.release = phy_device_release; @@ -178,7 +178,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, dev->bus = bus; dev->dev.parent = &bus->dev; dev->dev.bus = &mdio_bus_type; - dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL; + dev->irq = bus->irq ? bus->irq[addr] : PHY_POLL; dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr); dev->state = PHY_DOWN; @@ -383,6 +383,24 @@ int phy_device_register(struct phy_device *phydev) } EXPORT_SYMBOL(phy_device_register); +/** + * phy_device_remove - Remove a previously registered phy device from the MDIO bus + * @phydev: phy_device structure to remove + * + * This doesn't free the phy_device itself, it merely reverses the effects + * of phy_device_register(). Use phy_device_free() to free the device + * after calling this function. + */ +void phy_device_remove(struct phy_device *phydev) +{ + struct mii_bus *bus = phydev->bus; + int addr = phydev->addr; + + device_del(&phydev->dev); + bus->phy_map[addr] = NULL; +} +EXPORT_SYMBOL(phy_device_remove); + /** * phy_find_first - finds the first PHY device on the bus * @bus: the target MII bus @@ -578,18 +596,26 @@ EXPORT_SYMBOL(phy_init_hw); * generic driver is used. The phy_device is given a ptr to * the attaching device, and given a callback for link status * change. The phy_device is returned to the attaching driver. + * This function takes a reference on the phy device. */ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, u32 flags, phy_interface_t interface) { + struct mii_bus *bus = phydev->bus; struct device *d = &phydev->dev; - struct module *bus_module; int err; + if (!try_module_get(bus->owner)) { + dev_err(&dev->dev, "failed to get the bus module\n"); + return -EIO; + } + + get_device(d); + /* Assume that if there is no driver, that it doesn't * exist, and we should use the genphy driver. */ - if (NULL == d->driver) { + if (!d->driver) { if (phydev->is_c45) d->driver = &genphy_driver[GENPHY_DRV_10G].driver; else @@ -600,20 +626,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, err = device_bind_driver(d); if (err) - return err; + goto error; } if (phydev->attached_dev) { dev_err(&dev->dev, "PHY already attached\n"); - return -EBUSY; - } - - /* Increment the bus module reference count */ - bus_module = phydev->bus->dev.driver ? 
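/* The reworked attach path above pins both the bus driver module
 * (try_module_get(bus->owner)) and the phy device (get_device()) before any
 * failure point; the error label added below releases them in reverse order,
 * and phy_detach() drops the same two references at teardown. A sketch of
 * the contract callers now see (names illustrative):
 *
 *	err = phy_attach_direct(netdev, phydev, 0, interface);
 *	if (err)
 *		return err;	 no references are held on failure
 *	...
 *	phy_detach(phydev);	 drops the device and module references
 */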
- phydev->bus->dev.driver->owner : NULL; - if (!try_module_get(bus_module)) { - dev_err(&dev->dev, "failed to get the bus module\n"); - return -EIO; + err = -EBUSY; + goto error; } phydev->attached_dev = dev; @@ -636,6 +655,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, phy_resume(phydev); return err; + +error: + put_device(d); + module_put(bus->owner); + return err; } EXPORT_SYMBOL(phy_attach_direct); @@ -677,14 +701,15 @@ EXPORT_SYMBOL(phy_attach); /** * phy_detach - detach a PHY device from its network device * @phydev: target phy_device struct + * + * This detaches the phy device from its network device and the phy + * driver, and drops the reference count taken in phy_attach_direct(). */ void phy_detach(struct phy_device *phydev) { + struct mii_bus *bus; int i; - if (phydev->bus->dev.driver) - module_put(phydev->bus->dev.driver->owner); - phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; phy_suspend(phydev); @@ -700,6 +725,15 @@ void phy_detach(struct phy_device *phydev) break; } } + + /* + * The phydev might go away on the put_device() below, so avoid + * a use-after-free bug by reading the underlying bus first. + */ + bus = phydev->bus; + + put_device(&phydev->dev); + module_put(bus->owner); } EXPORT_SYMBOL(phy_detach); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 45353613b..43ab69136 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -136,6 +136,19 @@ static struct phy_driver realtek_drvs[] = { .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, .driver = { .owner = THIS_MODULE,}, + }, { + .phy_id = 0x001cc914, + .name = "RTL8211DN Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = rtl821x_ack_interrupt, + .config_intr = rtl8211e_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc915, .name = "RTL8211E Gigabit Ethernet", @@ -170,6 +183,7 @@ module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc912, 0x001fffff }, + { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, { 0x001cc916, 0x001fffff }, { } diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 70b089587..dc2da8770 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -43,16 +43,25 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) static int smsc_phy_config_init(struct phy_device *phydev) { + int __maybe_unused len; + struct device *dev __maybe_unused = &phydev->dev; + struct device_node *of_node __maybe_unused = dev->of_node; int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); + int enable_energy = 1; if (rc < 0) return rc; - /* Enable energy detect mode for this SMSC Transceivers */ - rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, - rc | MII_LAN83C185_EDPWRDOWN); - if (rc < 0) - return rc; + if (of_find_property(of_node, "smsc,disable-energy-detect", &len)) + enable_energy = 0; + + if (enable_energy) { + /* Enable energy detect mode for this SMSC Transceivers */ + rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, + rc | MII_LAN83C185_EDPWRDOWN); + if (rc < 0) + return rc; + } return smsc_phy_ack_interrupt(phydev); } diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 465301592..f091d691c 100644 --- a/drivers/net/phy/spi_ks8995.c +++ 
b/drivers/net/phy/spi_ks8995.c @@ -209,8 +209,6 @@ static int ks8995_reset(struct ks8995_switch *ks) return ks8995_start(ks); } -/* ------------------------------------------------------------------------ */ - static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -220,19 +218,9 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj, dev = container_of(kobj, struct device, kobj); ks8995 = dev_get_drvdata(dev); - if (unlikely(off > ks8995->regs_attr.size)) - return 0; - - if ((off + count) > ks8995->regs_attr.size) - count = ks8995->regs_attr.size - off; - - if (unlikely(!count)) - return count; - return ks8995_read(ks8995, buf, off, count); } - static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -242,19 +230,9 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj, dev = container_of(kobj, struct device, kobj); ks8995 = dev_get_drvdata(dev); - if (unlikely(off >= ks8995->regs_attr.size)) - return -EFBIG; - - if ((off + count) > ks8995->regs_attr.size) - count = ks8995->regs_attr.size - off; - - if (unlikely(!count)) - return count; - return ks8995_write(ks8995, buf, off, count); } - static const struct bin_attribute ks8995_registers_attr = { .attr = { .name = "registers", diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c new file mode 100644 index 000000000..91e1bec60 --- /dev/null +++ b/drivers/net/phy/teranetics.c @@ -0,0 +1,135 @@ +/* + * Driver for Teranetics PHY + * + * Author: Shaohui Xie <Shaohui.Xie@freescale.com> + * + * Copyright 2015 Freescale Semiconductor, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/mdio.h> +#include <linux/phy.h> + +MODULE_DESCRIPTION("Teranetics PHY driver"); +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>"); +MODULE_LICENSE("GPL v2"); + +#define PHY_ID_TN2020 0x00a19410 +#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 +#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 +#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 +#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 +#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 + +#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \ + MDIO_PHYXS_LNSTAT_SYNC1 | \ + MDIO_PHYXS_LNSTAT_SYNC2 | \ + MDIO_PHYXS_LNSTAT_SYNC3 | \ + MDIO_PHYXS_LNSTAT_ALIGN) + +static int teranetics_config_init(struct phy_device *phydev) +{ + phydev->supported = SUPPORTED_10000baseT_Full; + phydev->advertising = SUPPORTED_10000baseT_Full; + + return 0; +} + +static int teranetics_soft_reset(struct phy_device *phydev) +{ + return 0; +} + +static int teranetics_aneg_done(struct phy_device *phydev) +{ + int reg; + + /* auto negotiation state can only be checked when using copper + * port, if using fiber port, just lie it's done. + */ + if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + return (reg < 0) ?
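/* MDIO_PHYXS_LANE_READY above groups the four per-lane sync flags with the
 * alignment flag, so read_status only reports link when the whole XAUI-side
 * lane status is good. Checking it is an exact-mask compare, not a plain
 * bitwise test; every bit must be set. Equivalent to the driver's check:
 *
 *	reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT);
 *	if (reg < 0 ||
 *	    (reg & MDIO_PHYXS_LANE_READY) != MDIO_PHYXS_LANE_READY)
 *		phydev->link = 0;
 */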
reg : (reg & BMSR_ANEGCOMPLETE); + } + + return 1; +} + +static int teranetics_config_aneg(struct phy_device *phydev) +{ + return 0; +} + +static int teranetics_read_status(struct phy_device *phydev) +{ + int reg; + + phydev->link = 1; + + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + + if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { + reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT); + if (reg < 0 || + !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) { + phydev->link = 0; + return 0; + } + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) + phydev->link = 0; + } + + return 0; +} + +static int teranetics_match_phy_device(struct phy_device *phydev) +{ + return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020; +} + +static struct phy_driver teranetics_driver[] = { +{ + .phy_id = PHY_ID_TN2020, + .phy_id_mask = 0xffffffff, + .name = "Teranetics TN2020", + .soft_reset = teranetics_soft_reset, + .aneg_done = teranetics_aneg_done, + .config_init = teranetics_config_init, + .config_aneg = teranetics_config_aneg, + .read_status = teranetics_read_status, + .match_phy_device = teranetics_match_phy_device, + .driver = { .owner = THIS_MODULE,}, +}, +}; + +static int __init teranetics_init(void) +{ + return phy_drivers_register(teranetics_driver, + ARRAY_SIZE(teranetics_driver)); +} + +static void __exit teranetics_exit(void) +{ + return phy_drivers_unregister(teranetics_driver, + ARRAY_SIZE(teranetics_driver)); +} + +module_init(teranetics_init); +module_exit(teranetics_exit); + +static struct mdio_device_id __maybe_unused teranetics_tbl[] = { + { PHY_ID_TN2020, 0xffffffff }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, teranetics_tbl); diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 17cad1851..76cad712d 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -66,7 +66,6 @@ #define PHY_ID_VSC8244 0x000fc6c0 #define PHY_ID_VSC8514 0x00070670 #define PHY_ID_VSC8574 0x000704a0 -#define PHY_ID_VSC8641 0x00070431 #define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -272,18 +271,6 @@ static struct phy_driver vsc82xx_driver[] = { .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, .driver = { .owner = THIS_MODULE,}, -}, { - .phy_id = PHY_ID_VSC8641, - .name = "Vitesse VSC8641", - .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, - .config_init = &vsc824x_config_init, - .config_aneg = &vsc82x4_config_aneg, - .read_status = &genphy_read_status, - .ack_interrupt = &vsc824x_ack_interrupt, - .config_intr = &vsc82xx_config_intr, - .driver = { .owner = THIS_MODULE,}, }, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", @@ -331,7 +318,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8244, 0x000fffc0 }, { PHY_ID_VSC8514, 0x000ffff0 }, { PHY_ID_VSC8574, 0x000ffff0 }, - { PHY_ID_VSC8641, 0x000ffff0 }, { PHY_ID_VSC8662, 0x000ffff0 }, { PHY_ID_VSC8221, 0x000ffff0 }, { PHY_ID_VSC8211, 0x000ffff0 }, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 487be20b6..ed0044675 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -283,6 +283,8 @@ static int unit_set(struct idr *p, void *ptr, int n); static void unit_put(struct idr *p, int n); static void *unit_find(struct idr *p, int n); +static const struct net_device_ops ppp_netdev_ops; + static struct class *ppp_class; /* per net-namespace data 
*/ @@ -919,13 +921,22 @@ static __net_init int ppp_init_net(struct net *net) static __net_exit void ppp_exit_net(struct net *net) { struct ppp_net *pn = net_generic(net, ppp_net_id); + struct net_device *dev; + struct net_device *aux; struct ppp *ppp; LIST_HEAD(list); int id; rtnl_lock(); + for_each_netdev_safe(net, dev, aux) { + if (dev->netdev_ops == &ppp_netdev_ops) + unregister_netdevice_queue(dev, &list); + } + idr_for_each_entry(&pn->units_idr, ppp, id) - unregister_netdevice_queue(ppp->dev, &list); + /* Skip devices already unregistered by previous loop */ + if (!net_eq(dev_net(ppp->dev), net)) + unregister_netdevice_queue(ppp->dev, &list); unregister_netdevice_many(&list); rtnl_unlock(); @@ -1017,6 +1028,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) proto = npindex_to_proto[npi]; put_unaligned_be16(proto, pp); + skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); skb_queue_tail(&ppp->file.xq, skb); ppp_xmit_process(ppp); return NETDEV_TX_OK; @@ -1137,7 +1149,6 @@ static void ppp_setup(struct net_device *dev) dev->tx_queue_len = 3; dev->type = ARPHRD_PPP; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; - dev->features |= NETIF_F_NETNS_LOCAL; netif_keep_dst(dev); } @@ -1900,6 +1911,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) skb->dev = ppp->dev; skb->protocol = htons(npindex_to_ethertype[npi]); skb_reset_mac_header(skb); + skb_scrub_packet(skb, !net_eq(ppp->ppp_net, + dev_net(ppp->dev))); netif_rx(skb); } } diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 2ed75060d..5e0b43283 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -589,7 +589,7 @@ static int pppoe_release(struct socket *sock) po = pppox_sk(sk); - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { + if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index dac7a0d9b..01f08a775 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -396,7 +396,7 @@ static int rionet_close(struct net_device *ndev) return 0; } -static int rionet_remove_dev(struct device *dev, struct subsys_interface *sif) +static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif) { struct rio_dev *rdev = to_rio_dev(dev); unsigned char netid = rdev->net->hport->id; @@ -416,8 +416,6 @@ static int rionet_remove_dev(struct device *dev, struct subsys_interface *sif) } } } - - return 0; } static void rionet_get_drvinfo(struct net_device *ndev, diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index daa054b3f..651d35ea2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2051,9 +2051,9 @@ static void team_setup(struct net_device *dev) dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; dev->destructor = team_destructor; - dev->tx_queue_len = 0; dev->flags |= IFF_MULTICAST; dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags |= IFF_NO_QUEUE; /* * Indicate we support unicast address filtering. 
That way core won't diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 06a039414..976aa9704 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -961,6 +961,7 @@ static const struct net_device_ops tap_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = tun_poll_controller, #endif + .ndo_features_check = passthru_features_check, }; static void tun_flow_init(struct tun_struct *tun) diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 7ba8d0885..e66805eef 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -106,6 +106,16 @@ config USB_RTL8152 To compile this driver as a module, choose M here: the module will be called r8152. +config USB_LAN78XX + tristate "Microchip LAN78XX Based USB Ethernet Adapters" + select MII + help + This option adds support for Microchip LAN78XX based USB 2 + & USB 3 10/100/1000 Ethernet adapters. + + To compile this driver as a module, choose M here: the + module will be called lan78xx. + config USB_USBNET tristate "Multi-purpose USB Networking Framework" select MII @@ -154,6 +164,7 @@ config USB_NET_AX8817X * Aten UC210T * ASIX AX88172 * Billionton Systems, USB2AR + * Billionton Systems, GUSB2AM-1G-B * Buffalo LUA-U2-KTX * Corega FEther USB2-TX * D-Link DUB-E100 @@ -573,4 +584,15 @@ config USB_VL600 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17 +config USB_NET_CH9200 + tristate "QingHeng CH9200 USB ethernet support" + depends on USB_USBNET + select MII + help + Choose this option if you have a USB ethernet adapter with a QinHeng + CH9200 chipset. + + To compile this driver as a module, choose M here: the + module will be called ch9200. + endif # USB_NET_DRIVERS diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index e2797f1e1..b5f04068d 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o obj-$(CONFIG_USB_RTL8150) += rtl8150.o obj-$(CONFIG_USB_RTL8152) += r8152.o obj-$(CONFIG_USB_HSO) += hso.o +obj-$(CONFIG_USB_LAN78XX) += lan78xx.o obj-$(CONFIG_USB_NET_AX8817X) += asix.o asix-y := asix_devices.o asix_common.o ax88172a.o obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o @@ -37,4 +38,4 @@ obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o obj-$(CONFIG_USB_VL600) += lg-vl600.o obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o - +obj-$(CONFIG_USB_NET_CH9200) += ch9200.o diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 75d6f2672..079069a06 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -91,8 +91,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, } rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, rx->size); - if (!rx->ax_skb) + if (!rx->ax_skb) { + rx->size = 0; return 0; + } } if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 1173a24fe..5cabefc23 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -958,6 +958,10 @@ static const struct usb_device_id products [] = { // Billionton Systems, USB2AR USB_DEVICE (0x08dd, 0x90ff), .driver_info = (unsigned long) &ax8817x_info, +}, { + // Billionton Systems, GUSB2AM-1G-B + USB_DEVICE(0x08dd, 0x0114), + .driver_info = (unsigned long) &ax88178_info, }, { // ATEN UC210T USB_DEVICE (0x0557, 0x2009), diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c new file mode 100644 index 000000000..5e151e6a3 --- 
/dev/null +++ b/drivers/net/usb/ch9200.c @@ -0,0 +1,432 @@ +/* + * USB 10M/100M ethernet adapter + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/stddef.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/usb.h> +#include <linux/crc32.h> +#include <linux/usb/usbnet.h> +#include <linux/slab.h> + +#define CH9200_VID 0x1A86 +#define CH9200_PID_E092 0xE092 + +#define CTRL_TIMEOUT_MS 1000 + +#define CONTROL_TIMEOUT_MS 1000 + +#define REQUEST_READ 0x0E +#define REQUEST_WRITE 0x0F + +/* Address space: + * 00-63 : MII + * 64-128: MAC + * + * Note: all accesses must be 16-bit + */ + +#define MAC_REG_CTRL 64 +#define MAC_REG_STATUS 66 +#define MAC_REG_INTERRUPT_MASK 68 +#define MAC_REG_PHY_COMMAND 70 +#define MAC_REG_PHY_DATA 72 +#define MAC_REG_STATION_L 74 +#define MAC_REG_STATION_M 76 +#define MAC_REG_STATION_H 78 +#define MAC_REG_HASH_L 80 +#define MAC_REG_HASH_M1 82 +#define MAC_REG_HASH_M2 84 +#define MAC_REG_HASH_H 86 +#define MAC_REG_THRESHOLD 88 +#define MAC_REG_FIFO_DEPTH 90 +#define MAC_REG_PAUSE 92 +#define MAC_REG_FLOW_CONTROL 94 + +/* Control register bits + * + * Note: bits 13 and 15 are reserved + */ +#define LOOPBACK (0x01 << 14) +#define BASE100X (0x01 << 12) +#define MBPS_10 (0x01 << 11) +#define DUPLEX_MODE (0x01 << 10) +#define PAUSE_FRAME (0x01 << 9) +#define PROMISCUOUS (0x01 << 8) +#define MULTICAST (0x01 << 7) +#define BROADCAST (0x01 << 6) +#define HASH (0x01 << 5) +#define APPEND_PAD (0x01 << 4) +#define APPEND_CRC (0x01 << 3) +#define TRANSMITTER_ACTION (0x01 << 2) +#define RECEIVER_ACTION (0x01 << 1) +#define DMA_ACTION (0x01 << 0) + +/* Status register bits + * + * Note: bits 7-15 are reserved + */ +#define ALIGNMENT (0x01 << 6) +#define FIFO_OVER_RUN (0x01 << 5) +#define FIFO_UNDER_RUN (0x01 << 4) +#define RX_ERROR (0x01 << 3) +#define RX_COMPLETE (0x01 << 2) +#define TX_ERROR (0x01 << 1) +#define TX_COMPLETE (0x01 << 0) + +/* FIFO depth register bits + * + * Note: bits 6 and 14 are reserved + */ + +#define ETH_TXBD (0x01 << 15) +#define ETN_TX_FIFO_DEPTH (0x01 << 8) +#define ETH_RXBD (0x01 << 7) +#define ETH_RX_FIFO_DEPTH (0x01 << 0) + +static int control_read(struct usbnet *dev, + unsigned char request, unsigned short value, + unsigned short index, void *data, unsigned short size, + int timeout) +{ + unsigned char *buf = NULL; + unsigned char request_type; + int err = 0; + + if (request == REQUEST_READ) + request_type = (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER); + else + request_type = (USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE); + + netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n", + index, size); + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + + err = usb_control_msg(dev->udev, + usb_rcvctrlpipe(dev->udev, 0), + request, request_type, value, index, buf, size, + timeout); + if (err == size) + memcpy(data, buf, size); + else if (err >= 0) + err = -EINVAL; + kfree(buf); + + return err; + +err_out: + return err; +} + +static int control_write(struct usbnet *dev, unsigned char request, + unsigned short value, unsigned short index, + void *data, unsigned short size, int timeout) +{ + unsigned char *buf = NULL; + unsigned char request_type; + int err = 0; + + if (request == REQUEST_WRITE) + request_type = (USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_OTHER); + else + request_type = (USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE); + + netdev_dbg(dev->net,
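/* control_read() above wraps usb_control_msg() with a bounce buffer so the
 * caller may pass stack memory: data goes through a kmalloc'd buffer and is
 * only copied out when the transfer returned exactly 'size' bytes. Typical
 * use, as in the MDIO helpers below (register offsets are doubled because
 * all CH9200 accesses are 16-bit):
 *
 *	unsigned char buff[2];
 *
 *	control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
 *		     CONTROL_TIMEOUT_MS);
 *	value = buff[0] | buff[1] << 8;
 */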
"Control_write() index=0x%02x size=%d\n", + index, size); + + if (data) { + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + memcpy(buf, data, size); + } + + err = usb_control_msg(dev->udev, + usb_sndctrlpipe(dev->udev, 0), + request, request_type, value, index, buf, size, + timeout); + if (err >= 0 && err < size) + err = -EINVAL; + kfree(buf); + + return 0; + +err_out: + return err; +} + +static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc) +{ + struct usbnet *dev = netdev_priv(netdev); + unsigned char buff[2]; + + netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n", + phy_id, loc); + + if (phy_id != 0) + return -ENODEV; + + control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02, + CONTROL_TIMEOUT_MS); + + return (buff[0] | buff[1] << 8); +} + +static void ch9200_mdio_write(struct net_device *netdev, + int phy_id, int loc, int val) +{ + struct usbnet *dev = netdev_priv(netdev); + unsigned char buff[2]; + + netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n", + phy_id, loc); + + if (phy_id != 0) + return; + + buff[0] = (unsigned char)val; + buff[1] = (unsigned char)(val >> 8); + + control_write(dev, REQUEST_WRITE, 0, loc * 2, buff, 0x02, + CONTROL_TIMEOUT_MS); +} + +static int ch9200_link_reset(struct usbnet *dev) +{ + struct ethtool_cmd ecmd; + + mii_check_media(&dev->mii, 1, 1); + mii_ethtool_gset(&dev->mii, &ecmd); + + netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n", + ecmd.speed, ecmd.duplex); + + return 0; +} + +static void ch9200_status(struct usbnet *dev, struct urb *urb) +{ + int link; + unsigned char *buf; + + if (urb->actual_length < 16) + return; + + buf = urb->transfer_buffer; + link = !!(buf[0] & 0x01); + + if (link) { + netif_carrier_on(dev->net); + usbnet_defer_kevent(dev, EVENT_LINK_RESET); + } else { + netif_carrier_off(dev->net); + } +} + +static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + gfp_t flags) +{ + int i = 0; + int len = 0; + int tx_overhead = 0; + + tx_overhead = 0x40; + + len = skb->len; + if (skb_headroom(skb) < tx_overhead) { + struct sk_buff *skb2; + + skb2 = skb_copy_expand(skb, tx_overhead, 0, flags); + dev_kfree_skb_any(skb); + skb = skb2; + if (!skb) + return NULL; + } + + __skb_push(skb, tx_overhead); + /* usbnet adds padding if length is a multiple of packet size + * if so, adjust length value in header + */ + if ((skb->len % dev->maxpacket) == 0) + len++; + + skb->data[0] = len; + skb->data[1] = len >> 8; + skb->data[2] = 0x00; + skb->data[3] = 0x80; + + for (i = 4; i < 48; i++) + skb->data[i] = 0x00; + + skb->data[48] = len; + skb->data[49] = len >> 8; + skb->data[50] = 0x00; + skb->data[51] = 0x80; + + for (i = 52; i < 64; i++) + skb->data[i] = 0x00; + + return skb; +} + +static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + int len = 0; + int rx_overhead = 0; + + rx_overhead = 64; + + if (unlikely(skb->len < rx_overhead)) { + dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); + return 0; + } + + len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8); + skb_trim(skb, len); + + return 1; +} + +static int get_mac_address(struct usbnet *dev, unsigned char *data) +{ + int err = 0; + unsigned char mac_addr[0x06]; + int rd_mac_len = 0; + + netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", + dev->udev->descriptor.idVendor, + dev->udev->descriptor.idProduct); + + memset(mac_addr, 0, sizeof(mac_addr)); + rd_mac_len = control_read(dev, REQUEST_READ, 0, + MAC_REG_STATION_L, mac_addr, 0x02, 
+ CONTROL_TIMEOUT_MS); + rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M, + mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS); + rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H, + mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS); + if (rd_mac_len != ETH_ALEN) + err = -EINVAL; + + data[0] = mac_addr[5]; + data[1] = mac_addr[4]; + data[2] = mac_addr[3]; + data[3] = mac_addr[2]; + data[4] = mac_addr[1]; + data[5] = mac_addr[0]; + + return err; +} + +static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int retval = 0; + unsigned char data[2]; + + retval = usbnet_get_endpoints(dev, intf); + if (retval) + return retval; + + dev->mii.dev = dev->net; + dev->mii.mdio_read = ch9200_mdio_read; + dev->mii.mdio_write = ch9200_mdio_write; + dev->mii.reg_num_mask = 0x1f; + + dev->mii.phy_id_mask = 0x1f; + + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + dev->rx_urb_size = 24 * 64 + 16; + mii_nway_restart(&dev->mii); + + data[0] = 0x01; + data[1] = 0x0F; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data, + 0x02, CONTROL_TIMEOUT_MS); + + data[0] = 0xA0; + data[1] = 0x90; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data, + 0x02, CONTROL_TIMEOUT_MS); + + data[0] = 0x30; + data[1] = 0x00; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data, + 0x02, CONTROL_TIMEOUT_MS); + + data[0] = 0x17; + data[1] = 0xD8; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL, + data, 0x02, CONTROL_TIMEOUT_MS); + + /* Undocumented register */ + data[0] = 0x01; + data[1] = 0x00; + retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02, + CONTROL_TIMEOUT_MS); + + data[0] = 0x5F; + data[1] = 0x0D; + retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02, + CONTROL_TIMEOUT_MS); + + retval = get_mac_address(dev, dev->net->dev_addr); + + return retval; +} + +static const struct driver_info ch9200_info = { + .description = "CH9200 USB to Network Adaptor", + .flags = FLAG_ETHER, + .bind = ch9200_bind, + .rx_fixup = ch9200_rx_fixup, + .tx_fixup = ch9200_tx_fixup, + .status = ch9200_status, + .link_reset = ch9200_link_reset, + .reset = ch9200_link_reset, +}; + +static const struct usb_device_id ch9200_products[] = { + { + USB_DEVICE(0x1A86, 0xE092), + .driver_info = (unsigned long)&ch9200_info, + }, + {}, +}; + +MODULE_DEVICE_TABLE(usb, ch9200_products); + +static struct usb_driver ch9200_driver = { + .name = "ch9200", + .id_table = ch9200_products, + .probe = usbnet_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, +}; + +module_usb_driver(ch9200_driver); + +MODULE_DESCRIPTION("QinHeng CH9200 USB Network device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c new file mode 100644 index 000000000..a39518fc9 --- /dev/null +++ b/drivers/net/usb/lan78xx.c @@ -0,0 +1,3494 @@ +/* + * Copyright (C) 2015 Microchip Technology + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/version.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/usb.h> +#include <linux/crc32.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/if_vlan.h> +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/mdio.h> +#include <net/ip6_checksum.h> +#include "lan78xx.h" + +#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" +#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" +#define DRIVER_NAME "lan78xx" +#define DRIVER_VERSION "1.0.0" + +#define TX_TIMEOUT_JIFFIES (5 * HZ) +#define THROTTLE_JIFFIES (HZ / 8) +#define UNLINK_TIMEOUT_MS 3 + +#define RX_MAX_QUEUE_MEMORY (60 * 1518) + +#define SS_USB_PKT_SIZE (1024) +#define HS_USB_PKT_SIZE (512) +#define FS_USB_PKT_SIZE (64) + +#define MAX_RX_FIFO_SIZE (12 * 1024) +#define MAX_TX_FIFO_SIZE (12 * 1024) +#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE) +#define DEFAULT_BULK_IN_DELAY (0x0800) +#define MAX_SINGLE_PACKET_SIZE (9000) +#define DEFAULT_TX_CSUM_ENABLE (true) +#define DEFAULT_RX_CSUM_ENABLE (true) +#define DEFAULT_TSO_CSUM_ENABLE (true) +#define DEFAULT_VLAN_FILTER_ENABLE (true) +#define INTERNAL_PHY_ID (2) /* 2: GMII */ +#define TX_OVERHEAD (8) +#define RXW_PADDING 2 + +#define LAN78XX_USB_VENDOR_ID (0x0424) +#define LAN7800_USB_PRODUCT_ID (0x7800) +#define LAN7850_USB_PRODUCT_ID (0x7850) +#define LAN78XX_EEPROM_MAGIC (0x78A5) +#define LAN78XX_OTP_MAGIC (0x78F3) + +#define MII_READ 1 +#define MII_WRITE 0 + +#define EEPROM_INDICATOR (0xA5) +#define EEPROM_MAC_OFFSET (0x01) +#define MAX_EEPROM_SIZE 512 +#define OTP_INDICATOR_1 (0xF3) +#define OTP_INDICATOR_2 (0xF7) + +#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \ + WAKE_MCAST | WAKE_BCAST | \ + WAKE_ARP | WAKE_MAGIC) + +/* USB related defines */ +#define BULK_IN_PIPE 1 +#define BULK_OUT_PIPE 2 + +/* default autosuspend delay (mSec)*/ +#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000) + +static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = { + "RX FCS Errors", + "RX Alignment Errors", + "Rx Fragment Errors", + "RX Jabber Errors", + "RX Undersize Frame Errors", + "RX Oversize Frame Errors", + "RX Dropped Frames", + "RX Unicast Byte Count", + "RX Broadcast Byte Count", + "RX Multicast Byte Count", + "RX Unicast Frames", + "RX Broadcast Frames", + "RX Multicast Frames", + "RX Pause Frames", + "RX 64 Byte Frames", + "RX 65 - 127 Byte Frames", + "RX 128 - 255 Byte Frames", + "RX 256 - 511 Bytes Frames", + "RX 512 - 1023 Byte Frames", + "RX 1024 - 1518 Byte Frames", + "RX Greater 1518 Byte Frames", + "EEE RX LPI Transitions", + "EEE RX LPI Time", + "TX FCS Errors", + "TX Excess Deferral Errors", + "TX Carrier Errors", + "TX Bad Byte Count", + "TX Single Collisions", + "TX Multiple Collisions", + "TX Excessive Collision", + "TX Late Collisions", + "TX Unicast Byte Count", + "TX Broadcast Byte Count", + "TX Multicast Byte Count", + "TX Unicast Frames", + "TX Broadcast Frames", + "TX Multicast Frames", + "TX Pause Frames", + "TX 64 Byte Frames", + "TX 65 - 127 Byte Frames", + "TX 128 - 255 Byte Frames", + "TX 256 - 511 Bytes Frames", + "TX 512 - 1023 Byte Frames", + "TX 1024 - 1518 Byte Frames", + "TX Greater 1518 Byte Frames", + "EEE TX LPI Transitions", + "EEE TX LPI Time", +}; + +struct lan78xx_statstage { + u32 rx_fcs_errors; + u32 rx_alignment_errors; + u32 rx_fragment_errors; + u32 rx_jabber_errors; + u32 rx_undersize_frame_errors; + u32 rx_oversize_frame_errors; + u32 rx_dropped_frames; + u32 rx_unicast_byte_count; + u32 rx_broadcast_byte_count; + u32 rx_multicast_byte_count; + u32 rx_unicast_frames; + u32 rx_broadcast_frames; + u32
rx_multicast_frames; + u32 rx_pause_frames; + u32 rx_64_byte_frames; + u32 rx_65_127_byte_frames; + u32 rx_128_255_byte_frames; + u32 rx_256_511_bytes_frames; + u32 rx_512_1023_byte_frames; + u32 rx_1024_1518_byte_frames; + u32 rx_greater_1518_byte_frames; + u32 eee_rx_lpi_transitions; + u32 eee_rx_lpi_time; + u32 tx_fcs_errors; + u32 tx_excess_deferral_errors; + u32 tx_carrier_errors; + u32 tx_bad_byte_count; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_excessive_collision; + u32 tx_late_collisions; + u32 tx_unicast_byte_count; + u32 tx_broadcast_byte_count; + u32 tx_multicast_byte_count; + u32 tx_unicast_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_pause_frames; + u32 tx_64_byte_frames; + u32 tx_65_127_byte_frames; + u32 tx_128_255_byte_frames; + u32 tx_256_511_bytes_frames; + u32 tx_512_1023_byte_frames; + u32 tx_1024_1518_byte_frames; + u32 tx_greater_1518_byte_frames; + u32 eee_tx_lpi_transitions; + u32 eee_tx_lpi_time; +}; + +struct lan78xx_net; + +struct lan78xx_priv { + struct lan78xx_net *dev; + u32 rfe_ctl; + u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */ + u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */ + u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; + struct mutex dataport_mutex; /* for dataport access */ + spinlock_t rfe_ctl_lock; /* for rfe register access */ + struct work_struct set_multicast; + struct work_struct set_vlan; + u32 wol; +}; + +enum skb_state { + illegal = 0, + tx_start, + tx_done, + rx_start, + rx_done, + rx_cleanup, + unlink_start +}; + +struct skb_data { /* skb->cb is one of these */ + struct urb *urb; + struct lan78xx_net *dev; + enum skb_state state; + size_t length; +}; + +struct usb_context { + struct usb_ctrlrequest req; + struct lan78xx_net *dev; +}; + +#define EVENT_TX_HALT 0 +#define EVENT_RX_HALT 1 +#define EVENT_RX_MEMORY 2 +#define EVENT_STS_SPLIT 3 +#define EVENT_LINK_RESET 4 +#define EVENT_RX_PAUSED 5 +#define EVENT_DEV_WAKING 6 +#define EVENT_DEV_ASLEEP 7 +#define EVENT_DEV_OPEN 8 + +struct lan78xx_net { + struct net_device *net; + struct usb_device *udev; + struct usb_interface *intf; + void *driver_priv; + + int rx_qlen; + int tx_qlen; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct sk_buff_head done; + struct sk_buff_head rxq_pause; + struct sk_buff_head txq_pend; + + struct tasklet_struct bh; + struct delayed_work wq; + + struct usb_host_endpoint *ep_blkin; + struct usb_host_endpoint *ep_blkout; + struct usb_host_endpoint *ep_intr; + + int msg_enable; + + struct urb *urb_intr; + struct usb_anchor deferred; + + struct mutex phy_mutex; /* for phy access */ + unsigned pipe_in, pipe_out, pipe_intr; + + u32 hard_mtu; /* count any extra framing */ + size_t rx_urb_size; /* size for rx urbs */ + + unsigned long flags; + + wait_queue_head_t *wait; + unsigned char suspend_count; + + unsigned maxpacket; + struct timer_list delay; + + unsigned long data[5]; + struct mii_if_info mii; + + int link_on; + u8 mdix_ctrl; +}; + +/* use ethtool to change the level for any given device */ +static int msg_level = -1; +module_param(msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Override default message level"); + +static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) +{ + u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); + int ret; + + if (!buf) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), + USB_VENDOR_REQUEST_READ_REGISTER, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, index, buf, 4, USB_CTRL_GET_TIMEOUT); + 
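/* lan78xx_read_reg() and lan78xx_write_reg() bounce every 32-bit register
 * access through a kmalloc'd u32 (control-transfer data must be DMA-able,
 * so stack memory is avoided) and byte-swap with le32_to_cpus() /
 * cpu_to_le32s(). The rest of the driver composes them into
 * read-modify-write sequences, for example when toggling USB LPM bits:
 *
 *	u32 buf;
 *
 *	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
 *	buf |= USB_CFG1_DEV_U1_INIT_EN_;
 *	ret = lan78xx_write_reg(dev, USB_CFG1, buf);
 */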
if (likely(ret >= 0)) { + le32_to_cpus(buf); + *data = *buf; + } else { + netdev_warn(dev->net, + "Failed to read register index 0x%08x. ret = %d", + index, ret); + } + + kfree(buf); + + return ret; +} + +static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) +{ + u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); + int ret; + + if (!buf) + return -ENOMEM; + + *buf = data; + cpu_to_le32s(buf); + + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), + USB_VENDOR_REQUEST_WRITE_REGISTER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, index, buf, 4, USB_CTRL_SET_TIMEOUT); + if (unlikely(ret < 0)) { + netdev_warn(dev->net, + "Failed to write register index 0x%08x. ret = %d", + index, ret); + } + + kfree(buf); + + return ret; +} + +static int lan78xx_read_stats(struct lan78xx_net *dev, + struct lan78xx_statstage *data) +{ + int ret = 0; + int i; + struct lan78xx_statstage *stats; + u32 *src; + u32 *dst; + + stats = kmalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, + usb_rcvctrlpipe(dev->udev, 0), + USB_VENDOR_REQUEST_GET_STATS, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, + 0, + (void *)stats, + sizeof(*stats), + USB_CTRL_SET_TIMEOUT); + if (likely(ret >= 0)) { + src = (u32 *)stats; + dst = (u32 *)data; + for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) { + le32_to_cpus(&src[i]); + dst[i] = src[i]; + } + } else { + netdev_warn(dev->net, + "Failed to read stat ret = 0x%x", ret); + } + + kfree(stats); + + return ret; +} + +/* Loop until the read is completed with timeout called with phy_mutex held */ +static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = lan78xx_read_reg(dev, MII_ACC, &val); + if (unlikely(ret < 0)) + return -EIO; + + if (!(val & MII_ACC_MII_BUSY_)) + return 0; + } while (!time_after(jiffies, start_time + HZ)); + + return -EIO; +} + +static inline u32 mii_access(int id, int index, int read) +{ + u32 ret; + + ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_; + ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_; + if (read) + ret |= MII_ACC_MII_READ_; + else + ret |= MII_ACC_MII_WRITE_; + ret |= MII_ACC_MII_BUSY_; + + return ret; +} + +static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* set the address, index & direction (read from PHY) */ + phy_id &= dev->mii.phy_id_mask; + idx &= dev->mii.reg_num_mask; + addr = mii_access(phy_id, idx, MII_READ); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + ret = lan78xx_read_reg(dev, MII_DATA, &val); + + ret = (int)(val & 0xFFFF); + +done: + mutex_unlock(&dev->phy_mutex); + usb_autopm_put_interface(dev->intf); + return ret; +} + +static void lan78xx_mdio_write(struct net_device *netdev, int phy_id, + int idx, int regval) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + val = regval; + ret = 
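/* mii_access() packs one PHY management cycle into a single MII_ACC value:
 * the PHY address and register index in their bit fields, a read/write
 * flag, and MII_ACC_MII_BUSY_, which doubles as the completion flag that
 * lan78xx_phy_wait_not_busy() polls. A register write therefore condenses
 * to:
 *
 *	lan78xx_write_reg(dev, MII_DATA, regval);
 *	addr = mii_access(phy_id, idx, MII_WRITE);
 *	lan78xx_write_reg(dev, MII_ACC, addr);
 *	lan78xx_phy_wait_not_busy(dev);
 */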
lan78xx_write_reg(dev, MII_DATA, val); + + /* set the address, index & direction (write to PHY) */ + phy_id &= dev->mii.phy_id_mask; + idx &= dev->mii.reg_num_mask; + addr = mii_access(phy_id, idx, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + +done: + mutex_unlock(&dev->phy_mutex); + usb_autopm_put_interface(dev->intf); +} + +static void lan78xx_mmd_write(struct net_device *netdev, int phy_id, + int mmddev, int mmdidx, int regval) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + mmddev &= 0x1F; + + /* set up device address for MMD */ + ret = lan78xx_write_reg(dev, MII_DATA, mmddev); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* select register of MMD */ + val = mmdidx; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* select register data for MMD */ + val = PHY_MMD_CTRL_OP_DNI_ | mmddev; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* write to MMD */ + val = regval; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + +done: + mutex_unlock(&dev->phy_mutex); + usb_autopm_put_interface(dev->intf); +} + +static int lan78xx_mmd_read(struct net_device *netdev, int phy_id, + int mmddev, int mmdidx) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* set up device address for MMD */ + ret = lan78xx_write_reg(dev, MII_DATA, mmddev); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* select register of MMD */ + val = mmdidx; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* select register data for MMD */ + val = PHY_MMD_CTRL_OP_DNI_ | mmddev; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* set the address, index & direction (read from PHY) */ + phy_id &= dev->mii.phy_id_mask; + addr = 
mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* read from MMD */ + ret = lan78xx_read_reg(dev, MII_DATA, &val); + + ret = (int)(val & 0xFFFF); + +done: + mutex_unlock(&dev->phy_mutex); + usb_autopm_put_interface(dev->intf); + return ret; +} + +static int lan78xx_wait_eeprom(struct lan78xx_net *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = lan78xx_read_reg(dev, E2P_CMD, &val); + if (unlikely(ret < 0)) + return -EIO; + + if (!(val & E2P_CMD_EPC_BUSY_) || + (val & E2P_CMD_EPC_TIMEOUT_)) + break; + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) { + netdev_warn(dev->net, "EEPROM read operation timeout"); + return -EIO; + } + + return 0; +} + +static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = lan78xx_read_reg(dev, E2P_CMD, &val); + if (unlikely(ret < 0)) + return -EIO; + + if (!(val & E2P_CMD_EPC_BUSY_)) + return 0; + + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + netdev_warn(dev->net, "EEPROM is busy"); + return -EIO; +} + +static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + u32 val; + int i, ret; + + ret = lan78xx_eeprom_confirm_not_busy(dev); + if (ret) + return ret; + + for (i = 0; i < length; i++) { + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + ret = lan78xx_write_reg(dev, E2P_CMD, val); + if (unlikely(ret < 0)) + return -EIO; + + ret = lan78xx_wait_eeprom(dev); + if (ret < 0) + return ret; + + ret = lan78xx_read_reg(dev, E2P_DATA, &val); + if (unlikely(ret < 0)) + return -EIO; + + data[i] = val & 0xFF; + offset++; + } + + return 0; +} + +static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + u8 sig; + int ret; + + ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); + if ((ret == 0) && (sig == EEPROM_INDICATOR)) + ret = lan78xx_read_raw_eeprom(dev, offset, length, data); + else + ret = -EINVAL; + + return ret; +} + +static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + u32 val; + int i, ret; + + ret = lan78xx_eeprom_confirm_not_busy(dev); + if (ret) + return ret; + + /* Issue write/erase enable command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; + ret = lan78xx_write_reg(dev, E2P_CMD, val); + if (unlikely(ret < 0)) + return -EIO; + + ret = lan78xx_wait_eeprom(dev); + if (ret < 0) + return ret; + + for (i = 0; i < length; i++) { + /* Fill data register */ + val = data[i]; + ret = lan78xx_write_reg(dev, E2P_DATA, val); + if (ret < 0) + return ret; + + /* Send "write" command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + ret = lan78xx_write_reg(dev, E2P_CMD, val); + if (ret < 0) + return ret; + + ret = lan78xx_wait_eeprom(dev); + if (ret < 0) + return ret; + + offset++; + } + + return 0; +} + +static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + int i; + int ret; + u32 buf; + unsigned long timeout; + + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + + if (buf & OTP_PWR_DN_PWRDN_N_) { + /* clear it and wait to be cleared */ + ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + + timeout = jiffies + HZ; + do { + usleep_range(1, 10); + 
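/* The EEPROM and OTP helpers around here all poll with the same bounded-wait
 * idiom: re-read a status register until the busy flag clears, sleep briefly
 * between reads, and give up via time_after() once a jiffies deadline
 * passes. The skeleton, as in lan78xx_eeprom_confirm_not_busy():
 *
 *	unsigned long start_time = jiffies;
 *
 *	do {
 *		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
 *		if (!(val & E2P_CMD_EPC_BUSY_))
 *			return 0;
 *		usleep_range(40, 100);
 *	} while (!time_after(jiffies, start_time + HZ));
 *
 *	return -EIO;
 */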
ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, + "timeout on OTP_PWR_DN"); + return -EIO; + } + } while (buf & OTP_PWR_DN_PWRDN_N_); + } + + for (i = 0; i < length; i++) { + ret = lan78xx_write_reg(dev, OTP_ADDR1, + ((offset + i) >> 8) & OTP_ADDR1_15_11); + ret = lan78xx_write_reg(dev, OTP_ADDR2, + ((offset + i) & OTP_ADDR2_10_3)); + + ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); + ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + + timeout = jiffies + HZ; + do { + udelay(1); + ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, + "timeout on OTP_STATUS"); + return -EIO; + } + } while (buf & OTP_STATUS_BUSY_); + + ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf); + + data[i] = (u8)(buf & 0xFF); + } + + return 0; +} + +static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + u8 sig; + int ret; + + ret = lan78xx_read_raw_otp(dev, 0, 1, &sig); + + if (ret == 0) { + if (sig == OTP_INDICATOR_1) + offset = offset; + else if (sig == OTP_INDICATOR_2) + offset += 0x100; + else + ret = -EINVAL; + ret = lan78xx_read_raw_otp(dev, offset, length, data); + } + + return ret; +} + +static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev) +{ + int i, ret; + + for (i = 0; i < 100; i++) { + u32 dp_sel; + + ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel); + if (unlikely(ret < 0)) + return -EIO; + + if (dp_sel & DP_SEL_DPRDY_) + return 0; + + usleep_range(40, 100); + } + + netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out"); + + return -EIO; +} + +static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select, + u32 addr, u32 length, u32 *buf) +{ + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u32 dp_sel; + int i, ret; + + if (usb_autopm_get_interface(dev->intf) < 0) + return 0; + + mutex_lock(&pdata->dataport_mutex); + + ret = lan78xx_dataport_wait_not_busy(dev); + if (ret < 0) + goto done; + + ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel); + + dp_sel &= ~DP_SEL_RSEL_MASK_; + dp_sel |= ram_select; + ret = lan78xx_write_reg(dev, DP_SEL, dp_sel); + + for (i = 0; i < length; i++) { + ret = lan78xx_write_reg(dev, DP_ADDR, addr + i); + + ret = lan78xx_write_reg(dev, DP_DATA, buf[i]); + + ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_); + + ret = lan78xx_dataport_wait_not_busy(dev); + if (ret < 0) + goto done; + } + +done: + mutex_unlock(&pdata->dataport_mutex); + usb_autopm_put_interface(dev->intf); + + return ret; +} + +static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata, + int index, u8 addr[ETH_ALEN]) +{ + u32 temp; + + if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) { + temp = addr[3]; + temp = addr[2] | (temp << 8); + temp = addr[1] | (temp << 8); + temp = addr[0] | (temp << 8); + pdata->pfilter_table[index][1] = temp; + temp = addr[5]; + temp = addr[4] | (temp << 8); + temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_; + pdata->pfilter_table[index][0] = temp; + } +} + +/* returns hash bit number for given MAC address */ +static inline u32 lan78xx_hash(char addr[ETH_ALEN]) +{ + return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; +} + +static void lan78xx_deferred_multicast_write(struct work_struct *param) +{ + struct lan78xx_priv *pdata = + container_of(param, struct lan78xx_priv, set_multicast); + struct lan78xx_net *dev = pdata->dev; + int i; + int ret; + + netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", + pdata->rfe_ctl); + + 
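/* lan78xx_set_multicast() (below) runs from ndo_set_rx_mode, which can be
 * called in atomic context where the blocking USB register writes above are
 * illegal. The driver therefore snapshots the filter state under
 * rfe_ctl_lock and defers the actual writes to this work item:
 *
 *	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
 *	... update pdata->rfe_ctl, mchash_table and pfilter_table ...
 *	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
 *
 *	schedule_work(&pdata->set_multicast);
 */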
lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN, + DP_SEL_VHF_HASH_LEN, pdata->mchash_table); + + for (i = 1; i < NUM_OF_MAF; i++) { + ret = lan78xx_write_reg(dev, MAF_HI(i), 0); + ret = lan78xx_write_reg(dev, MAF_LO(i), + pdata->pfilter_table[i][1]); + ret = lan78xx_write_reg(dev, MAF_HI(i), + pdata->pfilter_table[i][0]); + } + + ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); +} + +static void lan78xx_set_multicast(struct net_device *netdev) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + unsigned long flags; + int i; + + spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); + + pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ | + RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); + + for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) + pdata->mchash_table[i] = 0; + /* pfilter_table[0] has own HW address */ + for (i = 1; i < NUM_OF_MAF; i++) { + pdata->pfilter_table[i][0] = + pdata->pfilter_table[i][1] = 0; + } + + pdata->rfe_ctl |= RFE_CTL_BCAST_EN_; + + if (dev->net->flags & IFF_PROMISC) { + netif_dbg(dev, drv, dev->net, "promiscuous mode enabled"); + pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_; + } else { + if (dev->net->flags & IFF_ALLMULTI) { + netif_dbg(dev, drv, dev->net, + "receive all multicast enabled"); + pdata->rfe_ctl |= RFE_CTL_MCAST_EN_; + } + } + + if (netdev_mc_count(dev->net)) { + struct netdev_hw_addr *ha; + int i; + + netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); + + pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_; + + i = 1; + netdev_for_each_mc_addr(ha, netdev) { + /* set first 32 into Perfect Filter */ + if (i < 33) { + lan78xx_set_addr_filter(pdata, i, ha->addr); + } else { + u32 bitnum = lan78xx_hash(ha->addr); + + pdata->mchash_table[bitnum / 32] |= + (1 << (bitnum % 32)); + pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_; + } + i++; + } + } + + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); + + /* defer register writes to a sleepable context */ + schedule_work(&pdata->set_multicast); +} + +static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, + u16 lcladv, u16 rmtadv) +{ + u32 flow = 0, fct_flow = 0; + int ret; + + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_TX) + flow = (FLOW_CR_TX_FCEN_ | 0xFFFF); + + if (cap & FLOW_CTRL_RX) + flow |= FLOW_CR_RX_FCEN_; + + if (dev->udev->speed == USB_SPEED_SUPER) + fct_flow = 0x817; + else if (dev->udev->speed == USB_SPEED_HIGH) + fct_flow = 0x211; + + netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), + (cap & FLOW_CTRL_TX ? 
"enabled" : "disabled")); + + ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow); + + /* threshold value should be set before enabling flow */ + ret = lan78xx_write_reg(dev, FLOW, flow); + + return 0; +} + +static int lan78xx_link_reset(struct lan78xx_net *dev) +{ + struct mii_if_info *mii = &dev->mii; + struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + int ladv, radv, ret; + u32 buf; + + /* clear PHY interrupt status */ + /* VTSE PHY */ + ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS); + if (unlikely(ret < 0)) + return -EIO; + + /* clear LAN78xx interrupt status */ + ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_); + if (unlikely(ret < 0)) + return -EIO; + + if (!mii_link_ok(mii) && dev->link_on) { + dev->link_on = false; + netif_carrier_off(dev->net); + + /* reset MAC */ + ret = lan78xx_read_reg(dev, MAC_CR, &buf); + if (unlikely(ret < 0)) + return -EIO; + buf |= MAC_CR_RST_; + ret = lan78xx_write_reg(dev, MAC_CR, buf); + if (unlikely(ret < 0)) + return -EIO; + } else if (mii_link_ok(mii) && !dev->link_on) { + dev->link_on = true; + + mii_check_media(mii, 1, 1); + mii_ethtool_gset(&dev->mii, &ecmd); + + mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS); + + if (dev->udev->speed == USB_SPEED_SUPER) { + if (ethtool_cmd_speed(&ecmd) == 1000) { + /* disable U2 */ + ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + buf &= ~USB_CFG1_DEV_U2_INIT_EN_; + ret = lan78xx_write_reg(dev, USB_CFG1, buf); + /* enable U1 */ + ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + buf |= USB_CFG1_DEV_U1_INIT_EN_; + ret = lan78xx_write_reg(dev, USB_CFG1, buf); + } else { + /* enable U1 & U2 */ + ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + buf |= USB_CFG1_DEV_U2_INIT_EN_; + buf |= USB_CFG1_DEV_U1_INIT_EN_; + ret = lan78xx_write_reg(dev, USB_CFG1, buf); + } + } + + ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); + if (ladv < 0) + return ladv; + + radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA); + if (radv < 0) + return radv; + + netif_dbg(dev, link, dev->net, + "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x", + ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv); + + ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv); + netif_carrier_on(dev->net); + } + + return ret; +} + +/* some work can't be done in tasklets, so we use keventd + * + * NOTE: annoying asymmetry: if it's active, schedule_work() fails, + * but tasklet_schedule() doesn't. hope the failure is rare. 
+ */ +void lan78xx_defer_kevent(struct lan78xx_net *dev, int work) +{ + set_bit(work, &dev->flags); + if (!schedule_delayed_work(&dev->wq, 0)) + netdev_err(dev->net, "kevent %d may have been dropped\n", work); +} + +static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) +{ + u32 intdata; + + if (urb->actual_length != 4) { + netdev_warn(dev->net, + "unexpected urb length %d\n", urb->actual_length); + return; + } + + memcpy(&intdata, urb->transfer_buffer, 4); + le32_to_cpus(&intdata); + + if (intdata & INT_ENP_PHY_INT) { + netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); + lan78xx_defer_kevent(dev, EVENT_LINK_RESET); + } else + netdev_warn(dev->net, + "unexpected interrupt: 0x%08x\n", intdata); +} + +static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev) +{ + return MAX_EEPROM_SIZE; +} + +static int lan78xx_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + + ee->magic = LAN78XX_EEPROM_MAGIC; + + return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); +} + +static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + + /* Allow entire eeprom update only */ + if ((ee->magic == LAN78XX_EEPROM_MAGIC) && + (ee->offset == 0) && + (ee->len == 512) && + (data[0] == EEPROM_INDICATOR)) + return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); + else if ((ee->magic == LAN78XX_OTP_MAGIC) && + (ee->offset == 0) && + (ee->len == 512) && + (data[0] == OTP_INDICATOR_1)) + return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); + + return -EINVAL; +} + +static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings)); +} + +static int lan78xx_get_sset_count(struct net_device *netdev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(lan78xx_gstrings); + else + return -EOPNOTSUPP; +} + +static void lan78xx_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_statstage lan78xx_stat; + u32 *p; + int i; + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) { + p = (u32 *)&lan78xx_stat; + for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++) + data[i] = p[i]; + } + + usb_autopm_put_interface(dev->intf); +} + +static void lan78xx_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + int ret; + u32 buf; + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + if (unlikely(ret < 0)) { + wol->supported = 0; + wol->wolopts = 0; + } else { + if (buf & USB_CFG_RMT_WKP_) { + wol->supported = WAKE_ALL; + wol->wolopts = pdata->wol; + } else { + wol->supported = 0; + wol->wolopts = 0; + } + } + + usb_autopm_put_interface(dev->intf); +} + +static int lan78xx_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + pdata->wol = 0; + if (wol->wolopts & WAKE_UCAST) + pdata->wol |= WAKE_UCAST; + if
(wol->wolopts & WAKE_MCAST) + pdata->wol |= WAKE_MCAST; + if (wol->wolopts & WAKE_BCAST) + pdata->wol |= WAKE_BCAST; + if (wol->wolopts & WAKE_MAGIC) + pdata->wol |= WAKE_MAGIC; + if (wol->wolopts & WAKE_PHY) + pdata->wol |= WAKE_PHY; + if (wol->wolopts & WAKE_ARP) + pdata->wol |= WAKE_ARP; + + device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); + + usb_autopm_put_interface(dev->intf); + + return ret; +} + +static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata) +{ + struct lan78xx_net *dev = netdev_priv(net); + int ret; + u32 buf; + u32 adv, lpadv; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + ret = lan78xx_read_reg(dev, MAC_CR, &buf); + if (buf & MAC_CR_EEE_EN_) { + buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id, + PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT); + adv = mmd_eee_adv_to_ethtool_adv_t(buf); + buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id, + PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT); + lpadv = mmd_eee_adv_to_ethtool_adv_t(buf); + + edata->eee_enabled = true; + edata->supported = true; + edata->eee_active = !!(adv & lpadv); + edata->advertised = adv; + edata->lp_advertised = lpadv; + edata->tx_lpi_enabled = true; + /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same uSec unit */ + ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf); + edata->tx_lpi_timer = buf; + } else { + buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id, + PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT); + lpadv = mmd_eee_adv_to_ethtool_adv_t(buf); + + edata->eee_enabled = false; + edata->eee_active = false; + edata->supported = false; + edata->advertised = 0; + edata->lp_advertised = lpadv; + edata->tx_lpi_enabled = false; + edata->tx_lpi_timer = 0; + } + + usb_autopm_put_interface(dev->intf); + + return 0; +} + +static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata) +{ + struct lan78xx_net *dev = netdev_priv(net); + int ret; + u32 buf; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + if (edata->eee_enabled) { + ret = lan78xx_read_reg(dev, MAC_CR, &buf); + buf |= MAC_CR_EEE_EN_; + ret = lan78xx_write_reg(dev, MAC_CR, buf); + + buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + lan78xx_mmd_write(dev->net, dev->mii.phy_id, + PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf); + } else { + ret = lan78xx_read_reg(dev, MAC_CR, &buf); + buf &= ~MAC_CR_EEE_EN_; + ret = lan78xx_write_reg(dev, MAC_CR, buf); + } + + usb_autopm_put_interface(dev->intf); + + return 0; +} + +static u32 lan78xx_get_link(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + + return mii_link_ok(&dev->mii); +} + +int lan78xx_nway_reset(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + + if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write)) + return -EOPNOTSUPP; + + return mii_nway_restart(&dev->mii); +} + +static void lan78xx_get_drvinfo(struct net_device *net, + struct ethtool_drvinfo *info) +{ + struct lan78xx_net *dev = netdev_priv(net); + + strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strncpy(info->version, DRIVER_VERSION, sizeof(info->version)); + usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); +} + +static u32 lan78xx_get_msglevel(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + + return dev->msg_enable; +} + +static void lan78xx_set_msglevel(struct net_device *net, u32 level) +{ + struct lan78xx_net *dev = netdev_priv(net); + + dev->msg_enable = level; +} + +static int lan78xx_get_settings(struct
net_device *net, struct ethtool_cmd *cmd) +{ + struct lan78xx_net *dev = netdev_priv(net); + struct mii_if_info *mii = &dev->mii; + int ret; + int buf; + + if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write)) + return -EOPNOTSUPP; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + ret = mii_ethtool_gset(&dev->mii, cmd); + + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1); + buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0); + + buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_; + if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) { + cmd->eth_tp_mdix = ETH_TP_MDI_AUTO; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + } else if (buf == PHY_EXT_MODE_CTRL_MDI_) { + cmd->eth_tp_mdix = ETH_TP_MDI; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI; + } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) { + cmd->eth_tp_mdix = ETH_TP_MDI_X; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X; + } + + usb_autopm_put_interface(dev->intf); + + return ret; +} + +static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd) +{ + struct lan78xx_net *dev = netdev_priv(net); + struct mii_if_info *mii = &dev->mii; + int ret = 0; + int temp; + + if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write)) + return -EOPNOTSUPP; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) { + if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) { + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_1); + temp = mii->mdio_read(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL); + temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_; + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL, + temp | PHY_EXT_MODE_CTRL_MDI_); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_0); + } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) { + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_1); + temp = mii->mdio_read(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL); + temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_; + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL, + temp | PHY_EXT_MODE_CTRL_MDI_X_); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_0); + } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) { + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_1); + temp = mii->mdio_read(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL); + temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_; + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL, + temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_0); + } + } + + /* change speed & duplex */ + ret = mii_ethtool_sset(&dev->mii, cmd); + + if (!cmd->autoneg) { + /* force link down */ + temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, + temp | BMCR_LOOPBACK); + mdelay(1); + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp); + } + + usb_autopm_put_interface(dev->intf); + + return ret; +} + +static const struct ethtool_ops lan78xx_ethtool_ops = { + .get_link = lan78xx_get_link, + .nway_reset = lan78xx_nway_reset, + .get_drvinfo = lan78xx_get_drvinfo, + .get_msglevel = lan78xx_get_msglevel, + .set_msglevel = lan78xx_set_msglevel, + .get_settings = lan78xx_get_settings, + .set_settings = lan78xx_set_settings, + .get_eeprom_len = 
lan78xx_ethtool_get_eeprom_len, + .get_eeprom = lan78xx_ethtool_get_eeprom, + .set_eeprom = lan78xx_ethtool_set_eeprom, + .get_ethtool_stats = lan78xx_get_stats, + .get_sset_count = lan78xx_get_sset_count, + .get_strings = lan78xx_get_strings, + .get_wol = lan78xx_get_wol, + .set_wol = lan78xx_set_wol, + .get_eee = lan78xx_get_eee, + .set_eee = lan78xx_set_eee, +}; + +static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); +} + +static void lan78xx_init_mac_address(struct lan78xx_net *dev) +{ + u32 addr_lo, addr_hi; + int ret; + u8 addr[6]; + + ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); + ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); + + addr[0] = addr_lo & 0xFF; + addr[1] = (addr_lo >> 8) & 0xFF; + addr[2] = (addr_lo >> 16) & 0xFF; + addr[3] = (addr_lo >> 24) & 0xFF; + addr[4] = addr_hi & 0xFF; + addr[5] = (addr_hi >> 8) & 0xFF; + + if (!is_valid_ether_addr(addr)) { + /* reading mac address from EEPROM or OTP */ + if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, + addr) == 0) || + (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN, + addr) == 0)) { + if (is_valid_ether_addr(addr)) { + /* eeprom values are valid so use them */ + netif_dbg(dev, ifup, dev->net, + "MAC address read from EEPROM"); + } else { + /* generate random MAC */ + random_ether_addr(addr); + netif_dbg(dev, ifup, dev->net, + "MAC address set to random addr"); + } + + addr_lo = addr[0] | (addr[1] << 8) | + (addr[2] << 16) | (addr[3] << 24); + addr_hi = addr[4] | (addr[5] << 8); + + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + } else { + /* generate random MAC */ + random_ether_addr(addr); + netif_dbg(dev, ifup, dev->net, + "MAC address set to random addr"); + } + } + + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + + ether_addr_copy(dev->net->dev_addr, addr); +} + +static void lan78xx_mii_init(struct lan78xx_net *dev) +{ + /* Initialize MII structure */ + dev->mii.dev = dev->net; + dev->mii.mdio_read = lan78xx_mdio_read; + dev->mii.mdio_write = lan78xx_mdio_write; + dev->mii.phy_id_mask = 0x1f; + dev->mii.reg_num_mask = 0x1f; + dev->mii.phy_id = INTERNAL_PHY_ID; + dev->mii.supports_gmii = true; +} + +static int lan78xx_phy_init(struct lan78xx_net *dev) +{ + int temp; + struct mii_if_info *mii = &dev->mii; + + if ((!mii->mdio_write) || (!mii->mdio_read)) + return -EOPNOTSUPP; + + temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); + temp |= ADVERTISE_ALL; + mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE, + temp | ADVERTISE_CSMA | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + /* set to AUTOMDIX */ + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1); + temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL); + temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_; + mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL, + temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0); + dev->mdix_ctrl = ETH_TP_MDI_AUTO; + + /* MAC doesn't support 1000HD */ + temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000); + mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000, + temp & ~ADVERTISE_1000HALF); + + /* clear interrupt */ + mii->mdio_read(mii->dev, mii->phy_id, 
PHY_VTSE_INT_STS); + mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK, + PHY_VTSE_INT_MASK_MDINTPIN_EN_ | + PHY_VTSE_INT_MASK_LINK_CHANGE_); + + netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); + + return 0; +} + +static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) +{ + int ret = 0; + u32 buf; + bool rxenabled; + + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + + rxenabled = ((buf & MAC_RX_RXEN_) != 0); + + if (rxenabled) { + buf &= ~MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + } + + /* add 4 to size for FCS */ + buf &= ~MAC_RX_MAX_SIZE_MASK_; + buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); + + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + if (rxenabled) { + buf |= MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + } + + return 0; +} + +static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q) +{ + struct sk_buff *skb; + unsigned long flags; + int count = 0; + + spin_lock_irqsave(&q->lock, flags); + while (!skb_queue_empty(q)) { + struct skb_data *entry; + struct urb *urb; + int ret; + + skb_queue_walk(q, skb) { + entry = (struct skb_data *)skb->cb; + if (entry->state != unlink_start) + goto found; + } + break; +found: + entry->state = unlink_start; + urb = entry->urb; + + /* Get reference count of the URB to avoid it to be + * freed during usb_unlink_urb, which may trigger + * use-after-free problem inside usb_unlink_urb since + * usb_unlink_urb is always racing with .complete + * handler(include defer_bh). + */ + usb_get_urb(urb); + spin_unlock_irqrestore(&q->lock, flags); + /* during some PM-driven resume scenarios, + * these (async) unlinks complete immediately + */ + ret = usb_unlink_urb(urb); + if (ret != -EINPROGRESS && ret != 0) + netdev_dbg(dev->net, "unlink urb err, %d\n", ret); + else + count++; + usb_put_urb(urb); + spin_lock_irqsave(&q->lock, flags); + } + spin_unlock_irqrestore(&q->lock, flags); + return count; +} + +static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + int ll_mtu = new_mtu + netdev->hard_header_len; + int old_hard_mtu = dev->hard_mtu; + int old_rx_urb_size = dev->rx_urb_size; + int ret; + + if (new_mtu > MAX_SINGLE_PACKET_SIZE) + return -EINVAL; + + if (new_mtu <= 0) + return -EINVAL; + /* no second zero-length packet read wanted after mtu-sized packets */ + if ((ll_mtu % dev->maxpacket) == 0) + return -EDOM; + + ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); + + netdev->mtu = new_mtu; + + dev->hard_mtu = netdev->mtu + netdev->hard_header_len; + if (dev->rx_urb_size == old_hard_mtu) { + dev->rx_urb_size = dev->hard_mtu; + if (dev->rx_urb_size > old_rx_urb_size) { + if (netif_running(dev->net)) { + unlink_urbs(dev, &dev->rxq); + tasklet_schedule(&dev->bh); + } + } + } + + return 0; +} + +int lan78xx_set_mac_addr(struct net_device *netdev, void *p) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct sockaddr *addr = p; + u32 addr_lo, addr_hi; + int ret; + + if (netif_running(netdev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ether_addr_copy(netdev->dev_addr, addr->sa_data); + + addr_lo = netdev->dev_addr[0] | + netdev->dev_addr[1] << 8 | + netdev->dev_addr[2] << 16 | + netdev->dev_addr[3] << 24; + addr_hi = netdev->dev_addr[4] | + netdev->dev_addr[5] << 8; + + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + + return 0; +} + +/* Enable or disable Rx 
checksum offload engine */ +static int lan78xx_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); + + if (features & NETIF_F_RXCSUM) { + pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_; + pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_; + } else { + pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_); + pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_); + } + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; + else + pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; + + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); + + ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + + return 0; +} + +static void lan78xx_deferred_vlan_write(struct work_struct *param) +{ + struct lan78xx_priv *pdata = + container_of(param, struct lan78xx_priv, set_vlan); + struct lan78xx_net *dev = pdata->dev; + + lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0, + DP_SEL_VHF_VLAN_LEN, pdata->vlan_table); +} + +static int lan78xx_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u16 vid_bit_index; + u16 vid_dword_index; + + vid_dword_index = (vid >> 5) & 0x7F; + vid_bit_index = vid & 0x1F; + + pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index); + + /* defer register writes to a sleepable context */ + schedule_work(&pdata->set_vlan); + + return 0; +} + +static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u16 vid_bit_index; + u16 vid_dword_index; + + vid_dword_index = (vid >> 5) & 0x7F; + vid_bit_index = vid & 0x1F; + + pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index); + + /* defer register writes to a sleepable context */ + schedule_work(&pdata->set_vlan); + + return 0; +} + +static void lan78xx_init_ltm(struct lan78xx_net *dev) +{ + int ret; + u32 buf; + u32 regs[6] = { 0 }; + + ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + if (buf & USB_CFG1_LTM_ENABLE_) { + u8 temp[2]; + /* Get values from EEPROM first */ + if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) { + if (temp[0] == 24) { + ret = lan78xx_read_raw_eeprom(dev, + temp[1] * 2, + 24, + (u8 *)regs); + if (ret < 0) + return; + } + } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) { + if (temp[0] == 24) { + ret = lan78xx_read_raw_otp(dev, + temp[1] * 2, + 24, + (u8 *)regs); + if (ret < 0) + return; + } + } + } + + lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]); + lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]); + lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]); + lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]); + lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]); + lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]); +} + +static int lan78xx_reset(struct lan78xx_net *dev) +{ + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u32 buf; + int ret = 0; + unsigned long timeout; + + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + buf |= HW_CFG_LRST_; + ret = lan78xx_write_reg(dev, HW_CFG, buf); + + timeout = jiffies + HZ; + do { + mdelay(1); + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, + 
"timeout on completion of LiteReset"); + return -EIO; + } + } while (buf & HW_CFG_LRST_); + + lan78xx_init_mac_address(dev); + + /* Respond to the IN token with a NAK */ + ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + buf |= USB_CFG_BIR_; + ret = lan78xx_write_reg(dev, USB_CFG0, buf); + + /* Init LTM */ + lan78xx_init_ltm(dev); + + dev->net->hard_header_len += TX_OVERHEAD; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + if (dev->udev->speed == USB_SPEED_SUPER) { + buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; + dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; + dev->rx_qlen = 4; + dev->tx_qlen = 4; + } else if (dev->udev->speed == USB_SPEED_HIGH) { + buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE; + dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; + dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size; + dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu; + } else { + buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; + dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; + dev->rx_qlen = 4; + } + + ret = lan78xx_write_reg(dev, BURST_CAP, buf); + ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); + + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + buf |= HW_CFG_MEF_; + ret = lan78xx_write_reg(dev, HW_CFG, buf); + + ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + buf |= USB_CFG_BCE_; + ret = lan78xx_write_reg(dev, USB_CFG0, buf); + + /* set FIFO sizes */ + buf = (MAX_RX_FIFO_SIZE - 512) / 512; + ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf); + + buf = (MAX_TX_FIFO_SIZE - 512) / 512; + ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf); + + ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); + ret = lan78xx_write_reg(dev, FLOW, 0); + ret = lan78xx_write_reg(dev, FCT_FLOW, 0); + + /* Don't need rfe_ctl_lock during initialisation */ + ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); + pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_; + ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + + /* Enable or disable checksum offload engines */ + lan78xx_set_features(dev->net, dev->net->features); + + lan78xx_set_multicast(dev->net); + + /* reset PHY */ + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + buf |= PMT_CTL_PHY_RST_; + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + + timeout = jiffies + HZ; + do { + mdelay(1); + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, "timeout waiting for PHY Reset"); + return -EIO; + } + } while (buf & PMT_CTL_PHY_RST_); + + lan78xx_mii_init(dev); + + ret = lan78xx_phy_init(dev); + + ret = lan78xx_read_reg(dev, MAC_CR, &buf); + + buf |= MAC_CR_GMII_EN_; + buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; + + ret = lan78xx_write_reg(dev, MAC_CR, buf); + + /* enable on PHY */ + if (buf & MAC_CR_EEE_EN_) + lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06); + + /* enable PHY interrupts */ + ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); + buf |= INT_ENP_PHY_INT; + ret = lan78xx_write_reg(dev, INT_EP_CTL, buf); + + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf |= MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + + ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf); + buf |= FCT_TX_CTL_EN_; + ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); + + ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); + + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf |= MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf); + buf |= FCT_RX_CTL_EN_; + ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf); + + if 
(!mii_nway_restart(&dev->mii)) + netif_dbg(dev, link, dev->net, "autoneg initiated"); + + return 0; +} + +static int lan78xx_open(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + goto out; + + ret = lan78xx_reset(dev); + if (ret < 0) + goto done; + + /* for Link Check */ + if (dev->urb_intr) { + ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL); + if (ret < 0) { + netif_err(dev, ifup, dev->net, + "intr submit %d\n", ret); + goto done; + } + } + + set_bit(EVENT_DEV_OPEN, &dev->flags); + + netif_start_queue(net); + + dev->link_on = false; + + lan78xx_defer_kevent(dev, EVENT_LINK_RESET); +done: + usb_autopm_put_interface(dev->intf); + +out: + return ret; +} + +static void lan78xx_terminate_urbs(struct lan78xx_net *dev) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); + DECLARE_WAITQUEUE(wait, current); + int temp; + + /* ensure there are no more active urbs */ + add_wait_queue(&unlink_wakeup, &wait); + set_current_state(TASK_UNINTERRUPTIBLE); + dev->wait = &unlink_wakeup; + temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); + + /* maybe wait for deletions to finish. */ + while (!skb_queue_empty(&dev->rxq) && + !skb_queue_empty(&dev->txq) && + !skb_queue_empty(&dev->done)) { + schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + netif_dbg(dev, ifdown, dev->net, + "waited for %d urb completions\n", temp); + } + set_current_state(TASK_RUNNING); + dev->wait = NULL; + remove_wait_queue(&unlink_wakeup, &wait); +} + +int lan78xx_stop(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + + clear_bit(EVENT_DEV_OPEN, &dev->flags); + netif_stop_queue(net); + + netif_info(dev, ifdown, dev->net, + "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", + net->stats.rx_packets, net->stats.tx_packets, + net->stats.rx_errors, net->stats.tx_errors); + + lan78xx_terminate_urbs(dev); + + usb_kill_urb(dev->urb_intr); + + skb_queue_purge(&dev->rxq_pause); + + /* deferred work (task, timer, softirq) must also stop. + * can't flush_scheduled_work() until we drop rtnl (later), + * else workers could deadlock; so make workers a NOP. 
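+ * clearing dev->flags below is what makes them NOPs: + * lan78xx_delayedwork() only acts on set EVENT_* bits, so a worker + * that still runs finds nothing to do.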
+ */ + dev->flags = 0; + cancel_delayed_work_sync(&dev->wq); + tasklet_kill(&dev->bh); + + usb_autopm_put_interface(dev->intf); + + return 0; +} + +static int lan78xx_linearize(struct sk_buff *skb) +{ + return skb_linearize(skb); +} + +static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, + struct sk_buff *skb, gfp_t flags) +{ + u32 tx_cmd_a, tx_cmd_b; + + if (skb_headroom(skb) < TX_OVERHEAD) { + struct sk_buff *skb2; + + skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags); + dev_kfree_skb_any(skb); + skb = skb2; + if (!skb) + return NULL; + } + + if (lan78xx_linearize(skb) < 0) + return NULL; + + tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_; + + tx_cmd_b = 0; + if (skb_is_gso(skb)) { + u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_); + + tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_; + + tx_cmd_a |= TX_CMD_A_LSO_; + } + + if (skb_vlan_tag_present(skb)) { + tx_cmd_a |= TX_CMD_A_IVTG_; + tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_; + } + + skb_push(skb, 4); + cpu_to_le32s(&tx_cmd_b); + memcpy(skb->data, &tx_cmd_b, 4); + + skb_push(skb, 4); + cpu_to_le32s(&tx_cmd_a); + memcpy(skb->data, &tx_cmd_a, 4); + + return skb; +} + +static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb, + struct sk_buff_head *list, enum skb_state state) +{ + unsigned long flags; + enum skb_state old_state; + struct skb_data *entry = (struct skb_data *)skb->cb; + + spin_lock_irqsave(&list->lock, flags); + old_state = entry->state; + entry->state = state; + + __skb_unlink(skb, list); + spin_unlock(&list->lock); + spin_lock(&dev->done.lock); + + __skb_queue_tail(&dev->done, skb); + if (skb_queue_len(&dev->done) == 1) + tasklet_schedule(&dev->bh); + spin_unlock_irqrestore(&dev->done.lock, flags); + + return old_state; +} + +static void tx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct skb_data *entry = (struct skb_data *)skb->cb; + struct lan78xx_net *dev = entry->dev; + + if (urb->status == 0) { + dev->net->stats.tx_packets++; + dev->net->stats.tx_bytes += entry->length; + } else { + dev->net->stats.tx_errors++; + + switch (urb->status) { + case -EPIPE: + lan78xx_defer_kevent(dev, EVENT_TX_HALT); + break; + + /* software-driven interface shutdown */ + case -ECONNRESET: + case -ESHUTDOWN: + break; + + case -EPROTO: + case -ETIME: + case -EILSEQ: + netif_stop_queue(dev->net); + break; + default: + netif_dbg(dev, tx_err, dev->net, + "tx err %d\n", entry->urb->status); + break; + } + } + + usb_autopm_put_interface_async(dev->intf); + + defer_bh(dev, skb, &dev->txq, tx_done); +} + +static void lan78xx_queue_skb(struct sk_buff_head *list, + struct sk_buff *newsk, enum skb_state state) +{ + struct skb_data *entry = (struct skb_data *)newsk->cb; + + __skb_queue_tail(list, newsk); + entry->state = state; +} + +netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + struct sk_buff *skb2 = NULL; + + if (skb) { + skb_tx_timestamp(skb); + skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC); + } + + if (skb2) { + skb_queue_tail(&dev->txq_pend, skb2); + + if (skb_queue_len(&dev->txq_pend) > 10) + netif_stop_queue(net); + } else { + netif_dbg(dev, tx_err, dev->net, + "lan78xx_tx_prep return NULL\n"); + dev->net->stats.tx_errors++; + dev->net->stats.tx_dropped++; + } + + tasklet_schedule(&dev->bh); + + return NETDEV_TX_OK; +} + +int 
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) +{ + int tmp; + struct usb_host_interface *alt = NULL; + struct usb_host_endpoint *in = NULL, *out = NULL; + struct usb_host_endpoint *status = NULL; + + for (tmp = 0; tmp < intf->num_altsetting; tmp++) { + unsigned ep; + + in = NULL; + out = NULL; + status = NULL; + alt = intf->altsetting + tmp; + + for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { + struct usb_host_endpoint *e; + int intr = 0; + + e = alt->endpoint + ep; + switch (e->desc.bmAttributes) { + case USB_ENDPOINT_XFER_INT: + if (!usb_endpoint_dir_in(&e->desc)) + continue; + intr = 1; + /* FALLTHROUGH */ + case USB_ENDPOINT_XFER_BULK: + break; + default: + continue; + } + if (usb_endpoint_dir_in(&e->desc)) { + if (!intr && !in) + in = e; + else if (intr && !status) + status = e; + } else { + if (!out) + out = e; + } + } + if (in && out) + break; + } + if (!alt || !in || !out) + return -EINVAL; + + dev->pipe_in = usb_rcvbulkpipe(dev->udev, + in->desc.bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK); + dev->pipe_out = usb_sndbulkpipe(dev->udev, + out->desc.bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK); + dev->ep_intr = status; + + return 0; +} + +static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) +{ + struct lan78xx_priv *pdata = NULL; + int ret; + int i; + + ret = lan78xx_get_endpoints(dev, intf); + + dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); + + pdata = (struct lan78xx_priv *)(dev->data[0]); + if (!pdata) { + netdev_warn(dev->net, "Unable to allocate lan78xx_priv"); + return -ENOMEM; + } + + pdata->dev = dev; + + spin_lock_init(&pdata->rfe_ctl_lock); + mutex_init(&pdata->dataport_mutex); + + INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write); + + for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++) + pdata->vlan_table[i] = 0; + + INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write); + + dev->net->features = 0; + + if (DEFAULT_TX_CSUM_ENABLE) + dev->net->features |= NETIF_F_HW_CSUM; + + if (DEFAULT_RX_CSUM_ENABLE) + dev->net->features |= NETIF_F_RXCSUM; + + if (DEFAULT_TSO_CSUM_ENABLE) + dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; + + dev->net->hw_features = dev->net->features; + + /* Init all registers */ + ret = lan78xx_reset(dev); + + dev->net->flags |= IFF_MULTICAST; + + pdata->wol = WAKE_MAGIC; + + return 0; +} + +static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) +{ + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + + if (pdata) { + netif_dbg(dev, ifdown, dev->net, "free pdata"); + kfree(pdata); + pdata = NULL; + dev->data[0] = 0; + } +} + +static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, + struct sk_buff *skb, + u32 rx_cmd_a, u32 rx_cmd_b) +{ + if (!(dev->net->features & NETIF_F_RXCSUM) || + unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { + skb->ip_summed = CHECKSUM_NONE; + } else { + skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); + skb->ip_summed = CHECKSUM_COMPLETE; + } +} + +void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) +{ + int status; + + if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { + skb_queue_tail(&dev->rxq_pause, skb); + return; + } + + skb->protocol = eth_type_trans(skb, dev->net); + dev->net->stats.rx_packets++; + dev->net->stats.rx_bytes += skb->len; + + netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", + skb->len + sizeof(struct ethhdr), skb->protocol); + memset(skb->cb, 0, sizeof(struct skb_data)); + + if (skb_defer_rx_timestamp(skb)) + return; + + 
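+ /* netif_rx() only queues the frame on the per-CPU backlog for + * later softirq processing; NET_RX_DROP below therefore means + * backlog overflow, not a device error. + */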
status = netif_rx(skb); + if (status != NET_RX_SUCCESS) + netif_dbg(dev, rx_err, dev->net, + "netif_rx status %d\n", status); +} + +static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) +{ + if (skb->len < dev->net->hard_header_len) + return 0; + + while (skb->len > 0) { + u32 rx_cmd_a, rx_cmd_b, align_count, size; + u16 rx_cmd_c; + struct sk_buff *skb2; + unsigned char *packet; + + memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); + le32_to_cpus(&rx_cmd_a); + skb_pull(skb, sizeof(rx_cmd_a)); + + memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); + le32_to_cpus(&rx_cmd_b); + skb_pull(skb, sizeof(rx_cmd_b)); + + memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c)); + le16_to_cpus(&rx_cmd_c); + skb_pull(skb, sizeof(rx_cmd_c)); + + packet = skb->data; + + /* get the packet length */ + size = (rx_cmd_a & RX_CMD_A_LEN_MASK_); + align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; + + if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) { + netif_dbg(dev, rx_err, dev->net, + "Error rx_cmd_a=0x%08x", rx_cmd_a); + } else { + /* last frame in this batch */ + if (skb->len == size) { + lan78xx_rx_csum_offload(dev, skb, + rx_cmd_a, rx_cmd_b); + + skb_trim(skb, skb->len - 4); /* remove fcs */ + skb->truesize = size + sizeof(struct sk_buff); + + return 1; + } + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) { + netdev_warn(dev->net, "Error allocating skb"); + return 0; + } + + skb2->len = size; + skb2->data = packet; + skb_set_tail_pointer(skb2, size); + + lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); + + skb_trim(skb2, skb2->len - 4); /* remove fcs */ + skb2->truesize = size + sizeof(struct sk_buff); + + lan78xx_skb_return(dev, skb2); + } + + skb_pull(skb, size); + + /* padding bytes before the next frame starts */ + if (skb->len) + skb_pull(skb, align_count); + } + + if (unlikely(skb->len < 0)) { + netdev_warn(dev->net, "invalid rx length<0 %d", skb->len); + return 0; + } + + return 1; +} + +static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb) +{ + if (!lan78xx_rx(dev, skb)) { + dev->net->stats.rx_errors++; + goto done; + } + + if (skb->len) { + lan78xx_skb_return(dev, skb); + return; + } + + netif_dbg(dev, rx_err, dev->net, "drop\n"); + dev->net->stats.rx_errors++; +done: + skb_queue_tail(&dev->done, skb); +} + +static void rx_complete(struct urb *urb); + +static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags) +{ + struct sk_buff *skb; + struct skb_data *entry; + unsigned long lockflags; + size_t size = dev->rx_urb_size; + int ret = 0; + + skb = netdev_alloc_skb_ip_align(dev->net, size); + if (!skb) { + usb_free_urb(urb); + return -ENOMEM; + } + + entry = (struct skb_data *)skb->cb; + entry->urb = urb; + entry->dev = dev; + entry->length = 0; + + usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in, + skb->data, size, rx_complete, skb); + + spin_lock_irqsave(&dev->rxq.lock, lockflags); + + if (netif_device_present(dev->net) && + netif_running(dev->net) && + !test_bit(EVENT_RX_HALT, &dev->flags) && + !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + ret = usb_submit_urb(urb, GFP_ATOMIC); + switch (ret) { + case 0: + lan78xx_queue_skb(&dev->rxq, skb, rx_start); + break; + case -EPIPE: + lan78xx_defer_kevent(dev, EVENT_RX_HALT); + break; + case -ENODEV: + netif_dbg(dev, ifdown, dev->net, "device gone\n"); + netif_device_detach(dev->net); + break; + case -EHOSTUNREACH: + ret = -ENOLINK; + break; + default: + netif_dbg(dev, rx_err, dev->net, + "rx submit, %d\n", ret); + tasklet_schedule(&dev->bh); + } + } else { + netif_dbg(dev, ifdown, dev->net, "rx: 
stopped\n"); + ret = -ENOLINK; + } + spin_unlock_irqrestore(&dev->rxq.lock, lockflags); + if (ret) { + dev_kfree_skb_any(skb); + usb_free_urb(urb); + } + return ret; +} + +static void rx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct skb_data *entry = (struct skb_data *)skb->cb; + struct lan78xx_net *dev = entry->dev; + int urb_status = urb->status; + enum skb_state state; + + skb_put(skb, urb->actual_length); + state = rx_done; + entry->urb = NULL; + + switch (urb_status) { + case 0: + if (skb->len < dev->net->hard_header_len) { + state = rx_cleanup; + dev->net->stats.rx_errors++; + dev->net->stats.rx_length_errors++; + netif_dbg(dev, rx_err, dev->net, + "rx length %d\n", skb->len); + } + usb_mark_last_busy(dev->udev); + break; + case -EPIPE: + dev->net->stats.rx_errors++; + lan78xx_defer_kevent(dev, EVENT_RX_HALT); + /* FALLTHROUGH */ + case -ECONNRESET: /* async unlink */ + case -ESHUTDOWN: /* hardware gone */ + netif_dbg(dev, ifdown, dev->net, + "rx shutdown, code %d\n", urb_status); + state = rx_cleanup; + entry->urb = urb; + urb = NULL; + break; + case -EPROTO: + case -ETIME: + case -EILSEQ: + dev->net->stats.rx_errors++; + state = rx_cleanup; + entry->urb = urb; + urb = NULL; + break; + + /* data overrun ... flush fifo? */ + case -EOVERFLOW: + dev->net->stats.rx_over_errors++; + /* FALLTHROUGH */ + + default: + state = rx_cleanup; + dev->net->stats.rx_errors++; + netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); + break; + } + + state = defer_bh(dev, skb, &dev->rxq, state); + + if (urb) { + if (netif_running(dev->net) && + !test_bit(EVENT_RX_HALT, &dev->flags) && + state != unlink_start) { + rx_submit(dev, urb, GFP_ATOMIC); + return; + } + usb_free_urb(urb); + } + netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); +} + +static void lan78xx_tx_bh(struct lan78xx_net *dev) +{ + int length; + struct urb *urb = NULL; + struct skb_data *entry; + unsigned long flags; + struct sk_buff_head *tqp = &dev->txq_pend; + struct sk_buff *skb, *skb2; + int ret; + int count, pos; + int skb_totallen, pkt_cnt; + + skb_totallen = 0; + pkt_cnt = 0; + for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { + if (skb_is_gso(skb)) { + if (pkt_cnt) { + /* handle previous packets first */ + break; + } + length = skb->len; + skb2 = skb_dequeue(tqp); + goto gso_skb; + } + + if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE) + break; + skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); + pkt_cnt++; + } + + /* copy to a single skb */ + skb = alloc_skb(skb_totallen, GFP_ATOMIC); + if (!skb) + goto drop; + + skb_put(skb, skb_totallen); + + for (count = pos = 0; count < pkt_cnt; count++) { + skb2 = skb_dequeue(tqp); + if (skb2) { + memcpy(skb->data + pos, skb2->data, skb2->len); + pos += roundup(skb2->len, sizeof(u32)); + dev_kfree_skb(skb2); + } + } + + length = skb_totallen; + +gso_skb: + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + netif_dbg(dev, tx_err, dev->net, "no urb\n"); + goto drop; + } + + entry = (struct skb_data *)skb->cb; + entry->urb = urb; + entry->dev = dev; + entry->length = length; + + spin_lock_irqsave(&dev->txq.lock, flags); + ret = usb_autopm_get_interface_async(dev->intf); + if (ret < 0) { + spin_unlock_irqrestore(&dev->txq.lock, flags); + goto drop; + } + + usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out, + skb->data, skb->len, tx_complete, skb); + + if (length % dev->maxpacket == 0) { + /* send USB_ZERO_PACKET */ + urb->transfer_flags |= URB_ZERO_PACKET; + } + +#ifdef CONFIG_PM + /* if this 
triggers, the device is still asleep */ + if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + /* transmission will be done in resume */ + usb_anchor_urb(urb, &dev->deferred); + /* no use to process more packets */ + netif_stop_queue(dev->net); + usb_put_urb(urb); + spin_unlock_irqrestore(&dev->txq.lock, flags); + netdev_dbg(dev->net, "Delaying transmission for resumption\n"); + return; + } +#endif + + ret = usb_submit_urb(urb, GFP_ATOMIC); + switch (ret) { + case 0: + dev->net->trans_start = jiffies; + lan78xx_queue_skb(&dev->txq, skb, tx_start); + if (skb_queue_len(&dev->txq) >= dev->tx_qlen) + netif_stop_queue(dev->net); + break; + case -EPIPE: + netif_stop_queue(dev->net); + lan78xx_defer_kevent(dev, EVENT_TX_HALT); + usb_autopm_put_interface_async(dev->intf); + break; + default: + usb_autopm_put_interface_async(dev->intf); + netif_dbg(dev, tx_err, dev->net, + "tx: submit urb err %d\n", ret); + break; + } + + spin_unlock_irqrestore(&dev->txq.lock, flags); + + if (ret) { + netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret); +drop: + dev->net->stats.tx_dropped++; + if (skb) + dev_kfree_skb_any(skb); + usb_free_urb(urb); + } else + netif_dbg(dev, tx_queued, dev->net, + "> tx, len %d, type 0x%x\n", length, skb->protocol); +} + +static void lan78xx_rx_bh(struct lan78xx_net *dev) +{ + struct urb *urb; + int i; + + if (skb_queue_len(&dev->rxq) < dev->rx_qlen) { + for (i = 0; i < 10; i++) { + if (skb_queue_len(&dev->rxq) >= dev->rx_qlen) + break; + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (urb) + if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK) + return; + } + + if (skb_queue_len(&dev->rxq) < dev->rx_qlen) + tasklet_schedule(&dev->bh); + } + if (skb_queue_len(&dev->txq) < dev->tx_qlen) + netif_wake_queue(dev->net); +} + +static void lan78xx_bh(unsigned long param) +{ + struct lan78xx_net *dev = (struct lan78xx_net *)param; + struct sk_buff *skb; + struct skb_data *entry; + + while ((skb = skb_dequeue(&dev->done))) { + entry = (struct skb_data *)(skb->cb); + switch (entry->state) { + case rx_done: + entry->state = rx_cleanup; + rx_process(dev, skb); + continue; + case tx_done: + usb_free_urb(entry->urb); + dev_kfree_skb(skb); + continue; + case rx_cleanup: + usb_free_urb(entry->urb); + dev_kfree_skb(skb); + continue; + default: + netdev_dbg(dev->net, "skb state %d\n", entry->state); + return; + } + } + + if (netif_device_present(dev->net) && netif_running(dev->net)) { + if (!skb_queue_empty(&dev->txq_pend)) + lan78xx_tx_bh(dev); + + if (!timer_pending(&dev->delay) && + !test_bit(EVENT_RX_HALT, &dev->flags)) + lan78xx_rx_bh(dev); + } +} + +static void lan78xx_delayedwork(struct work_struct *work) +{ + int status; + struct lan78xx_net *dev; + + dev = container_of(work, struct lan78xx_net, wq.work); + + if (test_bit(EVENT_TX_HALT, &dev->flags)) { + unlink_urbs(dev, &dev->txq); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto fail_pipe; + status = usb_clear_halt(dev->udev, dev->pipe_out); + usb_autopm_put_interface(dev->intf); + if (status < 0 && + status != -EPIPE && + status != -ESHUTDOWN) { + if (netif_msg_tx_err(dev)) +fail_pipe: + netdev_err(dev->net, + "can't clear tx halt, status %d\n", + status); + } else { + clear_bit(EVENT_TX_HALT, &dev->flags); + if (status != -ESHUTDOWN) + netif_wake_queue(dev->net); + } + } + if (test_bit(EVENT_RX_HALT, &dev->flags)) { + unlink_urbs(dev, &dev->rxq); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto fail_halt; + status = usb_clear_halt(dev->udev, dev->pipe_in); + usb_autopm_put_interface(dev->intf); + if
(status < 0 && + status != -EPIPE && + status != -ESHUTDOWN) { + if (netif_msg_rx_err(dev)) +fail_halt: + netdev_err(dev->net, + "can't clear rx halt, status %d\n", + status); + } else { + clear_bit(EVENT_RX_HALT, &dev->flags); + tasklet_schedule(&dev->bh); + } + } + + if (test_bit(EVENT_LINK_RESET, &dev->flags)) { + int ret = 0; + + clear_bit(EVENT_LINK_RESET, &dev->flags); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto skip_reset; + if (lan78xx_link_reset(dev) < 0) { + usb_autopm_put_interface(dev->intf); +skip_reset: + netdev_info(dev->net, "link reset failed (%d)\n", + ret); + } else { + usb_autopm_put_interface(dev->intf); + } + } +} + +static void intr_complete(struct urb *urb) +{ + struct lan78xx_net *dev = urb->context; + int status = urb->status; + + switch (status) { + /* success */ + case 0: + lan78xx_status(dev, urb); + break; + + /* software-driven interface shutdown */ + case -ENOENT: /* urb killed */ + case -ESHUTDOWN: /* hardware gone */ + netif_dbg(dev, ifdown, dev->net, + "intr shutdown, code %d\n", status); + return; + + /* NOTE: not throttling like RX/TX, since this endpoint + * already polls infrequently + */ + default: + netdev_dbg(dev->net, "intr status %d\n", status); + break; + } + + if (!netif_running(dev->net)) + return; + + memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); + status = usb_submit_urb(urb, GFP_ATOMIC); + if (status != 0) + netif_err(dev, timer, dev->net, + "intr resubmit --> %d\n", status); +} + +static void lan78xx_disconnect(struct usb_interface *intf) +{ + struct lan78xx_net *dev; + struct usb_device *udev; + struct net_device *net; + + dev = usb_get_intfdata(intf); + usb_set_intfdata(intf, NULL); + if (!dev) + return; + + udev = interface_to_usbdev(intf); + + net = dev->net; + unregister_netdev(net); + + cancel_delayed_work_sync(&dev->wq); + + usb_scuttle_anchored_urbs(&dev->deferred); + + lan78xx_unbind(dev, intf); + + usb_kill_urb(dev->urb_intr); + usb_free_urb(dev->urb_intr); + + free_netdev(net); + usb_put_dev(udev); +} + +void lan78xx_tx_timeout(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + + unlink_urbs(dev, &dev->txq); + tasklet_schedule(&dev->bh); +} + +static const struct net_device_ops lan78xx_netdev_ops = { + .ndo_open = lan78xx_open, + .ndo_stop = lan78xx_stop, + .ndo_start_xmit = lan78xx_start_xmit, + .ndo_tx_timeout = lan78xx_tx_timeout, + .ndo_change_mtu = lan78xx_change_mtu, + .ndo_set_mac_address = lan78xx_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = lan78xx_ioctl, + .ndo_set_rx_mode = lan78xx_set_multicast, + .ndo_set_features = lan78xx_set_features, + .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, +}; + +static int lan78xx_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct lan78xx_net *dev; + struct net_device *netdev; + struct usb_device *udev; + int ret; + unsigned maxp; + unsigned period; + u8 *buf = NULL; + + udev = interface_to_usbdev(intf); + udev = usb_get_dev(udev); + + ret = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct lan78xx_net)); + if (!netdev) { + dev_err(&intf->dev, "Error: OOM\n"); + goto out1; + } + + /* netdev_printk() needs this */ + SET_NETDEV_DEV(netdev, &intf->dev); + + dev = netdev_priv(netdev); + dev->udev = udev; + dev->intf = intf; + dev->net = netdev; + dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV + | NETIF_MSG_PROBE | NETIF_MSG_LINK); + + skb_queue_head_init(&dev->rxq); + skb_queue_head_init(&dev->txq); + 
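+ /* queue roles: rxq/txq hold skbs whose URBs are in flight, done + * feeds completed skbs to the tasklet, rxq_pause parks frames + * received while RX is paused, and txq_pend stages packets from + * start_xmit until lan78xx_tx_bh() batches them into one bulk URB. + */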
skb_queue_head_init(&dev->done); + skb_queue_head_init(&dev->rxq_pause); + skb_queue_head_init(&dev->txq_pend); + mutex_init(&dev->phy_mutex); + + tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev); + INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); + init_usb_anchor(&dev->deferred); + + netdev->netdev_ops = &lan78xx_netdev_ops; + netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES; + netdev->ethtool_ops = &lan78xx_ethtool_ops; + + ret = lan78xx_bind(dev, intf); + if (ret < 0) + goto out2; + strcpy(netdev->name, "eth%d"); + + if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len)) + netdev->mtu = dev->hard_mtu - netdev->hard_header_len; + + dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; + dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; + dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + dev->ep_intr->desc.bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK); + period = dev->ep_intr->desc.bInterval; + + maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); + buf = kmalloc(maxp, GFP_KERNEL); + if (buf) { + dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->urb_intr) { + kfree(buf); + goto out3; + } else { + usb_fill_int_urb(dev->urb_intr, dev->udev, + dev->pipe_intr, buf, maxp, + intr_complete, dev, period); + } + } + + dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1); + + /* driver requires remote-wakeup capability during autosuspend. */ + intf->needs_remote_wakeup = 1; + + ret = register_netdev(netdev); + if (ret != 0) { + netif_err(dev, probe, netdev, "couldn't register the device\n"); + goto out2; + } + + usb_set_intfdata(intf, dev); + + ret = device_set_wakeup_enable(&udev->dev, true); + + /* Default delay of 2sec has more overhead than advantage. + * Set to 10sec as default. 
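+ * a short idle window makes the device bounce between suspend and + * resume under bursty traffic, and each resume costs USB control + * traffic before the next packet can move.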
+ */ + pm_runtime_set_autosuspend_delay(&udev->dev, + DEFAULT_AUTOSUSPEND_DELAY); + + return 0; + +out3: + lan78xx_unbind(dev, intf); +out2: + free_netdev(netdev); +out1: + usb_put_dev(udev); + + return ret; +} + +static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len) +{ + const u16 crc16poly = 0x8005; + int i; + u16 bit, crc, msb; + u8 data; + + crc = 0xFFFF; + for (i = 0; i < len; i++) { + data = *buf++; + for (bit = 0; bit < 8; bit++) { + msb = crc >> 15; + crc <<= 1; + + if (msb ^ (u16)(data & 1)) { + crc ^= crc16poly; + crc |= (u16)0x0001U; + } + data >>= 1; + } + } + + return crc; +} + +static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) +{ + u32 buf; + int ret; + int mask_index; + u16 crc; + u32 temp_wucsr; + u32 temp_pmt_ctl; + const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; + const u8 ipv6_multicast[3] = { 0x33, 0x33 }; + const u8 arp_type[2] = { 0x08, 0x06 }; + + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf &= ~MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf &= ~MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + ret = lan78xx_write_reg(dev, WUCSR, 0); + ret = lan78xx_write_reg(dev, WUCSR2, 0); + ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + + temp_wucsr = 0; + + temp_pmt_ctl = 0; + ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); + temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_; + temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_; + + for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); + + mask_index = 0; + if (wol & WAKE_PHY) { + temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + if (wol & WAKE_MAGIC) { + temp_wucsr |= WUCSR_MPEN_; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_; + } + if (wol & WAKE_BCAST) { + temp_wucsr |= WUCSR_BCST_EN_; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + if (wol & WAKE_MCAST) { + temp_wucsr |= WUCSR_WAKE_EN_; + + /* set WUF_CFG & WUF_MASK for IPv4 Multicast */ + crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3); + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + WUF_CFGX_EN_ | + WUF_CFGX_TYPE_MCAST_ | + (0 << WUF_CFGX_OFFSET_SHIFT_) | + (crc & WUF_CFGX_CRC16_MASK_)); + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + mask_index++; + + /* for IPv6 Multicast */ + crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2); + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + WUF_CFGX_EN_ | + WUF_CFGX_TYPE_MCAST_ | + (0 << WUF_CFGX_OFFSET_SHIFT_) | + (crc & WUF_CFGX_CRC16_MASK_)); + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + mask_index++; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + if (wol & WAKE_UCAST) { + temp_wucsr |= WUCSR_PFDA_EN_; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + if (wol & WAKE_ARP) { + temp_wucsr 
|= WUCSR_WAKE_EN_; + + /* set WUF_CFG & WUF_MASK + * for packettype (offset 12,13) = ARP (0x0806) + */ + crc = lan78xx_wakeframe_crc16(arp_type, 2); + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + WUF_CFGX_EN_ | + WUF_CFGX_TYPE_ALL_ | + (0 << WUF_CFGX_OFFSET_SHIFT_) | + (crc & WUF_CFGX_CRC16_MASK_)); + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + mask_index++; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + + ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr); + + /* when multiple WOL bits are set */ + if (hweight_long((unsigned long)wol) > 1) { + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); + + /* clear WUPS */ + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + buf |= PMT_CTL_WUPS_MASK_; + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf |= MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + return 0; +} + +int lan78xx_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct lan78xx_net *dev = usb_get_intfdata(intf); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u32 buf; + int ret; + int event; + + ret = 0; + event = message.event; + + if (!dev->suspend_count++) { + spin_lock_irq(&dev->txq.lock); + /* don't autosuspend while transmitting */ + if ((skb_queue_len(&dev->txq) || + skb_queue_len(&dev->txq_pend)) && + PMSG_IS_AUTO(message)) { + spin_unlock_irq(&dev->txq.lock); + ret = -EBUSY; + goto out; + } else { + set_bit(EVENT_DEV_ASLEEP, &dev->flags); + spin_unlock_irq(&dev->txq.lock); + } + + /* stop TX & RX */ + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf &= ~MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf &= ~MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + /* empty out the rx and queues */ + netif_device_detach(dev->net); + lan78xx_terminate_urbs(dev); + usb_kill_urb(dev->urb_intr); + + /* reattach */ + netif_device_attach(dev->net); + } + + if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + if (PMSG_IS_AUTO(message)) { + /* auto suspend (selective suspend) */ + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf &= ~MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf &= ~MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + ret = lan78xx_write_reg(dev, WUCSR, 0); + ret = lan78xx_write_reg(dev, WUCSR2, 0); + ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + + /* set goodframe wakeup */ + ret = lan78xx_read_reg(dev, WUCSR, &buf); + + buf |= WUCSR_RFE_WAKE_EN_; + buf |= WUCSR_STORE_WAKE_; + + ret = lan78xx_write_reg(dev, WUCSR, buf); + + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + + buf &= ~PMT_CTL_RES_CLR_WKP_EN_; + buf |= PMT_CTL_RES_CLR_WKP_STS_; + + buf |= PMT_CTL_PHY_WAKE_EN_; + buf |= PMT_CTL_WOL_EN_; + buf &= ~PMT_CTL_SUS_MODE_MASK_; + buf |= PMT_CTL_SUS_MODE_3_; + + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + + buf |= PMT_CTL_WUPS_MASK_; + + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf |= MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, 
buf); + } else { + lan78xx_set_suspend(dev, pdata->wol); + } + } + +out: + return ret; +} + +int lan78xx_resume(struct usb_interface *intf) +{ + struct lan78xx_net *dev = usb_get_intfdata(intf); + struct sk_buff *skb; + struct urb *res; + int ret; + u32 buf; + + if (!--dev->suspend_count) { + /* resume interrupt URBs */ + if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags)) + usb_submit_urb(dev->urb_intr, GFP_NOIO); + + spin_lock_irq(&dev->txq.lock); + while ((res = usb_get_from_anchor(&dev->deferred))) { + skb = (struct sk_buff *)res->context; + ret = usb_submit_urb(res, GFP_ATOMIC); + if (ret < 0) { + dev_kfree_skb_any(skb); + usb_free_urb(res); + usb_autopm_put_interface_async(dev->intf); + } else { + dev->net->trans_start = jiffies; + lan78xx_queue_skb(&dev->txq, skb, tx_start); + } + } + + clear_bit(EVENT_DEV_ASLEEP, &dev->flags); + spin_unlock_irq(&dev->txq.lock); + + if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { + if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen)) + netif_start_queue(dev->net); + tasklet_schedule(&dev->bh); + } + } + + ret = lan78xx_write_reg(dev, WUCSR2, 0); + ret = lan78xx_write_reg(dev, WUCSR, 0); + ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + + ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ | + WUCSR2_ARP_RCD_ | + WUCSR2_IPV6_TCPSYN_RCD_ | + WUCSR2_IPV4_TCPSYN_RCD_); + + ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ | + WUCSR_EEE_RX_WAKE_ | + WUCSR_PFDA_FR_ | + WUCSR_RFE_WAKE_FR_ | + WUCSR_WUFR_ | + WUCSR_MPR_ | + WUCSR_BCST_FR_); + + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf |= MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + + return 0; +} + +int lan78xx_reset_resume(struct usb_interface *intf) +{ + struct lan78xx_net *dev = usb_get_intfdata(intf); + + lan78xx_reset(dev); + return lan78xx_resume(intf); +} + +static const struct usb_device_id products[] = { + { + /* LAN7800 USB Gigabit Ethernet Device */ + USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID), + }, + { + /* LAN7850 USB Gigabit Ethernet Device */ + USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID), + }, + {}, +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver lan78xx_driver = { + .name = DRIVER_NAME, + .id_table = products, + .probe = lan78xx_probe, + .disconnect = lan78xx_disconnect, + .suspend = lan78xx_suspend, + .resume = lan78xx_resume, + .reset_resume = lan78xx_reset_resume, + .supports_autosuspend = 1, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(lan78xx_driver); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h new file mode 100644 index 000000000..ae7562ee7 --- /dev/null +++ b/drivers/net/usb/lan78xx.h @@ -0,0 +1,1069 @@ +/* + * Copyright (C) 2015 Microchip Technology + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ +#ifndef _LAN78XX_H +#define _LAN78XX_H + +/* USB Vendor Requests */ +#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0 +#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1 +#define USB_VENDOR_REQUEST_GET_STATS 0xA2 + +/* Interrupt Endpoint status word bitfields */ +#define INT_ENP_EEE_START_TX_LPI_INT BIT(26) +#define INT_ENP_EEE_STOP_TX_LPI_INT BIT(25) +#define INT_ENP_EEE_RX_LPI_INT BIT(24) +#define INT_ENP_RDFO_INT BIT(22) +#define INT_ENP_TXE_INT BIT(21) +#define INT_ENP_TX_DIS_INT BIT(19) +#define INT_ENP_RX_DIS_INT BIT(18) +#define INT_ENP_PHY_INT BIT(17) +#define INT_ENP_DP_INT BIT(16) +#define INT_ENP_MAC_ERR_INT BIT(15) +#define INT_ENP_TDFU_INT BIT(14) +#define INT_ENP_TDFO_INT BIT(13) +#define INT_ENP_UTX_FP_INT BIT(12) + +#define TX_PKT_ALIGNMENT 4 +#define RX_PKT_ALIGNMENT 4 + +/* Tx Command A */ +#define TX_CMD_A_IGE_ (0x20000000) +#define TX_CMD_A_ICE_ (0x10000000) +#define TX_CMD_A_LSO_ (0x08000000) +#define TX_CMD_A_IPE_ (0x04000000) +#define TX_CMD_A_TPE_ (0x02000000) +#define TX_CMD_A_IVTG_ (0x01000000) +#define TX_CMD_A_RVTG_ (0x00800000) +#define TX_CMD_A_FCS_ (0x00400000) +#define TX_CMD_A_LEN_MASK_ (0x000FFFFF) + +/* Tx Command B */ +#define TX_CMD_B_MSS_SHIFT_ (16) +#define TX_CMD_B_MSS_MASK_ (0x3FFF0000) +#define TX_CMD_B_MSS_MIN_ ((unsigned short)8) +#define TX_CMD_B_VTAG_MASK_ (0x0000FFFF) +#define TX_CMD_B_VTAG_PRI_MASK_ (0x0000E000) +#define TX_CMD_B_VTAG_CFI_MASK_ (0x00001000) +#define TX_CMD_B_VTAG_VID_MASK_ (0x00000FFF) + +/* Rx Command A */ +#define RX_CMD_A_ICE_ (0x80000000) +#define RX_CMD_A_TCE_ (0x40000000) +#define RX_CMD_A_CSE_MASK_ (0xC0000000) +#define RX_CMD_A_IPV_ (0x20000000) +#define RX_CMD_A_PID_MASK_ (0x18000000) +#define RX_CMD_A_PID_NONE_IP_ (0x00000000) +#define RX_CMD_A_PID_TCP_IP_ (0x08000000) +#define RX_CMD_A_PID_UDP_IP_ (0x10000000) +#define RX_CMD_A_PID_IP_ (0x18000000) +#define RX_CMD_A_PFF_ (0x04000000) +#define RX_CMD_A_BAM_ (0x02000000) +#define RX_CMD_A_MAM_ (0x01000000) +#define RX_CMD_A_FVTG_ (0x00800000) +#define RX_CMD_A_RED_ (0x00400000) +#define RX_CMD_A_RX_ERRS_MASK_ (0xC03F0000) +#define RX_CMD_A_RWT_ (0x00200000) +#define RX_CMD_A_RUNT_ (0x00100000) +#define RX_CMD_A_LONG_ (0x00080000) +#define RX_CMD_A_RXE_ (0x00040000) +#define RX_CMD_A_DRB_ (0x00020000) +#define RX_CMD_A_FCS_ (0x00010000) +#define RX_CMD_A_UAM_ (0x00008000) +#define RX_CMD_A_ICSM_ (0x00004000) +#define RX_CMD_A_LEN_MASK_ (0x00003FFF) + +/* Rx Command B */ +#define RX_CMD_B_CSUM_SHIFT_ (16) +#define RX_CMD_B_CSUM_MASK_ (0xFFFF0000) +#define RX_CMD_B_VTAG_MASK_ (0x0000FFFF) +#define RX_CMD_B_VTAG_PRI_MASK_ (0x0000E000) +#define RX_CMD_B_VTAG_CFI_MASK_ (0x00001000) +#define RX_CMD_B_VTAG_VID_MASK_ (0x00000FFF) + +/* Rx Command C */ +#define RX_CMD_C_WAKE_SHIFT_ (15) +#define RX_CMD_C_WAKE_ (0x8000) +#define RX_CMD_C_REF_FAIL_SHIFT_ (14) +#define RX_CMD_C_REF_FAIL_ (0x4000) + +/* SCSRs */ +#define NUMBER_OF_REGS (193) + +#define ID_REV (0x00) +#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000) +#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) +#define ID_REV_CHIP_ID_7800_ (0x7800) + +#define FPGA_REV (0x04) +#define FPGA_REV_MINOR_MASK_ (0x0000FF00) +#define FPGA_REV_MAJOR_MASK_ (0x000000FF) + +#define INT_STS (0x0C) +#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF) +#define INT_STS_EEE_TX_LPI_STRT_ (0x04000000) +#define INT_STS_EEE_TX_LPI_STOP_ (0x02000000) +#define INT_STS_EEE_RX_LPI_ (0x01000000) +#define INT_STS_RDFO_ (0x00400000) +#define INT_STS_TXE_ (0x00200000) +#define INT_STS_TX_DIS_ (0x00080000) +#define INT_STS_RX_DIS_ (0x00040000) +#define INT_STS_PHY_INT_ (0x00020000) 
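/* [Editor's sketch, not part of the patch] A minimal illustration of how
 * the Tx command words defined above are typically composed for a frame
 * before it is prepended to the bulk-out payload. The helper name and
 * parameters are hypothetical; only the TX_CMD_A and TX_CMD_B bit
 * definitions come from this header, and the bit usage mirrors the
 * driver's transmit path.
 */
static inline void example_fill_tx_cmds(u32 len, bool csum_offload,
					u16 vlan_tag, bool insert_vlan,
					u32 *tx_cmd_a, u32 *tx_cmd_b)
{
	/* frame length lives in the low 20 bits; ask hw to append the FCS */
	*tx_cmd_a = (len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
	if (csum_offload)
		*tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	*tx_cmd_b = 0;
	if (insert_vlan) {
		/* request tag insertion; the tag rides in command word B */
		*tx_cmd_a |= TX_CMD_A_IVTG_;
		*tx_cmd_b |= vlan_tag & TX_CMD_B_VTAG_MASK_;
	}
}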
+#define INT_STS_DP_INT_ (0x00010000) +#define INT_STS_MAC_ERR_ (0x00008000) +#define INT_STS_TDFU_ (0x00004000) +#define INT_STS_TDFO_ (0x00002000) +#define INT_STS_UFX_FP_ (0x00001000) +#define INT_STS_GPIO_MASK_ (0x00000FFF) +#define INT_STS_GPIO11_ (0x00000800) +#define INT_STS_GPIO10_ (0x00000400) +#define INT_STS_GPIO9_ (0x00000200) +#define INT_STS_GPIO8_ (0x00000100) +#define INT_STS_GPIO7_ (0x00000080) +#define INT_STS_GPIO6_ (0x00000040) +#define INT_STS_GPIO5_ (0x00000020) +#define INT_STS_GPIO4_ (0x00000010) +#define INT_STS_GPIO3_ (0x00000008) +#define INT_STS_GPIO2_ (0x00000004) +#define INT_STS_GPIO1_ (0x00000002) +#define INT_STS_GPIO0_ (0x00000001) + +#define HW_CFG (0x010) +#define HW_CFG_CLK125_EN_ (0x02000000) +#define HW_CFG_REFCLK25_EN_ (0x01000000) +#define HW_CFG_LED3_EN_ (0x00800000) +#define HW_CFG_LED2_EN_ (0x00400000) +#define HW_CFG_LED1_EN_ (0x00200000) +#define HW_CFG_LED0_EN_ (0x00100000) +#define HW_CFG_EEE_PHY_LUSU_ (0x00020000) +#define HW_CFG_EEE_TSU_ (0x00010000) +#define HW_CFG_NETDET_STS_ (0x00008000) +#define HW_CFG_NETDET_EN_ (0x00004000) +#define HW_CFG_EEM_ (0x00002000) +#define HW_CFG_RST_PROTECT_ (0x00001000) +#define HW_CFG_CONNECT_BUF_ (0x00000400) +#define HW_CFG_CONNECT_EN_ (0x00000200) +#define HW_CFG_CONNECT_POL_ (0x00000100) +#define HW_CFG_SUSPEND_N_SEL_MASK_ (0x000000C0) +#define HW_CFG_SUSPEND_N_SEL_2 (0x00000000) +#define HW_CFG_SUSPEND_N_SEL_12N (0x00000040) +#define HW_CFG_SUSPEND_N_SEL_012N (0x00000080) +#define HW_CFG_SUSPEND_N_SEL_0123N (0x000000C0) +#define HW_CFG_SUSPEND_N_POL_ (0x00000020) +#define HW_CFG_MEF_ (0x00000010) +#define HW_CFG_ETC_ (0x00000008) +#define HW_CFG_LRST_ (0x00000002) +#define HW_CFG_SRST_ (0x00000001) + +#define PMT_CTL (0x014) +#define PMT_CTL_EEE_WAKEUP_EN_ (0x00002000) +#define PMT_CTL_EEE_WUPS_ (0x00001000) +#define PMT_CTL_MAC_SRST_ (0x00000800) +#define PMT_CTL_PHY_PWRUP_ (0x00000400) +#define PMT_CTL_RES_CLR_WKP_MASK_ (0x00000300) +#define PMT_CTL_RES_CLR_WKP_STS_ (0x00000200) +#define PMT_CTL_RES_CLR_WKP_EN_ (0x00000100) +#define PMT_CTL_READY_ (0x00000080) +#define PMT_CTL_SUS_MODE_MASK_ (0x00000060) +#define PMT_CTL_SUS_MODE_0_ (0x00000000) +#define PMT_CTL_SUS_MODE_1_ (0x00000020) +#define PMT_CTL_SUS_MODE_2_ (0x00000040) +#define PMT_CTL_SUS_MODE_3_ (0x00000060) +#define PMT_CTL_PHY_RST_ (0x00000010) +#define PMT_CTL_WOL_EN_ (0x00000008) +#define PMT_CTL_PHY_WAKE_EN_ (0x00000004) +#define PMT_CTL_WUPS_MASK_ (0x00000003) +#define PMT_CTL_WUPS_MLT_ (0x00000003) +#define PMT_CTL_WUPS_MAC_ (0x00000002) +#define PMT_CTL_WUPS_PHY_ (0x00000001) + +#define GPIO_CFG0 (0x018) +#define GPIO_CFG0_GPIOEN_MASK_ (0x0000F000) +#define GPIO_CFG0_GPIOEN3_ (0x00008000) +#define GPIO_CFG0_GPIOEN2_ (0x00004000) +#define GPIO_CFG0_GPIOEN1_ (0x00002000) +#define GPIO_CFG0_GPIOEN0_ (0x00001000) +#define GPIO_CFG0_GPIOBUF_MASK_ (0x00000F00) +#define GPIO_CFG0_GPIOBUF3_ (0x00000800) +#define GPIO_CFG0_GPIOBUF2_ (0x00000400) +#define GPIO_CFG0_GPIOBUF1_ (0x00000200) +#define GPIO_CFG0_GPIOBUF0_ (0x00000100) +#define GPIO_CFG0_GPIODIR_MASK_ (0x000000F0) +#define GPIO_CFG0_GPIODIR3_ (0x00000080) +#define GPIO_CFG0_GPIODIR2_ (0x00000040) +#define GPIO_CFG0_GPIODIR1_ (0x00000020) +#define GPIO_CFG0_GPIODIR0_ (0x00000010) +#define GPIO_CFG0_GPIOD_MASK_ (0x0000000F) +#define GPIO_CFG0_GPIOD3_ (0x00000008) +#define GPIO_CFG0_GPIOD2_ (0x00000004) +#define GPIO_CFG0_GPIOD1_ (0x00000002) +#define GPIO_CFG0_GPIOD0_ (0x00000001) + +#define GPIO_CFG1 (0x01C) +#define GPIO_CFG1_GPIOEN_MASK_ (0xFF000000) +#define GPIO_CFG1_GPIOEN11_ 
(0x80000000) +#define GPIO_CFG1_GPIOEN10_ (0x40000000) +#define GPIO_CFG1_GPIOEN9_ (0x20000000) +#define GPIO_CFG1_GPIOEN8_ (0x10000000) +#define GPIO_CFG1_GPIOEN7_ (0x08000000) +#define GPIO_CFG1_GPIOEN6_ (0x04000000) +#define GPIO_CFG1_GPIOEN5_ (0x02000000) +#define GPIO_CFG1_GPIOEN4_ (0x01000000) +#define GPIO_CFG1_GPIOBUF_MASK_ (0x00FF0000) +#define GPIO_CFG1_GPIOBUF11_ (0x00800000) +#define GPIO_CFG1_GPIOBUF10_ (0x00400000) +#define GPIO_CFG1_GPIOBUF9_ (0x00200000) +#define GPIO_CFG1_GPIOBUF8_ (0x00100000) +#define GPIO_CFG1_GPIOBUF7_ (0x00080000) +#define GPIO_CFG1_GPIOBUF6_ (0x00040000) +#define GPIO_CFG1_GPIOBUF5_ (0x00020000) +#define GPIO_CFG1_GPIOBUF4_ (0x00010000) +#define GPIO_CFG1_GPIODIR_MASK_ (0x0000FF00) +#define GPIO_CFG1_GPIODIR11_ (0x00008000) +#define GPIO_CFG1_GPIODIR10_ (0x00004000) +#define GPIO_CFG1_GPIODIR9_ (0x00002000) +#define GPIO_CFG1_GPIODIR8_ (0x00001000) +#define GPIO_CFG1_GPIODIR7_ (0x00000800) +#define GPIO_CFG1_GPIODIR6_ (0x00000400) +#define GPIO_CFG1_GPIODIR5_ (0x00000200) +#define GPIO_CFG1_GPIODIR4_ (0x00000100) +#define GPIO_CFG1_GPIOD_MASK_ (0x000000FF) +#define GPIO_CFG1_GPIOD11_ (0x00000080) +#define GPIO_CFG1_GPIOD10_ (0x00000040) +#define GPIO_CFG1_GPIOD9_ (0x00000020) +#define GPIO_CFG1_GPIOD8_ (0x00000010) +#define GPIO_CFG1_GPIOD7_ (0x00000008) +#define GPIO_CFG1_GPIOD6_ (0x00000004) +#define GPIO_CFG1_GPIOD5_ (0x00000002) +#define GPIO_CFG1_GPIOD4_ (0x00000001) + +#define GPIO_WAKE (0x020) +#define GPIO_WAKE_GPIOPOL_MASK_ (0x0FFF0000) +#define GPIO_WAKE_GPIOPOL11_ (0x08000000) +#define GPIO_WAKE_GPIOPOL10_ (0x04000000) +#define GPIO_WAKE_GPIOPOL9_ (0x02000000) +#define GPIO_WAKE_GPIOPOL8_ (0x01000000) +#define GPIO_WAKE_GPIOPOL7_ (0x00800000) +#define GPIO_WAKE_GPIOPOL6_ (0x00400000) +#define GPIO_WAKE_GPIOPOL5_ (0x00200000) +#define GPIO_WAKE_GPIOPOL4_ (0x00100000) +#define GPIO_WAKE_GPIOPOL3_ (0x00080000) +#define GPIO_WAKE_GPIOPOL2_ (0x00040000) +#define GPIO_WAKE_GPIOPOL1_ (0x00020000) +#define GPIO_WAKE_GPIOPOL0_ (0x00010000) +#define GPIO_WAKE_GPIOWK_MASK_ (0x00000FFF) +#define GPIO_WAKE_GPIOWK11_ (0x00000800) +#define GPIO_WAKE_GPIOWK10_ (0x00000400) +#define GPIO_WAKE_GPIOWK9_ (0x00000200) +#define GPIO_WAKE_GPIOWK8_ (0x00000100) +#define GPIO_WAKE_GPIOWK7_ (0x00000080) +#define GPIO_WAKE_GPIOWK6_ (0x00000040) +#define GPIO_WAKE_GPIOWK5_ (0x00000020) +#define GPIO_WAKE_GPIOWK4_ (0x00000010) +#define GPIO_WAKE_GPIOWK3_ (0x00000008) +#define GPIO_WAKE_GPIOWK2_ (0x00000004) +#define GPIO_WAKE_GPIOWK1_ (0x00000002) +#define GPIO_WAKE_GPIOWK0_ (0x00000001) + +#define DP_SEL (0x024) +#define DP_SEL_DPRDY_ (0x80000000) +#define DP_SEL_RSEL_MASK_ (0x0000000F) +#define DP_SEL_RSEL_USB_PHY_CSRS_ (0x0000000F) +#define DP_SEL_RSEL_OTP_64BIT_ (0x00000009) +#define DP_SEL_RSEL_OTP_8BIT_ (0x00000008) +#define DP_SEL_RSEL_UTX_BUF_RAM_ (0x00000007) +#define DP_SEL_RSEL_DESC_RAM_ (0x00000005) +#define DP_SEL_RSEL_TXFIFO_ (0x00000004) +#define DP_SEL_RSEL_RXFIFO_ (0x00000003) +#define DP_SEL_RSEL_LSO_ (0x00000002) +#define DP_SEL_RSEL_VLAN_DA_ (0x00000001) +#define DP_SEL_RSEL_URXBUF_ (0x00000000) +#define DP_SEL_VHF_HASH_LEN (16) +#define DP_SEL_VHF_VLAN_LEN (128) + +#define DP_CMD (0x028) +#define DP_CMD_WRITE_ (0x00000001) +#define DP_CMD_READ_ (0x00000000) + +#define DP_ADDR (0x02C) +#define DP_ADDR_MASK_ (0x00003FFF) + +#define DP_DATA (0x030) + +#define E2P_CMD (0x040) +#define E2P_CMD_EPC_BUSY_ (0x80000000) +#define E2P_CMD_EPC_CMD_MASK_ (0x70000000) +#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) +#define
E2P_CMD_EPC_CMD_ERAL_ (0x60000000) +#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) +#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) +#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) +#define E2P_CMD_EPC_TIMEOUT_ (0x00000400) +#define E2P_CMD_EPC_DL_ (0x00000200) +#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF) + +#define E2P_DATA (0x044) +#define E2P_DATA_EEPROM_DATA_MASK_ (0x000000FF) + +#define BOS_ATTR (0x050) +#define BOS_ATTR_BLOCK_SIZE_MASK_ (0x000000FF) + +#define SS_ATTR (0x054) +#define SS_ATTR_POLL_INT_MASK_ (0x00FF0000) +#define SS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00) +#define SS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF) + +#define HS_ATTR (0x058) +#define HS_ATTR_POLL_INT_MASK_ (0x00FF0000) +#define HS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00) +#define HS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF) + +#define FS_ATTR (0x05C) +#define FS_ATTR_POLL_INT_MASK_ (0x00FF0000) +#define FS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00) +#define FS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF) + +#define STR_ATTR0 (0x060) +#define STR_ATTR0_CFGSTR_DESC_SIZE_MASK_ (0xFF000000) +#define STR_ATTR0_SERSTR_DESC_SIZE_MASK_ (0x00FF0000) +#define STR_ATTR0_PRODSTR_DESC_SIZE_MASK_ (0x0000FF00) +#define STR_ATTR0_MANUF_DESC_SIZE_MASK_ (0x000000FF) + +#define STR_ATTR1 (0x064) +#define STR_ATTR1_INTSTR_DESC_SIZE_MASK_ (0x000000FF) + +#define STR_FLAG_ATTR (0x068) +#define STR_FLAG_ATTR_PME_FLAGS_MASK_ (0x000000FF) + +#define USB_CFG0 (0x080) +#define USB_CFG_LPM_RESPONSE_ (0x80000000) +#define USB_CFG_LPM_CAPABILITY_ (0x40000000) +#define USB_CFG_LPM_ENBL_SLPM_ (0x20000000) +#define USB_CFG_HIRD_THR_MASK_ (0x1F000000) +#define USB_CFG_HIRD_THR_960_ (0x1C000000) +#define USB_CFG_HIRD_THR_885_ (0x1B000000) +#define USB_CFG_HIRD_THR_810_ (0x1A000000) +#define USB_CFG_HIRD_THR_735_ (0x19000000) +#define USB_CFG_HIRD_THR_660_ (0x18000000) +#define USB_CFG_HIRD_THR_585_ (0x17000000) +#define USB_CFG_HIRD_THR_510_ (0x16000000) +#define USB_CFG_HIRD_THR_435_ (0x15000000) +#define USB_CFG_HIRD_THR_360_ (0x14000000) +#define USB_CFG_HIRD_THR_285_ (0x13000000) +#define USB_CFG_HIRD_THR_210_ (0x12000000) +#define USB_CFG_HIRD_THR_135_ (0x11000000) +#define USB_CFG_HIRD_THR_60_ (0x10000000) +#define USB_CFG_MAX_BURST_BI_MASK_ (0x00F00000) +#define USB_CFG_MAX_BURST_BO_MASK_ (0x000F0000) +#define USB_CFG_MAX_DEV_SPEED_MASK_ (0x0000E000) +#define USB_CFG_MAX_DEV_SPEED_SS_ (0x00008000) +#define USB_CFG_MAX_DEV_SPEED_HS_ (0x00000000) +#define USB_CFG_MAX_DEV_SPEED_FS_ (0x00002000) +#define USB_CFG_PHY_BOOST_MASK_ (0x00000180) +#define USB_CFG_PHY_BOOST_PLUS_12_ (0x00000180) +#define USB_CFG_PHY_BOOST_PLUS_8_ (0x00000100) +#define USB_CFG_PHY_BOOST_PLUS_4_ (0x00000080) +#define USB_CFG_PHY_BOOST_NORMAL_ (0x00000000) +#define USB_CFG_BIR_ (0x00000040) +#define USB_CFG_BCE_ (0x00000020) +#define USB_CFG_PORT_SWAP_ (0x00000010) +#define USB_CFG_LPM_EN_ (0x00000008) +#define USB_CFG_RMT_WKP_ (0x00000004) +#define USB_CFG_PWR_SEL_ (0x00000002) +#define USB_CFG_STALL_BO_DIS_ (0x00000001) + +#define USB_CFG1 (0x084) +#define USB_CFG1_U1_TIMEOUT_MASK_ (0xFF000000) +#define USB_CFG1_U2_TIMEOUT_MASK_ (0x00FF0000) +#define USB_CFG1_HS_TOUT_CAL_MASK_ (0x0000E000) +#define USB_CFG1_DEV_U2_INIT_EN_ (0x00001000) +#define USB_CFG1_DEV_U2_EN_ (0x00000800) +#define USB_CFG1_DEV_U1_INIT_EN_ (0x00000400) +#define USB_CFG1_DEV_U1_EN_ (0x00000200) +#define USB_CFG1_LTM_ENABLE_ (0x00000100) +#define USB_CFG1_FS_TOUT_CAL_MASK_ (0x00000070) +#define 
USB_CFG1_SCALE_DOWN_MASK_ (0x00000003) +#define USB_CFG1_SCALE_DOWN_MODE3_ (0x00000003) +#define USB_CFG1_SCALE_DOWN_MODE2_ (0x00000002) +#define USB_CFG1_SCALE_DOWN_MODE1_ (0x00000001) +#define USB_CFG1_SCALE_DOWN_MODE0_ (0x00000000) + +#define USB_CFG2 (0x088) +#define USB_CFG2_SS_DETACH_TIME_MASK_ (0xFFFF0000) +#define USB_CFG2_HS_DETACH_TIME_MASK_ (0x0000FFFF) + +#define BURST_CAP (0x090) +#define BURST_CAP_SIZE_MASK_ (0x000000FF) + +#define BULK_IN_DLY (0x094) +#define BULK_IN_DLY_MASK_ (0x0000FFFF) + +#define INT_EP_CTL (0x098) +#define INT_EP_INTEP_ON_ (0x80000000) +#define INT_STS_EEE_TX_LPI_STRT_EN_ (0x04000000) +#define INT_STS_EEE_TX_LPI_STOP_EN_ (0x02000000) +#define INT_STS_EEE_RX_LPI_EN_ (0x01000000) +#define INT_EP_RDFO_EN_ (0x00400000) +#define INT_EP_TXE_EN_ (0x00200000) +#define INT_EP_TX_DIS_EN_ (0x00080000) +#define INT_EP_RX_DIS_EN_ (0x00040000) +#define INT_EP_PHY_INT_EN_ (0x00020000) +#define INT_EP_DP_INT_EN_ (0x00010000) +#define INT_EP_MAC_ERR_EN_ (0x00008000) +#define INT_EP_TDFU_EN_ (0x00004000) +#define INT_EP_TDFO_EN_ (0x00002000) +#define INT_EP_UTX_FP_EN_ (0x00001000) +#define INT_EP_GPIO_EN_MASK_ (0x00000FFF) + +#define PIPE_CTL (0x09C) +#define PIPE_CTL_TXSWING_ (0x00000040) +#define PIPE_CTL_TXMARGIN_MASK_ (0x00000038) +#define PIPE_CTL_TXDEEMPHASIS_MASK_ (0x00000006) +#define PIPE_CTL_ELASTICITYBUFFERMODE_ (0x00000001) + +#define U1_LATENCY (0xA0) +#define U2_LATENCY (0xA4) + +#define USB_STATUS (0x0A8) +#define USB_STATUS_REMOTE_WK_ (0x00100000) +#define USB_STATUS_FUNC_REMOTE_WK_ (0x00080000) +#define USB_STATUS_LTM_ENABLE_ (0x00040000) +#define USB_STATUS_U2_ENABLE_ (0x00020000) +#define USB_STATUS_U1_ENABLE_ (0x00010000) +#define USB_STATUS_SET_SEL_ (0x00000020) +#define USB_STATUS_REMOTE_WK_STS_ (0x00000010) +#define USB_STATUS_FUNC_REMOTE_WK_STS_ (0x00000008) +#define USB_STATUS_LTM_ENABLE_STS_ (0x00000004) +#define USB_STATUS_U2_ENABLE_STS_ (0x00000002) +#define USB_STATUS_U1_ENABLE_STS_ (0x00000001) + +#define USB_CFG3 (0x0AC) +#define USB_CFG3_EN_U2_LTM_ (0x40000000) +#define USB_CFG3_BULK_OUT_NUMP_OVR_ (0x20000000) +#define USB_CFG3_DIS_FAST_U1_EXIT_ (0x10000000) +#define USB_CFG3_LPM_NYET_THR_ (0x0F000000) +#define USB_CFG3_RX_DET_2_POL_LFPS_ (0x00800000) +#define USB_CFG3_LFPS_FILT_ (0x00400000) +#define USB_CFG3_SKIP_RX_DET_ (0x00200000) +#define USB_CFG3_DELAY_P1P2P3_ (0x001C0000) +#define USB_CFG3_DELAY_PHY_PWR_CHG_ (0x00020000) +#define USB_CFG3_U1U2_EXIT_FR_ (0x00010000) +#define USB_CFG3_REQ_P1P2P3 (0x00008000) +#define USB_CFG3_HST_PRT_CMPL_ (0x00004000) +#define USB_CFG3_DIS_SCRAMB_ (0x00002000) +#define USB_CFG3_PWR_DN_SCALE_ (0x00001FFF) + +#define RFE_CTL (0x0B0) +#define RFE_CTL_IGMP_COE_ (0x00004000) +#define RFE_CTL_ICMP_COE_ (0x00002000) +#define RFE_CTL_TCPUDP_COE_ (0x00001000) +#define RFE_CTL_IP_COE_ (0x00000800) +#define RFE_CTL_BCAST_EN_ (0x00000400) +#define RFE_CTL_MCAST_EN_ (0x00000200) +#define RFE_CTL_UCAST_EN_ (0x00000100) +#define RFE_CTL_VLAN_STRIP_ (0x00000080) +#define RFE_CTL_DISCARD_UNTAGGED_ (0x00000040) +#define RFE_CTL_VLAN_FILTER_ (0x00000020) +#define RFE_CTL_SA_FILTER_ (0x00000010) +#define RFE_CTL_MCAST_HASH_ (0x00000008) +#define RFE_CTL_DA_HASH_ (0x00000004) +#define RFE_CTL_DA_PERFECT_ (0x00000002) +#define RFE_CTL_RST_ (0x00000001) + +#define VLAN_TYPE (0x0B4) +#define VLAN_TYPE_MASK_ (0x0000FFFF) + +#define FCT_RX_CTL (0x0C0) +#define FCT_RX_CTL_EN_ (0x80000000) +#define FCT_RX_CTL_RST_ (0x40000000) +#define FCT_RX_CTL_SBF_ (0x02000000) +#define FCT_RX_CTL_OVFL_ (0x01000000) +#define 
FCT_RX_CTL_DROP_ (0x00800000) +#define FCT_RX_CTL_NOT_EMPTY_ (0x00400000) +#define FCT_RX_CTL_EMPTY_ (0x00200000) +#define FCT_RX_CTL_DIS_ (0x00100000) +#define FCT_RX_CTL_USED_MASK_ (0x0000FFFF) + +#define FCT_TX_CTL (0x0C4) +#define FCT_TX_CTL_EN_ (0x80000000) +#define FCT_TX_CTL_RST_ (0x40000000) +#define FCT_TX_CTL_NOT_EMPTY_ (0x00400000) +#define FCT_TX_CTL_EMPTY_ (0x00200000) +#define FCT_TX_CTL_DIS_ (0x00100000) +#define FCT_TX_CTL_USED_MASK_ (0x0000FFFF) + +#define FCT_RX_FIFO_END (0x0C8) +#define FCT_RX_FIFO_END_MASK_ (0x0000007F) + +#define FCT_TX_FIFO_END (0x0CC) +#define FCT_TX_FIFO_END_MASK_ (0x0000003F) + +#define FCT_FLOW (0x0D0) +#define FCT_FLOW_OFF_MASK_ (0x00007F00) +#define FCT_FLOW_ON_MASK_ (0x0000007F) + +#define RX_DP_STOR (0x0D4) +#define RX_DP_STORE_TOT_RXUSED_MASK_ (0xFFFF0000) +#define RX_DP_STORE_UTX_RXUSED_MASK_ (0x0000FFFF) + +#define TX_DP_STOR (0x0D8) +#define TX_DP_STORE_TOT_TXUSED_MASK_ (0xFFFF0000) +#define TX_DP_STORE_URX_TXUSED_MASK_ (0x0000FFFF) + +#define LTM_BELT_IDLE0 (0x0E0) +#define LTM_BELT_IDLE0_IDLE1000_ (0x0FFF0000) +#define LTM_BELT_IDLE0_IDLE100_ (0x00000FFF) + +#define LTM_BELT_IDLE1 (0x0E4) +#define LTM_BELT_IDLE1_IDLE10_ (0x00000FFF) + +#define LTM_BELT_ACT0 (0x0E8) +#define LTM_BELT_ACT0_ACT1000_ (0x0FFF0000) +#define LTM_BELT_ACT0_ACT100_ (0x00000FFF) + +#define LTM_BELT_ACT1 (0x0EC) +#define LTM_BELT_ACT1_ACT10_ (0x00000FFF) + +#define LTM_INACTIVE0 (0x0F0) +#define LTM_INACTIVE0_TIMER1000_ (0xFFFF0000) +#define LTM_INACTIVE0_TIMER100_ (0x0000FFFF) + +#define LTM_INACTIVE1 (0x0F4) +#define LTM_INACTIVE1_TIMER10_ (0x0000FFFF) + +#define MAC_CR (0x100) +#define MAC_CR_GMII_EN_ (0x00080000) +#define MAC_CR_EEE_TX_CLK_STOP_EN_ (0x00040000) +#define MAC_CR_EEE_EN_ (0x00020000) +#define MAC_CR_EEE_TLAR_EN_ (0x00010000) +#define MAC_CR_ADP_ (0x00002000) +#define MAC_CR_AUTO_DUPLEX_ (0x00001000) +#define MAC_CR_AUTO_SPEED_ (0x00000800) +#define MAC_CR_LOOPBACK_ (0x00000400) +#define MAC_CR_BOLMT_MASK_ (0x000000C0) +#define MAC_CR_FULL_DUPLEX_ (0x00000008) +#define MAC_CR_SPEED_MASK_ (0x00000006) +#define MAC_CR_SPEED_1000_ (0x00000004) +#define MAC_CR_SPEED_100_ (0x00000002) +#define MAC_CR_SPEED_10_ (0x00000000) +#define MAC_CR_RST_ (0x00000001) + +#define MAC_RX (0x104) +#define MAC_RX_MAX_SIZE_SHIFT_ (16) +#define MAC_RX_MAX_SIZE_MASK_ (0x3FFF0000) +#define MAC_RX_FCS_STRIP_ (0x00000010) +#define MAC_RX_VLAN_FSE_ (0x00000004) +#define MAC_RX_RXD_ (0x00000002) +#define MAC_RX_RXEN_ (0x00000001) + +#define MAC_TX (0x108) +#define MAC_TX_BAD_FCS_ (0x00000004) +#define MAC_TX_TXD_ (0x00000002) +#define MAC_TX_TXEN_ (0x00000001) + +#define FLOW (0x10C) +#define FLOW_CR_FORCE_FC_ (0x80000000) +#define FLOW_CR_TX_FCEN_ (0x40000000) +#define FLOW_CR_RX_FCEN_ (0x20000000) +#define FLOW_CR_FPF_ (0x10000000) +#define FLOW_CR_FCPT_MASK_ (0x0000FFFF) + +#define RAND_SEED (0x110) +#define RAND_SEED_MASK_ (0x0000FFFF) + +#define ERR_STS (0x114) +#define ERR_STS_FERR_ (0x00000100) +#define ERR_STS_LERR_ (0x00000080) +#define ERR_STS_RFERR_ (0x00000040) +#define ERR_STS_ECERR_ (0x00000010) +#define ERR_STS_ALERR_ (0x00000008) +#define ERR_STS_URERR_ (0x00000004) + +#define RX_ADDRH (0x118) +#define RX_ADDRH_MASK_ (0x0000FFFF) + +#define RX_ADDRL (0x11C) +#define RX_ADDRL_MASK_ (0xFFFFFFFF) + +#define MII_ACC (0x120) +#define MII_ACC_PHY_ADDR_SHIFT_ (11) +#define MII_ACC_PHY_ADDR_MASK_ (0x0000F800) +#define MII_ACC_MIIRINDA_SHIFT_ (6) +#define MII_ACC_MIIRINDA_MASK_ (0x000007C0) +#define MII_ACC_MII_READ_ (0x00000000) +#define MII_ACC_MII_WRITE_ (0x00000002) 
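/* [Editor's sketch, not part of the patch] The usual MDIO read sequence
 * through the MII_ACC/MII_DATA registers defined here: compose the access
 * word from the PHY address and register index, start the transaction by
 * setting the busy bit, poll until it clears, then fetch the data.
 * lan78xx_read_reg/lan78xx_write_reg are the driver's register accessors
 * seen elsewhere in this patch; the helper name and poll bound are
 * assumptions for illustration.
 */
static int example_mdio_read(struct lan78xx_net *dev, int phy_id, int idx)
{
	u32 val;
	int i;

	/* compose the access word: PHY address, register index, read op */
	val = ((u32)phy_id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	val |= ((u32)idx << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	val |= MII_ACC_MII_READ_ | MII_ACC_MII_BUSY_;
	lan78xx_write_reg(dev, MII_ACC, val);

	/* poll until the MAC clears the busy bit, then read the result */
	for (i = 0; i < 100; i++) {
		lan78xx_read_reg(dev, MII_ACC, &val);
		if (!(val & MII_ACC_MII_BUSY_)) {
			lan78xx_read_reg(dev, MII_DATA, &val);
			return (int)(val & MII_DATA_MASK_);
		}
		usleep_range(1, 10);
	}

	return -EIO;
}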
+#define MII_ACC_MII_BUSY_ (0x00000001) + +#define MII_DATA (0x124) +#define MII_DATA_MASK_ (0x0000FFFF) + +#define MAC_RGMII_ID (0x128) +#define MAC_RGMII_ID_TXC_DELAY_EN_ (0x00000002) +#define MAC_RGMII_ID_RXC_DELAY_EN_ (0x00000001) + +#define EEE_TX_LPI_REQ_DLY (0x130) +#define EEE_TX_LPI_REQ_DLY_CNT_MASK_ (0xFFFFFFFF) + +#define EEE_TW_TX_SYS (0x134) +#define EEE_TW_TX_SYS_CNT1G_MASK_ (0xFFFF0000) +#define EEE_TW_TX_SYS_CNT100M_MASK_ (0x0000FFFF) + +#define EEE_TX_LPI_REM_DLY (0x138) +#define EEE_TX_LPI_REM_DLY_CNT_ (0x00FFFFFF) + +#define WUCSR (0x140) +#define WUCSR_TESTMODE_ (0x80000000) +#define WUCSR_RFE_WAKE_EN_ (0x00004000) +#define WUCSR_EEE_TX_WAKE_ (0x00002000) +#define WUCSR_EEE_TX_WAKE_EN_ (0x00001000) +#define WUCSR_EEE_RX_WAKE_ (0x00000800) +#define WUCSR_EEE_RX_WAKE_EN_ (0x00000400) +#define WUCSR_RFE_WAKE_FR_ (0x00000200) +#define WUCSR_STORE_WAKE_ (0x00000100) +#define WUCSR_PFDA_FR_ (0x00000080) +#define WUCSR_WUFR_ (0x00000040) +#define WUCSR_MPR_ (0x00000020) +#define WUCSR_BCST_FR_ (0x00000010) +#define WUCSR_PFDA_EN_ (0x00000008) +#define WUCSR_WAKE_EN_ (0x00000004) +#define WUCSR_MPEN_ (0x00000002) +#define WUCSR_BCST_EN_ (0x00000001) + +#define WK_SRC (0x144) +#define WK_SRC_GPIOX_INT_WK_SHIFT_ (20) +#define WK_SRC_GPIOX_INT_WK_MASK_ (0xFFF00000) +#define WK_SRC_IPV6_TCPSYN_RCD_WK_ (0x00010000) +#define WK_SRC_IPV4_TCPSYN_RCD_WK_ (0x00008000) +#define WK_SRC_EEE_TX_WK_ (0x00004000) +#define WK_SRC_EEE_RX_WK_ (0x00002000) +#define WK_SRC_GOOD_FR_WK_ (0x00001000) +#define WK_SRC_PFDA_FR_WK_ (0x00000800) +#define WK_SRC_MP_FR_WK_ (0x00000400) +#define WK_SRC_BCAST_FR_WK_ (0x00000200) +#define WK_SRC_WU_FR_WK_ (0x00000100) +#define WK_SRC_WUFF_MATCH_MASK_ (0x0000001F) + +#define WUF_CFG0 (0x150) +#define NUM_OF_WUF_CFG (32) +#define WUF_CFG_BEGIN (WUF_CFG0) +#define WUF_CFG(index) (WUF_CFG_BEGIN + (4 * (index))) +#define WUF_CFGX_EN_ (0x80000000) +#define WUF_CFGX_TYPE_MASK_ (0x03000000) +#define WUF_CFGX_TYPE_MCAST_ (0x02000000) +#define WUF_CFGX_TYPE_ALL_ (0x01000000) +#define WUF_CFGX_TYPE_UCAST_ (0x00000000) +#define WUF_CFGX_OFFSET_SHIFT_ (16) +#define WUF_CFGX_OFFSET_MASK_ (0x00FF0000) +#define WUF_CFGX_CRC16_MASK_ (0x0000FFFF) + +#define WUF_MASK0_0 (0x200) +#define WUF_MASK0_1 (0x204) +#define WUF_MASK0_2 (0x208) +#define WUF_MASK0_3 (0x20C) +#define NUM_OF_WUF_MASK (32) +#define WUF_MASK0_BEGIN (WUF_MASK0_0) +#define WUF_MASK1_BEGIN (WUF_MASK0_1) +#define WUF_MASK2_BEGIN (WUF_MASK0_2) +#define WUF_MASK3_BEGIN (WUF_MASK0_3) +#define WUF_MASK0(index) (WUF_MASK0_BEGIN + (0x10 * (index))) +#define WUF_MASK1(index) (WUF_MASK1_BEGIN + (0x10 * (index))) +#define WUF_MASK2(index) (WUF_MASK2_BEGIN + (0x10 * (index))) +#define WUF_MASK3(index) (WUF_MASK3_BEGIN + (0x10 * (index))) + +#define MAF_BASE (0x400) +#define MAF_HIX (0x00) +#define MAF_LOX (0x04) +#define NUM_OF_MAF (33) +#define MAF_HI_BEGIN (MAF_BASE + MAF_HIX) +#define MAF_LO_BEGIN (MAF_BASE + MAF_LOX) +#define MAF_HI(index) (MAF_BASE + (8 * (index)) + (MAF_HIX)) +#define MAF_LO(index) (MAF_BASE + (8 * (index)) + (MAF_LOX)) +#define MAF_HI_VALID_ (0x80000000) +#define MAF_HI_TYPE_MASK_ (0x40000000) +#define MAF_HI_TYPE_SRC_ (0x40000000) +#define MAF_HI_TYPE_DST_ (0x00000000) +#define MAF_HI_ADDR_MASK (0x0000FFFF) +#define MAF_LO_ADDR_MASK (0xFFFFFFFF) + +#define WUCSR2 (0x600) +#define WUCSR2_CSUM_DISABLE_ (0x80000000) +#define WUCSR2_NA_SA_SEL_ (0x00000100) +#define WUCSR2_NS_RCD_ (0x00000080) +#define WUCSR2_ARP_RCD_ (0x00000040) +#define WUCSR2_IPV6_TCPSYN_RCD_ (0x00000020) +#define 
WUCSR2_IPV4_TCPSYN_RCD_ (0x00000010) +#define WUCSR2_NS_OFFLOAD_EN_ (0x00000008) +#define WUCSR2_ARP_OFFLOAD_EN_ (0x00000004) +#define WUCSR2_IPV6_TCPSYN_WAKE_EN_ (0x00000002) +#define WUCSR2_IPV4_TCPSYN_WAKE_EN_ (0x00000001) + +#define NS1_IPV6_ADDR_DEST0 (0x610) +#define NS1_IPV6_ADDR_DEST1 (0x614) +#define NS1_IPV6_ADDR_DEST2 (0x618) +#define NS1_IPV6_ADDR_DEST3 (0x61C) + +#define NS1_IPV6_ADDR_SRC0 (0x620) +#define NS1_IPV6_ADDR_SRC1 (0x624) +#define NS1_IPV6_ADDR_SRC2 (0x628) +#define NS1_IPV6_ADDR_SRC3 (0x62C) + +#define NS1_ICMPV6_ADDR0_0 (0x630) +#define NS1_ICMPV6_ADDR0_1 (0x634) +#define NS1_ICMPV6_ADDR0_2 (0x638) +#define NS1_ICMPV6_ADDR0_3 (0x63C) + +#define NS1_ICMPV6_ADDR1_0 (0x640) +#define NS1_ICMPV6_ADDR1_1 (0x644) +#define NS1_ICMPV6_ADDR1_2 (0x648) +#define NS1_ICMPV6_ADDR1_3 (0x64C) + +#define NS2_IPV6_ADDR_DEST0 (0x650) +#define NS2_IPV6_ADDR_DEST1 (0x654) +#define NS2_IPV6_ADDR_DEST2 (0x658) +#define NS2_IPV6_ADDR_DEST3 (0x65C) + +#define NS2_IPV6_ADDR_SRC0 (0x660) +#define NS2_IPV6_ADDR_SRC1 (0x664) +#define NS2_IPV6_ADDR_SRC2 (0x668) +#define NS2_IPV6_ADDR_SRC3 (0x66C) + +#define NS2_ICMPV6_ADDR0_0 (0x670) +#define NS2_ICMPV6_ADDR0_1 (0x674) +#define NS2_ICMPV6_ADDR0_2 (0x678) +#define NS2_ICMPV6_ADDR0_3 (0x67C) + +#define NS2_ICMPV6_ADDR1_0 (0x680) +#define NS2_ICMPV6_ADDR1_1 (0x684) +#define NS2_ICMPV6_ADDR1_2 (0x688) +#define NS2_ICMPV6_ADDR1_3 (0x68C) + +#define SYN_IPV4_ADDR_SRC (0x690) +#define SYN_IPV4_ADDR_DEST (0x694) +#define SYN_IPV4_TCP_PORTS (0x698) +#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_SHIFT_ (16) +#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_MASK_ (0xFFFF0000) +#define SYN_IPV4_TCP_PORTS_IPV4_SRC_PORT_MASK_ (0x0000FFFF) + +#define SYN_IPV6_ADDR_SRC0 (0x69C) +#define SYN_IPV6_ADDR_SRC1 (0x6A0) +#define SYN_IPV6_ADDR_SRC2 (0x6A4) +#define SYN_IPV6_ADDR_SRC3 (0x6A8) + +#define SYN_IPV6_ADDR_DEST0 (0x6AC) +#define SYN_IPV6_ADDR_DEST1 (0x6B0) +#define SYN_IPV6_ADDR_DEST2 (0x6B4) +#define SYN_IPV6_ADDR_DEST3 (0x6B8) + +#define SYN_IPV6_TCP_PORTS (0x6BC) +#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_SHIFT_ (16) +#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_MASK_ (0xFFFF0000) +#define SYN_IPV6_TCP_PORTS_IPV6_SRC_PORT_MASK_ (0x0000FFFF) + +#define ARP_SPA (0x6C0) +#define ARP_TPA (0x6C4) + +#define PHY_DEV_ID (0x700) +#define PHY_DEV_ID_REV_SHIFT_ (28) +#define PHY_DEV_ID_REV_MASK_ (0xF0000000) +#define PHY_DEV_ID_MODEL_SHIFT_ (22) +#define PHY_DEV_ID_MODEL_MASK_ (0x0FC00000) +#define PHY_DEV_ID_OUI_MASK_ (0x003FFFFF) + +#define OTP_BASE_ADDR (0x00001000) +#define OTP_ADDR_RANGE_ (0x1FF) + +#define OTP_PWR_DN (OTP_BASE_ADDR + 4 * 0x00) +#define OTP_PWR_DN_PWRDN_N_ (0x01) + +#define OTP_ADDR1 (OTP_BASE_ADDR + 4 * 0x01) +#define OTP_ADDR1_15_11 (0x1F) + +#define OTP_ADDR2 (OTP_BASE_ADDR + 4 * 0x02) +#define OTP_ADDR2_10_3 (0xFF) + +#define OTP_ADDR3 (OTP_BASE_ADDR + 4 * 0x03) +#define OTP_ADDR3_2_0 (0x03) + +#define OTP_PRGM_DATA (OTP_BASE_ADDR + 4 * 0x04) + +#define OTP_PRGM_MODE (OTP_BASE_ADDR + 4 * 0x05) +#define OTP_PRGM_MODE_BYTE_ (0x01) + +#define OTP_RD_DATA (OTP_BASE_ADDR + 4 * 0x06) + +#define OTP_FUNC_CMD (OTP_BASE_ADDR + 4 * 0x08) +#define OTP_FUNC_CMD_RESET_ (0x04) +#define OTP_FUNC_CMD_PROGRAM_ (0x02) +#define OTP_FUNC_CMD_READ_ (0x01) + +#define OTP_TST_CMD (OTP_BASE_ADDR + 4 * 0x09) +#define OTP_TST_CMD_TEST_DEC_SEL_ (0x10) +#define OTP_TST_CMD_PRGVRFY_ (0x08) +#define OTP_TST_CMD_WRTEST_ (0x04) +#define OTP_TST_CMD_TESTDEC_ (0x02) +#define OTP_TST_CMD_BLANKCHECK_ (0x01) + +#define OTP_CMD_GO (OTP_BASE_ADDR + 4 * 
0x0A) +#define OTP_CMD_GO_GO_ (0x01) + +#define OTP_PASS_FAIL (OTP_BASE_ADDR + 4 * 0x0B) +#define OTP_PASS_FAIL_PASS_ (0x02) +#define OTP_PASS_FAIL_FAIL_ (0x01) + +#define OTP_STATUS (OTP_BASE_ADDR + 4 * 0x0C) +#define OTP_STATUS_OTP_LOCK_ (0x10) +#define OTP_STATUS_WEB_ (0x08) +#define OTP_STATUS_PGMEN (0x04) +#define OTP_STATUS_CPUMPEN_ (0x02) +#define OTP_STATUS_BUSY_ (0x01) + +#define OTP_MAX_PRG (OTP_BASE_ADDR + 4 * 0x0D) +#define OTP_MAX_PRG_MAX_PROG (0x1F) + +#define OTP_INTR_STATUS (OTP_BASE_ADDR + 4 * 0x10) +#define OTP_INTR_STATUS_READY_ (0x01) + +#define OTP_INTR_MASK (OTP_BASE_ADDR + 4 * 0x11) +#define OTP_INTR_MASK_READY_ (0x01) + +#define OTP_RSTB_PW1 (OTP_BASE_ADDR + 4 * 0x14) +#define OTP_RSTB_PW2 (OTP_BASE_ADDR + 4 * 0x15) +#define OTP_PGM_PW1 (OTP_BASE_ADDR + 4 * 0x18) +#define OTP_PGM_PW2 (OTP_BASE_ADDR + 4 * 0x19) +#define OTP_READ_PW1 (OTP_BASE_ADDR + 4 * 0x1C) +#define OTP_READ_PW2 (OTP_BASE_ADDR + 4 * 0x1D) +#define OTP_TCRST (OTP_BASE_ADDR + 4 * 0x20) +#define OTP_RSRD (OTP_BASE_ADDR + 4 * 0x21) +#define OTP_TREADEN_VAL (OTP_BASE_ADDR + 4 * 0x22) +#define OTP_TDLES_VAL (OTP_BASE_ADDR + 4 * 0x23) +#define OTP_TWWL_VAL (OTP_BASE_ADDR + 4 * 0x24) +#define OTP_TDLEH_VAL (OTP_BASE_ADDR + 4 * 0x25) +#define OTP_TWPED_VAL (OTP_BASE_ADDR + 4 * 0x26) +#define OTP_TPES_VAL (OTP_BASE_ADDR + 4 * 0x27) +#define OTP_TCPS_VAL (OTP_BASE_ADDR + 4 * 0x28) +#define OTP_TCPH_VAL (OTP_BASE_ADDR + 4 * 0x29) +#define OTP_TPGMVFY_VAL (OTP_BASE_ADDR + 4 * 0x2A) +#define OTP_TPEH_VAL (OTP_BASE_ADDR + 4 * 0x2B) +#define OTP_TPGRST_VAL (OTP_BASE_ADDR + 4 * 0x2C) +#define OTP_TCLES_VAL (OTP_BASE_ADDR + 4 * 0x2D) +#define OTP_TCLEH_VAL (OTP_BASE_ADDR + 4 * 0x2E) +#define OTP_TRDES_VAL (OTP_BASE_ADDR + 4 * 0x2F) +#define OTP_TBCACC_VAL (OTP_BASE_ADDR + 4 * 0x30) +#define OTP_TAAC_VAL (OTP_BASE_ADDR + 4 * 0x31) +#define OTP_TACCT_VAL (OTP_BASE_ADDR + 4 * 0x32) +#define OTP_TRDEP_VAL (OTP_BASE_ADDR + 4 * 0x38) +#define OTP_TPGSV_VAL (OTP_BASE_ADDR + 4 * 0x39) +#define OTP_TPVSR_VAL (OTP_BASE_ADDR + 4 * 0x3A) +#define OTP_TPVHR_VAL (OTP_BASE_ADDR + 4 * 0x3B) +#define OTP_TPVSA_VAL (OTP_BASE_ADDR + 4 * 0x3C) + +#define PHY_ID1 (0x02) +#define PHY_ID2 (0x03) + +#define PHY_DEV_ID_OUI_VTSE (0x04001C) +#define PHY_DEV_ID_MODEL_VTSE_8502 (0x23) + +#define PHY_AUTONEG_ADV (0x04) +#define NWAY_AR_NEXT_PAGE_ (0x8000) +#define NWAY_AR_REMOTE_FAULT_ (0x2000) +#define NWAY_AR_ASM_DIR_ (0x0800) +#define NWAY_AR_PAUSE_ (0x0400) +#define NWAY_AR_100T4_CAPS_ (0x0200) +#define NWAY_AR_100TX_FD_CAPS_ (0x0100) +#define NWAY_AR_SELECTOR_FIELD_ (0x001F) +#define NWAY_AR_100TX_HD_CAPS_ (0x0080) +#define NWAY_AR_10T_FD_CAPS_ (0x0040) +#define NWAY_AR_10T_HD_CAPS_ (0x0020) +#define NWAY_AR_ALL_CAPS_ (NWAY_AR_10T_HD_CAPS_ | \ + NWAY_AR_10T_FD_CAPS_ | \ + NWAY_AR_100TX_HD_CAPS_ | \ + NWAY_AR_100TX_FD_CAPS_) +#define NWAY_AR_PAUSE_MASK (NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_) + +#define PHY_LP_ABILITY (0x05) +#define NWAY_LPAR_NEXT_PAGE_ (0x8000) +#define NWAY_LPAR_ACKNOWLEDGE_ (0x4000) +#define NWAY_LPAR_REMOTE_FAULT_ (0x2000) +#define NWAY_LPAR_ASM_DIR_ (0x0800) +#define NWAY_LPAR_PAUSE_ (0x0400) +#define NWAY_LPAR_100T4_CAPS_ (0x0200) +#define NWAY_LPAR_100TX_FD_CAPS_ (0x0100) +#define NWAY_LPAR_100TX_HD_CAPS_ (0x0080) +#define NWAY_LPAR_10T_FD_CAPS_ (0x0040) +#define NWAY_LPAR_10T_HD_CAPS_ (0x0020) +#define NWAY_LPAR_SELECTOR_FIELD_ (0x001F) + +#define PHY_AUTONEG_EXP (0x06) +#define NWAY_ER_PAR_DETECT_FAULT_ (0x0010) +#define NWAY_ER_LP_NEXT_PAGE_CAPS_ (0x0008) +#define NWAY_ER_NEXT_PAGE_CAPS_ (0x0004) +#define 
NWAY_ER_PAGE_RXD_ (0x0002) +#define NWAY_ER_LP_NWAY_CAPS_ (0x0001) + +#define PHY_NEXT_PAGE_TX (0x07) +#define NPTX_NEXT_PAGE_ (0x8000) +#define NPTX_MSG_PAGE_ (0x2000) +#define NPTX_ACKNOWLDGE2_ (0x1000) +#define NPTX_TOGGLE_ (0x0800) +#define NPTX_MSG_CODE_FIELD_ (0x0001) + +#define PHY_LP_NEXT_PAGE (0x08) +#define LP_RNPR_NEXT_PAGE_ (0x8000) +#define LP_RNPR_ACKNOWLDGE_ (0x4000) +#define LP_RNPR_MSG_PAGE_ (0x2000) +#define LP_RNPR_ACKNOWLDGE2_ (0x1000) +#define LP_RNPR_TOGGLE_ (0x0800) +#define LP_RNPR_MSG_CODE_FIELD_ (0x0001) + +#define PHY_1000T_CTRL (0x09) +#define CR_1000T_TEST_MODE_4_ (0x8000) +#define CR_1000T_TEST_MODE_3_ (0x6000) +#define CR_1000T_TEST_MODE_2_ (0x4000) +#define CR_1000T_TEST_MODE_1_ (0x2000) +#define CR_1000T_MS_ENABLE_ (0x1000) +#define CR_1000T_MS_VALUE_ (0x0800) +#define CR_1000T_REPEATER_DTE_ (0x0400) +#define CR_1000T_FD_CAPS_ (0x0200) +#define CR_1000T_HD_CAPS_ (0x0100) +#define CR_1000T_ASYM_PAUSE_ (0x0080) +#define CR_1000T_TEST_MODE_NORMAL_ (0x0000) + +#define PHY_1000T_STATUS (0x0A) +#define SR_1000T_MS_CONFIG_FAULT_ (0x8000) +#define SR_1000T_MS_CONFIG_RES_ (0x4000) +#define SR_1000T_LOCAL_RX_STATUS_ (0x2000) +#define SR_1000T_REMOTE_RX_STATUS_ (0x1000) +#define SR_1000T_LP_FD_CAPS_ (0x0800) +#define SR_1000T_LP_HD_CAPS_ (0x0400) +#define SR_1000T_ASYM_PAUSE_DIR_ (0x0100) +#define SR_1000T_IDLE_ERROR_CNT_ (0x00FF) +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +#define PHY_EXT_STATUS (0x0F) +#define IEEE_ESR_1000X_FD_CAPS_ (0x8000) +#define IEEE_ESR_1000X_HD_CAPS_ (0x4000) +#define IEEE_ESR_1000T_FD_CAPS_ (0x2000) +#define IEEE_ESR_1000T_HD_CAPS_ (0x1000) +#define PHY_TX_POLARITY_MASK_ (0x0100) +#define PHY_TX_NORMAL_POLARITY_ (0x0000) +#define AUTO_POLARITY_DISABLE_ (0x0010) + +#define PHY_MMD_CTL (0x0D) +#define PHY_MMD_CTRL_OP_MASK_ (0xC000) +#define PHY_MMD_CTRL_OP_REG_ (0x0000) +#define PHY_MMD_CTRL_OP_DNI_ (0x4000) +#define PHY_MMD_CTRL_OP_DPIRW_ (0x8000) +#define PHY_MMD_CTRL_OP_DPIWO_ (0xC000) +#define PHY_MMD_CTRL_DEV_ADDR_MASK_ (0x001F) + +#define PHY_MMD_REG_DATA (0x0E) + +/* VTSE Vendor Specific registers */ +#define PHY_VTSE_BYPASS (0x12) +#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_ (0x0020) + +#define PHY_VTSE_INT_MASK (0x19) +#define PHY_VTSE_INT_MASK_MDINTPIN_EN_ (0x8000) +#define PHY_VTSE_INT_MASK_SPEED_CHANGE_ (0x4000) +#define PHY_VTSE_INT_MASK_LINK_CHANGE_ (0x2000) +#define PHY_VTSE_INT_MASK_FDX_CHANGE_ (0x1000) +#define PHY_VTSE_INT_MASK_AUTONEG_ERR_ (0x0800) +#define PHY_VTSE_INT_MASK_AUTONEG_DONE_ (0x0400) +#define PHY_VTSE_INT_MASK_POE_DETECT_ (0x0200) +#define PHY_VTSE_INT_MASK_SYMBOL_ERR_ (0x0100) +#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_ (0x0080) +#define PHY_VTSE_INT_MASK_WOL_EVENT_ (0x0040) +#define PHY_VTSE_INT_MASK_EXTENDED_INT_ (0x0020) +#define PHY_VTSE_INT_MASK_RESERVED_ (0x0010) +#define PHY_VTSE_INT_MASK_FALSE_CARRIER_ (0x0008) +#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_ (0x0004) +#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_ (0x0002) +#define PHY_VTSE_INT_MASK_RX__ER_ (0x0001) + +#define PHY_VTSE_INT_STS (0x1A) +#define PHY_VTSE_INT_STS_INT_ACTIVE_ (0x8000) +#define PHY_VTSE_INT_STS_SPEED_CHANGE_ (0x4000) +#define PHY_VTSE_INT_STS_LINK_CHANGE_ (0x2000) +#define PHY_VTSE_INT_STS_FDX_CHANGE_ (0x1000) +#define PHY_VTSE_INT_STS_AUTONEG_ERR_ (0x0800) +#define PHY_VTSE_INT_STS_AUTONEG_DONE_ (0x0400) +#define PHY_VTSE_INT_STS_POE_DETECT_ (0x0200) 
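/* [Editor's sketch, not part of the patch] A typical pattern for arming
 * the VTSE PHY interrupt sources defined nearby: read PHY_VTSE_INT_STS
 * once to clear any stale latched events, then unmask the sources of
 * interest. example_mdio_read (from the earlier sketch) and
 * example_mdio_write stand in for the driver's PHY accessors and are
 * assumptions for illustration.
 */
static void example_arm_phy_interrupts(struct lan78xx_net *dev, int phy_id)
{
	/* reading the status register clears latched events */
	(void)example_mdio_read(dev, phy_id, PHY_VTSE_INT_STS);

	/* route link and speed changes to the MDINT pin */
	example_mdio_write(dev, phy_id, PHY_VTSE_INT_MASK,
			   PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
			   PHY_VTSE_INT_MASK_LINK_CHANGE_ |
			   PHY_VTSE_INT_MASK_SPEED_CHANGE_);
}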
+#define PHY_VTSE_INT_STS_SYMBOL_ERR_ (0x0100) +#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_ (0x0080) +#define PHY_VTSE_INT_STS_WOL_EVENT_ (0x0040) +#define PHY_VTSE_INT_STS_EXTENDED_INT_ (0x0020) +#define PHY_VTSE_INT_STS_RESERVED_ (0x0010) +#define PHY_VTSE_INT_STS_FALSE_CARRIER_ (0x0008) +#define PHY_VTSE_INT_STS_LINK_SPEED_DS_ (0x0004) +#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_ (0x0002) +#define PHY_VTSE_INT_STS_RX_ER_ (0x0001) + +/* VTSE PHY registers */ +#define PHY_EXT_GPIO_PAGE (0x1F) +#define PHY_EXT_GPIO_PAGE_SPACE_0 (0x0000) +#define PHY_EXT_GPIO_PAGE_SPACE_1 (0x0001) +#define PHY_EXT_GPIO_PAGE_SPACE_2 (0x0002) + +/* Extended Register Page 1 space */ +#define PHY_EXT_MODE_CTRL (0x13) +#define PHY_EXT_MODE_CTRL_MDIX_MASK_ (0x000C) +#define PHY_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000) +#define PHY_EXT_MODE_CTRL_MDI_ (0x0008) +#define PHY_EXT_MODE_CTRL_MDI_X_ (0x000C) + +#define PHY_ANA_10BASE_T_HD 0x01 +#define PHY_ANA_10BASE_T_FD 0x02 +#define PHY_ANA_100BASE_TX_HD 0x04 +#define PHY_ANA_100BASE_TX_FD 0x08 +#define PHY_ANA_1000BASE_T_FD 0x10 +#define PHY_ANA_ALL_SUPPORTED_MEDIA (PHY_ANA_10BASE_T_HD | \ + PHY_ANA_10BASE_T_FD | \ + PHY_ANA_100BASE_TX_HD | \ + PHY_ANA_100BASE_TX_FD | \ + PHY_ANA_1000BASE_T_FD) +/* PHY MMD registers */ +#define PHY_MMD_DEV_3 3 + +#define PHY_EEE_PCS_STATUS (0x1) +#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_ ((WORD)0x0800) +#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_ ((WORD)0x0400) +#define PHY_EEE_PCS_STATUS_TX_LPI_IND_ ((WORD)0x0200) +#define PHY_EEE_PCS_STATUS_RX_LPI_IND_ ((WORD)0x0100) +#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_ ((WORD)0x0004) + +#define PHY_EEE_CAPABILITIES (0x14) +#define PHY_EEE_CAPABILITIES_1000BT_EEE_ ((WORD)0x0004) +#define PHY_EEE_CAPABILITIES_100BT_EEE_ ((WORD)0x0002) + +#define PHY_MMD_DEV_7 7 + +#define PHY_EEE_ADVERTISEMENT (0x3C) +#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_ ((WORD)0x0004) +#define PHY_EEE_ADVERTISEMENT_100BT_EEE_ ((WORD)0x0002) + +#define PHY_EEE_LP_ADVERTISEMENT (0x3D) +#define PHY_EEE_1000BT_EEE_CAPABLE_ ((WORD)0x0004) +#define PHY_EEE_100BT_EEE_CAPABLE_ ((WORD)0x0002) +#endif /* _LAN78XX_H */ diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 64a60afbe..2a7c1be23 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -752,8 +752,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */ + {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ + {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ @@ -765,6 +765,10 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ + {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9071, 
10)}, /* Sierra Wireless MC74xx/EM74xx */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ @@ -785,6 +789,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ad8cbc6c9..d9427ca3d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -26,8 +26,13 @@ #include #include -/* Version Information */ -#define DRIVER_VERSION "v1.08.1 (2015/07/28)" +/* Information for net-next */ +#define NETNEXT_VERSION "08" + +/* Information for net */ +#define NET_VERSION "2" + +#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers " #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" #define MODULENAME "r8152" @@ -143,6 +148,7 @@ #define OCP_EEE_ABLE 0xa5c4 #define OCP_EEE_ADV 0xa5d0 #define OCP_EEE_LPABLE 0xa5d2 +#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */ #define OCP_ADC_CFG 0xbc06 /* SRAM Register */ @@ -339,6 +345,7 @@ /* USB_USB_CTRL */ #define RX_AGG_DISABLE 0x0010 +#define RX_ZERO_EN 0x0080 /* USB_U2P3_CTRL */ #define U2P3_ENABLE 0x0001 @@ -426,6 +433,10 @@ /* OCP_DOWN_SPEED */ #define EN_10M_BGOFF 0x0080 +/* OCP_PHY_STATE */ +#define TXDIS_STATE 0x01 +#define ABD_STATE 0x02 + /* OCP_ADC_CFG */ #define CKADSEL_L 0x0100 #define ADC_EN 0x0080 @@ -603,6 +614,7 @@ struct r8152 { void (*unload)(struct r8152 *); int (*eee_get)(struct r8152 *, struct ethtool_eee *); int (*eee_set)(struct r8152 *, struct ethtool_eee *); + bool (*in_nway)(struct r8152 *); } rtl_ops; int intr_interval; @@ -622,6 +634,7 @@ enum rtl_version { RTL_VER_03, RTL_VER_04, RTL_VER_05, + RTL_VER_06, RTL_VER_MAX }; @@ -2610,7 +2623,10 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) u32 ocp_data; u16 data; - ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); + if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || + tp->version == RTL_VER_05) + ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); + data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; @@ -2711,7 +2727,7 @@ static void r8153_first_init(struct r8152 *tp) /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); - ocp_data &= ~RX_AGG_DISABLE; + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); } @@ -2936,6 +2952,32 @@ static void rtl8153_down(struct r8152 *tp) r8153_enable_aldps(tp); } +static bool rtl8152_in_nway(struct r8152 *tp) +{ + u16 nway_state; + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000); + tp->ocp_base = 0x2000; + ocp_write_byte(tp, MCU_TYPE_PLA, 0xb014, 0x4c); /* phy state */ + nway_state = ocp_read_word(tp, MCU_TYPE_PLA, 0xb01a); + + /* bit 15: TXDIS_STATE, bit 14: ABD_STATE */ + if (nway_state & 
0xc000) + return false; + else + return true; +} + +static bool rtl8153_in_nway(struct r8152 *tp) +{ + u16 phy_state = ocp_reg_read(tp, OCP_PHY_STATE) & 0xff; + + if (phy_state == TXDIS_STATE || phy_state == ABD_STATE) + return false; + else + return true; +} + static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; @@ -3241,7 +3283,7 @@ static void r8152b_init(struct r8152 *tp) /* enable rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); - ocp_data &= ~RX_AGG_DISABLE; + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); } @@ -3287,6 +3329,13 @@ static void r8153_init(struct r8152 *tp) ocp_data &= ~ECM_ALDPS; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0, ocp_data); + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); + if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) + ocp_data &= ~DYNAMIC_BURST; + else + ocp_data |= DYNAMIC_BURST; + ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); + } else if (tp->version == RTL_VER_06) { ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) ocp_data &= ~DYNAMIC_BURST; @@ -3393,6 +3442,27 @@ static int rtl8152_post_reset(struct usb_interface *intf) return 0; } +static bool delay_autosuspend(struct r8152 *tp) +{ + bool sw_linking = !!netif_carrier_ok(tp->netdev); + bool hw_linking = !!(rtl8152_get_speed(tp) & LINK_STATUS); + + /* A link change may have occurred that the driver has not detected + * yet. If the driver has disabled tx/rx while the hardware link is + * up, the device wouldn't wake up on an incoming packet. + */ + if (work_busy(&tp->schedule.work) || sw_linking != hw_linking) + return true; + + /* If the link went down because of nway (auto-negotiation), the + * device may miss the link change event and wouldn't wake up when + * the link comes back on. 
+ */ + if (!sw_linking && tp->rtl_ops.in_nway(tp)) + return true; + else + return false; +} + static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) { struct r8152 *tp = usb_get_intfdata(intf); @@ -3402,7 +3472,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) mutex_lock(&tp->control); if (PMSG_IS_AUTO(message)) { - if (netif_running(netdev) && work_busy(&tp->schedule.work)) { + if (netif_running(netdev) && delay_autosuspend(tp)) { ret = -EBUSY; goto out1; } @@ -3988,6 +4058,10 @@ static void r8152b_get_version(struct r8152 *tp) tp->version = RTL_VER_05; tp->mii.supports_gmii = 1; break; + case 0x5c30: + tp->version = RTL_VER_06; + tp->mii.supports_gmii = 1; + break; default: netif_info(tp, probe, tp->netdev, "Unknown version 0x%04x\n", version); @@ -4028,11 +4102,13 @@ static int rtl_ops_init(struct r8152 *tp) ops->unload = rtl8152_unload; ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; + ops->in_nway = rtl8152_in_nway; break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: + case RTL_VER_06: ops->init = r8153_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; @@ -4041,6 +4117,7 @@ static int rtl_ops_init(struct r8152 *tp) ops->unload = rtl8153_unload; ops->eee_get = r8153_get_eee; ops->eee_set = r8153_set_eee; + ops->in_nway = rtl8153_in_nway; break; default: diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index e0498571a..b4cf10781 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -428,12 +428,18 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, old_state = entry->state; entry->state = state; __skb_unlink(skb, list); - spin_unlock(&list->lock); - spin_lock(&dev->done.lock); + + /* defer_bh() is never called with list == &dev->done. + * spin_lock_nested() tells lockdep that it is OK to take + * dev->done.lock here with list->lock held. + */ + spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING); + __skb_queue_tail(&dev->done, skb); if (dev->done.qlen == 1) tasklet_schedule(&dev->bh); - spin_unlock_irqrestore(&dev->done.lock, flags); + spin_unlock(&dev->done.lock); + spin_unlock_irqrestore(&list->lock, flags); return old_state; } @@ -749,6 +755,20 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs); /*-------------------------------------------------------------------------*/ +static void wait_skb_queue_empty(struct sk_buff_head *q) +{ + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + while (!skb_queue_empty(q)) { + spin_unlock_irqrestore(&q->lock, flags); + schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + spin_lock_irqsave(&q->lock, flags); + } + spin_unlock_irqrestore(&q->lock, flags); +} + // precondition: never called in_interrupt static void usbnet_terminate_urbs(struct usbnet *dev) { @@ -762,14 +782,11 @@ static void usbnet_terminate_urbs(struct usbnet *dev) unlink_urbs(dev, &dev->rxq); /* maybe wait for deletions to finish. 
*/ - while (!skb_queue_empty(&dev->rxq) - && !skb_queue_empty(&dev->txq) - && !skb_queue_empty(&dev->done)) { - schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); - set_current_state(TASK_UNINTERRUPTIBLE); - netif_dbg(dev, ifdown, dev->net, - "waited for %d urb completions\n", temp); - } + wait_skb_queue_empty(&dev->rxq); + wait_skb_queue_empty(&dev->txq); + wait_skb_queue_empty(&dev->done); + netif_dbg(dev, ifdown, dev->net, + "waited for %d urb completions\n", temp); set_current_state(TASK_RUNNING); remove_wait_queue(&dev->wait, &wait); } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index c8186ffda..0ef4a5ad5 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -290,6 +290,7 @@ static const struct net_device_ops veth_netdev_ops = { .ndo_poll_controller = veth_poll_controller, #endif .ndo_get_iflink = veth_get_iflink, + .ndo_features_check = passthru_features_check, }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ @@ -305,6 +306,7 @@ static void veth_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 237f8e5e4..d8838dedb 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -40,12 +40,12 @@ module_param(gso, bool, 0444); #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 -/* Weight used for the RX packet size EWMA. The average packet size is used to - * determine the packet buffer size when refilling RX rings. As the entire RX - * ring may be refilled at once, the weight is chosen so that the EWMA will be - * insensitive to short-term, transient changes in packet size. +/* RX packet size EWMA. The average packet size is used to determine the packet + * buffer size when refilling RX rings. As the entire RX ring may be refilled + * at once, the weight is chosen so that the EWMA will be insensitive to short- + * term, transient changes in packet size. */ -#define RECEIVE_AVG_WEIGHT 64 +DECLARE_EWMA(pkt_len, 1, 64) /* Minimum alignment for mergeable packet buffers. */ #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) @@ -85,7 +85,7 @@ struct receive_queue { struct page *pages; /* Average packet length for mergeable receive buffers. */ - struct ewma mrg_avg_pkt_len; + struct ewma_pkt_len mrg_avg_pkt_len; /* Page frag for packet buffer allocation. 
*/ struct page_frag alloc_frag; @@ -407,7 +407,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } } - ewma_add(&rq->mrg_avg_pkt_len, head_skb->len); + ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); return head_skb; err_skb: @@ -518,7 +518,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, skb_mark_napi_id(skb, &rq->napi); - netif_receive_skb(skb); + napi_gro_receive(&rq->napi, skb); return; frame_err: @@ -540,7 +540,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, skb_put(skb, GOOD_PACKET_LEN); hdr = skb_vnet_hdr(skb); - sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); + sg_init_table(rq->sg, 2); sg_set_buf(rq->sg, hdr, vi->hdr_len); skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); @@ -600,12 +600,12 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, return err; } -static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len) +static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len) { const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); unsigned int len; - len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len), + len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); return ALIGN(len, MERGEABLE_BUFFER_ALIGN); } @@ -756,7 +756,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) /* Out of packets? */ if (received < budget) { r = virtqueue_enable_cb_prepare(rq->vq); - napi_complete(napi); + napi_complete_done(napi, received); if (unlikely(virtqueue_poll(rq->vq, r)) && napi_schedule_prep(napi)) { virtqueue_disable_cb(rq->vq); @@ -893,7 +893,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) if (vi->mergeable_rx_bufs) hdr->num_buffers = 0; - sg_init_table(sq->sg, MAX_SKB_FRAGS + 2); + sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); @@ -1615,7 +1615,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) napi_hash_add(&vi->rq[i].napi); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); - ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT); + ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); } @@ -1658,7 +1658,7 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, { struct virtnet_info *vi = netdev_priv(queue->dev); unsigned int queue_index = get_netdev_rx_queue_index(queue); - struct ewma *avg; + struct ewma_pkt_len *avg; BUG_ON(queue_index >= vi->max_queue_pairs); avg = &vi->rq[queue_index].mrg_avg_pkt_len; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c new file mode 100644 index 000000000..488c6f50d --- /dev/null +++ b/drivers/net/vrf.c @@ -0,0 +1,711 @@ +/* + * vrf.c: device driver to encapsulate a VRF space + * + * Copyright (c) 2015 Cumulus Networks. All rights reserved. + * Copyright (c) 2015 Shrijeet Mukherjee + * Copyright (c) 2015 David Ahern + * + * Based on dummy, team and ipvlan drivers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "vrf" +#define DRV_VERSION "1.0" + +#define vrf_is_slave(dev) ((dev)->flags & IFF_SLAVE) + +#define vrf_master_get_rcu(dev) \ + ((struct net_device *)rcu_dereference(dev->rx_handler_data)) + +struct pcpu_dstats { + u64 tx_pkts; + u64 tx_bytes; + u64 tx_drps; + u64 rx_pkts; + u64 rx_bytes; + struct u64_stats_sync syncp; +}; + +static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie) +{ + return dst; +} + +static int vrf_ip_local_out(struct sk_buff *skb) +{ + return ip_local_out(skb); +} + +static unsigned int vrf_v4_mtu(const struct dst_entry *dst) +{ + /* TO-DO: return max ethernet size? */ + return dst->dev->mtu; +} + +static void vrf_dst_destroy(struct dst_entry *dst) +{ + /* our dst lives forever - or until the device is closed */ +} + +static unsigned int vrf_default_advmss(const struct dst_entry *dst) +{ + return 65535 - 40; +} + +static struct dst_ops vrf_dst_ops = { + .family = AF_INET, + .local_out = vrf_ip_local_out, + .check = vrf_ip_check, + .mtu = vrf_v4_mtu, + .destroy = vrf_dst_destroy, + .default_advmss = vrf_default_advmss, +}; + +static bool is_ip_rx_frame(struct sk_buff *skb) +{ + switch (skb->protocol) { + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + return true; + } + return false; +} + +static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) +{ + vrf_dev->stats.tx_errors++; + kfree_skb(skb); +} + +/* note: already called with rcu_read_lock */ +static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + + if (is_ip_rx_frame(skb)) { + struct net_device *dev = vrf_master_get_rcu(skb->dev); + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + dstats->rx_pkts++; + dstats->rx_bytes += skb->len; + u64_stats_update_end(&dstats->syncp); + + skb->dev = dev; + + return RX_HANDLER_ANOTHER; + } + return RX_HANDLER_PASS; +} + +static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + int i; + + for_each_possible_cpu(i) { + const struct pcpu_dstats *dstats; + u64 tbytes, tpkts, tdrops, rbytes, rpkts; + unsigned int start; + + dstats = per_cpu_ptr(dev->dstats, i); + do { + start = u64_stats_fetch_begin_irq(&dstats->syncp); + tbytes = dstats->tx_bytes; + tpkts = dstats->tx_pkts; + tdrops = dstats->tx_drps; + rbytes = dstats->rx_bytes; + rpkts = dstats->rx_pkts; + } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); + stats->tx_bytes += tbytes; + stats->tx_packets += tpkts; + stats->tx_dropped += tdrops; + stats->rx_bytes += rbytes; + stats->rx_packets += rpkts; + } + return stats; +} + +static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, + struct net_device *dev) +{ + vrf_tx_error(dev, skb); + return NET_XMIT_DROP; +} + +static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4, + struct net_device *vrf_dev) +{ + struct rtable *rt; + int err = 1; + + rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL); + if (IS_ERR(rt)) + goto out; + + /* TO-DO: what about broadcast ? 
*/ + if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { + ip_rt_put(rt); + goto out; + } + + skb_dst_drop(skb); + skb_dst_set(skb, &rt->dst); + err = 0; +out: + return err; +} + +static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, + struct net_device *vrf_dev) +{ + struct iphdr *ip4h = ip_hdr(skb); + int ret = NET_XMIT_DROP; + struct flowi4 fl4 = { + /* needed to match OIF rule */ + .flowi4_oif = vrf_dev->ifindex, + .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_tos = RT_TOS(ip4h->tos), + .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC | + FLOWI_FLAG_SKIP_NH_OIF, + .daddr = ip4h->daddr, + }; + + if (vrf_send_v4_prep(skb, &fl4, vrf_dev)) + goto err; + + if (!ip4h->saddr) { + ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0, + RT_SCOPE_LINK); + } + + ret = ip_local_out(skb); + if (unlikely(net_xmit_eval(ret))) + vrf_dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + +out: + return ret; +err: + vrf_tx_error(vrf_dev, skb); + goto out; +} + +static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) +{ + /* strip the ethernet header added for pass through VRF device */ + __skb_pull(skb, skb_network_offset(skb)); + + switch (skb->protocol) { + case htons(ETH_P_IP): + return vrf_process_v4_outbound(skb, dev); + case htons(ETH_P_IPV6): + return vrf_process_v6_outbound(skb, dev); + default: + vrf_tx_error(dev, skb); + return NET_XMIT_DROP; + } +} + +static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) +{ + netdev_tx_t ret = is_ip_tx_frame(skb, dev); + + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + dstats->tx_pkts++; + dstats->tx_bytes += skb->len; + u64_stats_update_end(&dstats->syncp); + } else { + this_cpu_inc(dev->dstats->tx_drps); + } + + return ret; +} + +/* modelled after ip_finish_output2 */ +static int vrf_finish_output(struct sock *sk, struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + struct rtable *rt = (struct rtable *)dst; + struct net_device *dev = dst->dev; + unsigned int hh_len = LL_RESERVED_SPACE(dev); + struct neighbour *neigh; + u32 nexthop; + int ret = -EINVAL; + + /* Be paranoid, rather than too clever. 
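+	 * As in ip_finish_output2: if the underlying device needs a
+	 * link-layer header and this skb lacks the headroom for it,
+	 * reallocate rather than trust upstream callers to have
+	 * reserved enough.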
*/ + if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { + struct sk_buff *skb2; + + skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); + if (!skb2) { + ret = -ENOMEM; + goto err; + } + if (skb->sk) + skb_set_owner_w(skb2, skb->sk); + + consume_skb(skb); + skb = skb2; + } + + rcu_read_lock_bh(); + + nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr); + neigh = __ipv4_neigh_lookup_noref(dev, nexthop); + if (unlikely(!neigh)) + neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); + if (!IS_ERR(neigh)) + ret = dst_neigh_output(dst, neigh, skb); + + rcu_read_unlock_bh(); +err: + if (unlikely(ret < 0)) + vrf_tx_error(skb->dev, skb); + return ret; +} + +static int vrf_output(struct sock *sk, struct sk_buff *skb) +{ + struct net_device *dev = skb_dst(skb)->dev; + + IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); + + skb->dev = dev; + skb->protocol = htons(ETH_P_IP); + + return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, + NULL, dev, + vrf_finish_output, + !(IPCB(skb)->flags & IPSKB_REROUTED)); +} + +static void vrf_rtable_destroy(struct net_vrf *vrf) +{ + struct dst_entry *dst = (struct dst_entry *)vrf->rth; + + dst_destroy(dst); + vrf->rth = NULL; +} + +static struct rtable *vrf_rtable_create(struct net_device *dev) +{ + struct rtable *rth; + + rth = dst_alloc(&vrf_dst_ops, dev, 2, + DST_OBSOLETE_NONE, + (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); + if (rth) { + rth->dst.output = vrf_output; + rth->rt_genid = rt_genid_ipv4(dev_net(dev)); + rth->rt_flags = 0; + rth->rt_type = RTN_UNICAST; + rth->rt_is_input = 0; + rth->rt_iif = 0; + rth->rt_pmtu = 0; + rth->rt_gateway = 0; + rth->rt_uses_gateway = 0; + INIT_LIST_HEAD(&rth->rt_uncached); + rth->rt_uncached_list = NULL; + } + + return rth; +} + +/**************************** device handling ********************/ + +/* cycle interface to flush neighbor cache and move routes across tables */ +static void cycle_netdev(struct net_device *dev) +{ + unsigned int flags = dev->flags; + int ret; + + if (!netif_running(dev)) + return; + + ret = dev_change_flags(dev, flags & ~IFF_UP); + if (ret >= 0) + ret = dev_change_flags(dev, flags); + + if (ret < 0) { + netdev_err(dev, + "Failed to cycle device %s; route tables might be wrong!\n", + dev->name); + } +} + +static struct slave *__vrf_find_slave_dev(struct slave_queue *queue, + struct net_device *dev) +{ + struct list_head *head = &queue->all_slaves; + struct slave *slave; + + list_for_each_entry(slave, head, list) { + if (slave->dev == dev) + return slave; + } + + return NULL; +} + +/* inverse of __vrf_insert_slave */ +static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave) +{ + list_del(&slave->list); +} + +static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave) +{ + list_add(&slave->list, &queue->all_slaves); +} + +static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) +{ + struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL); + struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL); + struct net_vrf *vrf = netdev_priv(dev); + struct slave_queue *queue = &vrf->queue; + int ret = -ENOMEM; + + if (!slave || !vrf_ptr) + goto out_fail; + + slave->dev = port_dev; + vrf_ptr->ifindex = dev->ifindex; + vrf_ptr->tb_id = vrf->tb_id; + + /* register the packet handler for slave ports */ + ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev); + if (ret) { + netdev_err(port_dev, + "Device %s failed to register rx_handler\n", + port_dev->name); + goto out_fail; + } + + 
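+	/* The rx_handler is in place, so link the port under the VRF
+	 * device as its master; from here on vrf_handle_frame() steers
+	 * the port's IP traffic to this device.
+	 */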
ret = netdev_master_upper_dev_link(port_dev, dev); + if (ret < 0) + goto out_unregister; + + port_dev->flags |= IFF_SLAVE; + __vrf_insert_slave(queue, slave); + rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr); + cycle_netdev(port_dev); + + return 0; + +out_unregister: + netdev_rx_handler_unregister(port_dev); +out_fail: + kfree(vrf_ptr); + kfree(slave); + return ret; +} + +static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev) +{ + if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev)) + return -EINVAL; + + return do_vrf_add_slave(dev, port_dev); +} + +/* inverse of do_vrf_add_slave */ +static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) +{ + struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr); + struct net_vrf *vrf = netdev_priv(dev); + struct slave_queue *queue = &vrf->queue; + struct slave *slave; + + RCU_INIT_POINTER(port_dev->vrf_ptr, NULL); + + netdev_upper_dev_unlink(port_dev, dev); + port_dev->flags &= ~IFF_SLAVE; + + netdev_rx_handler_unregister(port_dev); + + /* after netdev_rx_handler_unregister for synchronize_rcu */ + kfree(vrf_ptr); + + cycle_netdev(port_dev); + + slave = __vrf_find_slave_dev(queue, port_dev); + if (slave) + __vrf_remove_slave(queue, slave); + + kfree(slave); + + return 0; +} + +static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) +{ + return do_vrf_del_slave(dev, port_dev); +} + +static void vrf_dev_uninit(struct net_device *dev) +{ + struct net_vrf *vrf = netdev_priv(dev); + struct slave_queue *queue = &vrf->queue; + struct list_head *head = &queue->all_slaves; + struct slave *slave, *next; + + vrf_rtable_destroy(vrf); + + list_for_each_entry_safe(slave, next, head, list) + vrf_del_slave(dev, slave->dev); + + free_percpu(dev->dstats); + dev->dstats = NULL; +} + +static int vrf_dev_init(struct net_device *dev) +{ + struct net_vrf *vrf = netdev_priv(dev); + + INIT_LIST_HEAD(&vrf->queue.all_slaves); + + dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); + if (!dev->dstats) + goto out_nomem; + + /* create the default dst which points back to us */ + vrf->rth = vrf_rtable_create(dev); + if (!vrf->rth) + goto out_stats; + + dev->flags = IFF_MASTER | IFF_NOARP; + + return 0; + +out_stats: + free_percpu(dev->dstats); + dev->dstats = NULL; +out_nomem: + return -ENOMEM; +} + +static const struct net_device_ops vrf_netdev_ops = { + .ndo_init = vrf_dev_init, + .ndo_uninit = vrf_dev_uninit, + .ndo_start_xmit = vrf_xmit, + .ndo_get_stats64 = vrf_get_stats64, + .ndo_add_slave = vrf_add_slave, + .ndo_del_slave = vrf_del_slave, +}; + +static void vrf_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static const struct ethtool_ops vrf_ethtool_ops = { + .get_drvinfo = vrf_get_drvinfo, +}; + +static void vrf_setup(struct net_device *dev) +{ + ether_setup(dev); + + /* Initialize the device structure. */ + dev->netdev_ops = &vrf_netdev_ops; + dev->ethtool_ops = &vrf_ethtool_ops; + dev->destructor = free_netdev; + + /* Fill in device structure with ethernet-generic values. */ + eth_hw_addr_random(dev); + + /* don't acquire vrf device's netif_tx_lock when transmitting */ + dev->features |= NETIF_F_LLTX; + + /* don't allow vrf devices to change network namespaces. 
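+	 * (the routing tables a VRF device is bound to are only
+	 * meaningful in the namespace the device was created in)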
*/ + dev->features |= NETIF_F_NETNS_LOCAL; +} + +static int vrf_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; +} + +static void vrf_dellink(struct net_device *dev, struct list_head *head) +{ + struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr); + + RCU_INIT_POINTER(dev->vrf_ptr, NULL); + kfree_rcu(vrf_ptr, rcu); + unregister_netdevice_queue(dev, head); +} + +static int vrf_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct net_vrf *vrf = netdev_priv(dev); + struct net_vrf_dev *vrf_ptr; + int err; + + if (!data || !data[IFLA_VRF_TABLE]) + return -EINVAL; + + vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); + + dev->priv_flags |= IFF_VRF_MASTER; + + err = -ENOMEM; + vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL); + if (!vrf_ptr) + goto out_fail; + + vrf_ptr->ifindex = dev->ifindex; + vrf_ptr->tb_id = vrf->tb_id; + + err = register_netdevice(dev); + if (err < 0) + goto out_fail; + + rcu_assign_pointer(dev->vrf_ptr, vrf_ptr); + + return 0; + +out_fail: + kfree(vrf_ptr); + free_netdev(dev); + return err; +} + +static size_t vrf_nl_getsize(const struct net_device *dev) +{ + return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */ +} + +static int vrf_fillinfo(struct sk_buff *skb, + const struct net_device *dev) +{ + struct net_vrf *vrf = netdev_priv(dev); + + return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id); +} + +static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { + [IFLA_VRF_TABLE] = { .type = NLA_U32 }, +}; + +static struct rtnl_link_ops vrf_link_ops __read_mostly = { + .kind = DRV_NAME, + .priv_size = sizeof(struct net_vrf), + + .get_size = vrf_nl_getsize, + .policy = vrf_nl_policy, + .validate = vrf_validate, + .fill_info = vrf_fillinfo, + + .newlink = vrf_newlink, + .dellink = vrf_dellink, + .setup = vrf_setup, + .maxtype = IFLA_VRF_MAX, +}; + +static int vrf_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + /* only care about unregister events to drop slave references */ + if (event == NETDEV_UNREGISTER) { + struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr); + struct net_device *vrf_dev; + + if (!vrf_ptr || netif_is_vrf(dev)) + goto out; + + vrf_dev = netdev_master_upper_dev_get(dev); + vrf_del_slave(vrf_dev, dev); + } +out: + return NOTIFY_DONE; +} + +static struct notifier_block vrf_notifier_block __read_mostly = { + .notifier_call = vrf_device_event, +}; + +static int __init vrf_init_module(void) +{ + int rc; + + vrf_dst_ops.kmem_cachep = + kmem_cache_create("vrf_ip_dst_cache", + sizeof(struct rtable), 0, + SLAB_HWCACHE_ALIGN, + NULL); + + if (!vrf_dst_ops.kmem_cachep) + return -ENOMEM; + + register_netdevice_notifier(&vrf_notifier_block); + + rc = rtnl_link_register(&vrf_link_ops); + if (rc < 0) + goto error; + + return 0; + +error: + unregister_netdevice_notifier(&vrf_notifier_block); + kmem_cache_destroy(vrf_dst_ops.kmem_cachep); + return rc; +} + +static void __exit vrf_cleanup_module(void) +{ + rtnl_link_unregister(&vrf_link_ops); + unregister_netdevice_notifier(&vrf_notifier_block); + kmem_cache_destroy(vrf_dst_ops.kmem_cachep); +} + +module_init(vrf_init_module); +module_exit(vrf_cleanup_module); +MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); +MODULE_DESCRIPTION("Device driver to 
instantiate VRF domains"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5bc4b1ed6..c1587ece2 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -49,15 +49,12 @@ #include #include #endif +#include #define VXLAN_VERSION "0.1" #define PORT_HASH_BITS 8 #define PORT_HASH_SIZE (1<flags & VXLAN_F_COLLECT_METADATA || + ip_tunnel_collect_metadata(); +} + #if IS_ENABLED(CONFIG_IPV6) static inline bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) @@ -269,7 +236,7 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { if (inet_sk(vs->sock->sk)->inet_sport == port && - inet_sk(vs->sock->sk)->sk.sk_family == family && + vxlan_get_sk_family(vs) == family && vs->flags == flags) return vs; } @@ -345,7 +312,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip)) goto nla_put_failure; - if (rdst->remote_port && rdst->remote_port != vxlan->dst_port && + if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port && nla_put_be16(skb, NDA_PORT, rdst->remote_port)) goto nla_put_failure; if (rdst->remote_vni != vxlan->default_dst.remote_vni && @@ -552,10 +519,10 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, u32 data, struct gro_remcsum *grc, bool nopartial) { - size_t start, offset, plen; + size_t start, offset; if (skb->remcsum_offload) - return NULL; + return vh; if (!NAPI_GRO_CB(skb)->csum_valid) return NULL; @@ -565,17 +532,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, offsetof(struct udphdr, check) : offsetof(struct tcphdr, check)); - plen = hdrlen + offset + sizeof(u16); - - /* Pull checksum that will be written */ - if (skb_gro_header_hard(skb, off + plen)) { - vh = skb_gro_header_slow(skb, off + plen, off); - if (!vh) - return NULL; - } - - skb_gro_remcsum_process(skb, (void *)vh + hdrlen, - start, offset, grc, nopartial); + vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen, + start, offset, grc, nopartial); skb->remcsum_offload = 1; @@ -606,7 +564,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, goto out; } - skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); flags = ntohl(vh->vx_flags); @@ -621,6 +578,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, goto out; } + skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ + flush = 0; for (p = *head; p; p = p->next) { @@ -658,7 +617,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) struct net_device *dev; struct sock *sk = vs->sock->sk; struct net *net = sock_net(sk); - sa_family_t sa_family = sk->sk_family; + sa_family_t sa_family = vxlan_get_sk_family(vs); __be16 port = inet_sk(sk)->inet_sport; int err; @@ -683,7 +642,7 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) struct net_device *dev; struct sock *sk = vs->sock->sk; struct net *net = sock_net(sk); - sa_family_t sa_family = sk->sk_family; + sa_family_t sa_family = vxlan_get_sk_family(vs); __be16 port = inet_sk(sk)->inet_sport; rcu_read_lock(); @@ -749,7 +708,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (!(flags & NLM_F_CREATE)) return -ENOENT; - if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax) + if (vxlan->cfg.addrmax && + vxlan->addrcnt >= 
vxlan->cfg.addrmax) return -ENOSPC; /* Disallow replace to add a multicast entry */ @@ -835,7 +795,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, return -EINVAL; *port = nla_get_be16(tb[NDA_PORT]); } else { - *port = vxlan->dst_port; + *port = vxlan->cfg.dst_port; } if (tb[NDA_VNI]) { @@ -963,10 +923,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; - if (idx < cb->args[0]) - goto skip; - list_for_each_entry_rcu(rd, &f->remotes, list) { + if (idx < cb->args[0]) + goto skip; + err = vxlan_fdb_info(skb, vxlan, f, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -974,9 +934,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, NLM_F_MULTI, rd); if (err < 0) goto out; - } skip: - ++idx; + ++idx; + } } } out: @@ -1021,7 +981,7 @@ static bool vxlan_snoop(struct net_device *dev, vxlan_fdb_create(vxlan, src_mac, src_ip, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, + vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); @@ -1062,7 +1022,7 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) return false; } -void vxlan_sock_release(struct vxlan_sock *vs) +static void vxlan_sock_release(struct vxlan_sock *vs) { struct sock *sk = vs->sock->sk; struct net *net = sock_net(sk); @@ -1078,7 +1038,6 @@ void vxlan_sock_release(struct vxlan_sock *vs) queue_work(vxlan_wq, &vs->del_work); } -EXPORT_SYMBOL_GPL(vxlan_sock_release); /* Update multicast group membership when first VNI on * multicast address is brought up @@ -1143,6 +1102,9 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, { size_t start, offset, plen; + if (skb->remcsum_offload) + return vh; + start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; offset = start + ((data & VXLAN_RCO_UDP) ? offsetof(struct udphdr, check) : @@ -1161,13 +1123,111 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, return vh; } +static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, + struct vxlan_metadata *md, u32 vni, + struct metadata_dst *tun_dst) +{ + struct iphdr *oip = NULL; + struct ipv6hdr *oip6 = NULL; + struct vxlan_dev *vxlan; + struct pcpu_sw_netstats *stats; + union vxlan_addr saddr; + int err = 0; + union vxlan_addr *remote_ip; + + /* For flow based devices, map all packets to VNI 0 */ + if (vs->flags & VXLAN_F_COLLECT_METADATA) + vni = 0; + + /* Is this VNI defined? 
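+	 * A VNI that no vxlan device on this socket claims is dropped;
+	 * in metadata-collection mode every packet was mapped to VNI 0
+	 * above, so the flow-based device is found here as well.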
*/ + vxlan = vxlan_vs_find_vni(vs, vni); + if (!vxlan) + goto drop; + + remote_ip = &vxlan->default_dst.remote_ip; + skb_reset_mac_header(skb); + skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); + skb->protocol = eth_type_trans(skb, vxlan->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + + /* Ignore packet loops (and multicast echo) */ + if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) + goto drop; + + /* Re-examine inner Ethernet packet */ + if (remote_ip->sa.sa_family == AF_INET) { + oip = ip_hdr(skb); + saddr.sin.sin_addr.s_addr = oip->saddr; + saddr.sa.sa_family = AF_INET; +#if IS_ENABLED(CONFIG_IPV6) + } else { + oip6 = ipv6_hdr(skb); + saddr.sin6.sin6_addr = oip6->saddr; + saddr.sa.sa_family = AF_INET6; +#endif + } + + if (tun_dst) { + skb_dst_set(skb, (struct dst_entry *)tun_dst); + tun_dst = NULL; + } + + if ((vxlan->flags & VXLAN_F_LEARN) && + vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) + goto drop; + + skb_reset_network_header(skb); + /* In flow-based mode, GBP is carried in dst_metadata */ + if (!(vs->flags & VXLAN_F_COLLECT_METADATA)) + skb->mark = md->gbp; + + if (oip6) + err = IP6_ECN_decapsulate(oip6, skb); + if (oip) + err = IP_ECN_decapsulate(oip, skb); + + if (unlikely(err)) { + if (log_ecn_error) { + if (oip6) + net_info_ratelimited("non-ECT from %pI6\n", + &oip6->saddr); + if (oip) + net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", + &oip->saddr, oip->tos); + } + if (err > 1) { + ++vxlan->dev->stats.rx_frame_errors; + ++vxlan->dev->stats.rx_errors; + goto drop; + } + } + + stats = this_cpu_ptr(vxlan->dev->tstats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + + gro_cells_receive(&vxlan->gro_cells, skb); + + return; +drop: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); + + /* Consume bad packet */ + kfree_skb(skb); +} + /* Callback from net/ipv4/udp.c to receive packets */ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { + struct metadata_dst *tun_dst = NULL; struct vxlan_sock *vs; struct vxlanhdr *vxh; u32 flags, vni; - struct vxlan_metadata md = {0}; + struct vxlan_metadata _md; + struct vxlan_metadata *md = &_md; /* Need Vxlan and inner Ethernet header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -1202,6 +1262,18 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) vni &= VXLAN_VNI_MASK; } + if (vxlan_collect_metadata(vs)) { + tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, + cpu_to_be64(vni >> 8), sizeof(*md)); + + if (!tun_dst) + goto drop; + + md = ip_tunnel_info_opts(&tun_dst->u.tun_info); + } else { + memset(md, 0, sizeof(*md)); + } + /* For backwards compatibility, only allow reserved fields to be * used by VXLAN extensions if explicitly requested. 
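	 * (i.e. when the corresponding VXLAN_F_* flag was set on the
	 * receiving socket; see the GBP handling just below)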
*/ @@ -1209,13 +1281,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) struct vxlanhdr_gbp *gbp; gbp = (struct vxlanhdr_gbp *)vxh; - md.gbp = ntohs(gbp->policy_id); + md->gbp = ntohs(gbp->policy_id); + + if (tun_dst) + tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; if (gbp->dont_learn) - md.gbp |= VXLAN_GBP_DONT_LEARN; + md->gbp |= VXLAN_GBP_DONT_LEARN; if (gbp->policy_applied) - md.gbp |= VXLAN_GBP_POLICY_APPLIED; + md->gbp |= VXLAN_GBP_POLICY_APPLIED; flags &= ~VXLAN_GBP_USED_BITS; } @@ -1233,8 +1308,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto bad_flags; } - md.vni = vxh->vx_vni; - vs->rcv(vs, skb, &md); + vxlan_rcv(vs, skb, md, vni >> 8, tun_dst); return 0; drop: @@ -1247,93 +1321,13 @@ bad_flags: ntohl(vxh->vx_flags), ntohl(vxh->vx_vni)); error: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); + /* Return non vxlan pkt */ return 1; } -static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, - struct vxlan_metadata *md) -{ - struct iphdr *oip = NULL; - struct ipv6hdr *oip6 = NULL; - struct vxlan_dev *vxlan; - struct pcpu_sw_netstats *stats; - union vxlan_addr saddr; - __u32 vni; - int err = 0; - union vxlan_addr *remote_ip; - - vni = ntohl(md->vni) >> 8; - /* Is this VNI defined? */ - vxlan = vxlan_vs_find_vni(vs, vni); - if (!vxlan) - goto drop; - - remote_ip = &vxlan->default_dst.remote_ip; - skb_reset_mac_header(skb); - skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); - skb->protocol = eth_type_trans(skb, vxlan->dev); - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); - - /* Ignore packet loops (and multicast echo) */ - if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) - goto drop; - - /* Re-examine inner Ethernet packet */ - if (remote_ip->sa.sa_family == AF_INET) { - oip = ip_hdr(skb); - saddr.sin.sin_addr.s_addr = oip->saddr; - saddr.sa.sa_family = AF_INET; -#if IS_ENABLED(CONFIG_IPV6) - } else { - oip6 = ipv6_hdr(skb); - saddr.sin6.sin6_addr = oip6->saddr; - saddr.sa.sa_family = AF_INET6; -#endif - } - - if ((vxlan->flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) - goto drop; - - skb_reset_network_header(skb); - skb->mark = md->gbp; - - if (oip6) - err = IP6_ECN_decapsulate(oip6, skb); - if (oip) - err = IP_ECN_decapsulate(oip, skb); - - if (unlikely(err)) { - if (log_ecn_error) { - if (oip6) - net_info_ratelimited("non-ECT from %pI6\n", - &oip6->saddr); - if (oip) - net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", - &oip->saddr, oip->tos); - } - if (err > 1) { - ++vxlan->dev->stats.rx_frame_errors; - ++vxlan->dev->stats.rx_errors; - goto drop; - } - } - - stats = this_cpu_ptr(vxlan->dev->tstats); - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); - - netif_rx(skb); - - return; -drop: - /* Consume bad packet */ - kfree_skb(skb); -} - static int arp_reduce(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -1672,7 +1666,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, - __be16 src_port, __be16 dst_port, + __be16 src_port, __be16 dst_port, __be32 vni, struct vxlan_metadata *md, bool xnet, u32 vxflags) { struct vxlanhdr *vxh; @@ -1722,7 +1716,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, vxh = (struct vxlanhdr *) __skb_push(skb, 
sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = md->vni; + vxh->vx_vni = vni; if (type & SKB_GSO_TUNNEL_REMCSUM) { u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> @@ -1755,10 +1749,10 @@ err: } #endif -int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, - __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, - __be16 src_port, __be16 dst_port, - struct vxlan_metadata *md, bool xnet, u32 vxflags) +static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, + __be16 src_port, __be16 dst_port, __be32 vni, + struct vxlan_metadata *md, bool xnet, u32 vxflags) { struct vxlanhdr *vxh; int min_headroom; @@ -1801,7 +1795,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = md->vni; + vxh->vx_vni = vni; if (type & SKB_GSO_TUNNEL_REMCSUM) { u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> @@ -1828,7 +1822,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, ttl, df, src_port, dst_port, xnet, !(vxflags & VXLAN_F_UDP_CSUM)); } -EXPORT_SYMBOL_GPL(vxlan_xmit_skb); /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, @@ -1878,22 +1871,48 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_rdst *rdst, bool did_rsc) { + struct ip_tunnel_info *info; struct vxlan_dev *vxlan = netdev_priv(dev); struct sock *sk = vxlan->vn_sock->sock->sk; + unsigned short family = vxlan_get_sk_family(vxlan->vn_sock); struct rtable *rt = NULL; const struct iphdr *old_iph; struct flowi4 fl4; union vxlan_addr *dst; - struct vxlan_metadata md; + union vxlan_addr remote_ip; + struct vxlan_metadata _md; + struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; u32 vni; __be16 df = 0; __u8 tos, ttl; int err; + u32 flags = vxlan->flags; + + info = skb_tunnel_info(skb); - dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port; - vni = rdst->remote_vni; - dst = &rdst->remote_ip; + if (rdst) { + dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; + vni = rdst->remote_vni; + dst = &rdst->remote_ip; + } else { + if (!info) { + WARN_ONCE(1, "%s: Missing encapsulation instructions\n", + dev->name); + goto drop; + } + if (family != ip_tunnel_info_af(info)) + goto drop; + + dst_port = info->key.tp_dst ? 
: vxlan->cfg.dst_port; + vni = be64_to_cpu(info->key.tun_id); + remote_ip.sa.sa_family = family; + if (family == AF_INET) + remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; + else + remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; + dst = &remote_ip; + } if (vxlan_addr_any(dst)) { if (did_rsc) { @@ -1906,25 +1925,43 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, old_iph = ip_hdr(skb); - ttl = vxlan->ttl; + ttl = vxlan->cfg.ttl; if (!ttl && vxlan_addr_multicast(dst)) ttl = 1; - tos = vxlan->tos; + tos = vxlan->cfg.tos; if (tos == 1) tos = ip_tunnel_get_dsfield(old_iph, skb); - src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min, - vxlan->port_max, true); + src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, + vxlan->cfg.port_max, true); + + if (info) { + if (info->key.tun_flags & TUNNEL_CSUM) + flags |= VXLAN_F_UDP_CSUM; + else + flags &= ~VXLAN_F_UDP_CSUM; + + ttl = info->key.ttl; + tos = info->key.tos; + + if (info->options_len) + md = ip_tunnel_info_opts(info); + } else { + md->gbp = skb->mark; + } if (dst->sa.sa_family == AF_INET) { + if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)) + df = htons(IP_DF); + memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = rdst->remote_ifindex; + fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; fl4.flowi4_tos = RT_TOS(tos); fl4.flowi4_mark = skb->mark; fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = dst->sin.sin_addr.s_addr; - fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr; + fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; rt = ip_route_output_key(vxlan->net, &fl4); if (IS_ERR(rt)) { @@ -1958,14 +1995,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - md.vni = htonl(vni << 8); - md.gbp = skb->mark; - err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr, dst->sin.sin_addr.s_addr, tos, ttl, df, - src_port, dst_port, &md, + src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), - vxlan->flags); + flags); if (err < 0) { /* skb is already freed. */ skb = NULL; @@ -1977,16 +2011,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } else { struct dst_entry *ndst; struct flowi6 fl6; - u32 flags; + u32 rt6i_flags; memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = rdst->remote_ifindex; + fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; fl6.daddr = dst->sin6.sin6_addr; - fl6.saddr = vxlan->saddr.sin6.sin6_addr; + fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; - if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) { + if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); dev->stats.tx_carrier_errors++; @@ -2002,9 +2036,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } /* Bypass encapsulation if the destination is local */ - flags = ((struct rt6_info *)ndst)->rt6i_flags; - if (flags & RTF_LOCAL && - !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { + rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; + if (rt6i_flags & RTF_LOCAL && + !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; dst_release(ndst); @@ -2018,13 +2052,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ttl = ttl ? 
: ip6_dst_hoplimit(ndst); - md.vni = htonl(vni << 8); - md.gbp = skb->mark; - err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, - 0, ttl, src_port, dst_port, &md, + 0, ttl, src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), - vxlan->flags); + flags); #endif } @@ -2051,11 +2082,14 @@ tx_free: static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + const struct ip_tunnel_info *info; struct ethhdr *eth; bool did_rsc = false; struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; + info = skb_tunnel_info(skb); + skb_reset_mac_header(skb); eth = eth_hdr(skb); @@ -2078,6 +2112,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) #endif } + if (vxlan->flags & VXLAN_F_COLLECT_METADATA && + info && info->mode & IP_TUNNEL_INFO_TX) { + vxlan_xmit_one(skb, dev, NULL, false); + return NETDEV_TX_OK; + } + f = vxlan_find_mac(vxlan, eth->h_dest); did_rsc = false; @@ -2143,7 +2183,7 @@ static void vxlan_cleanup(unsigned long arg) if (f->state & NUD_PERMANENT) continue; - timeout = f->used + vxlan->age_interval * HZ; + timeout = f->used + vxlan->cfg.age_interval * HZ; if (time_before_eq(timeout, jiffies)) { netdev_dbg(vxlan->dev, "garbage collect %pM\n", @@ -2207,8 +2247,8 @@ static int vxlan_open(struct net_device *dev) struct vxlan_sock *vs; int ret = 0; - vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL, - false, vxlan->flags); + vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port, + vxlan->cfg.no_share, vxlan->flags); if (IS_ERR(vs)) return PTR_ERR(vs); @@ -2224,7 +2264,7 @@ static int vxlan_open(struct net_device *dev) } } - if (vxlan->age_interval) + if (vxlan->cfg.age_interval) mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); return ret; @@ -2297,6 +2337,46 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu) return 0; } +static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, + struct ip_tunnel_info *info, + __be16 sport, __be16 dport) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct rtable *rt; + struct flowi4 fl4; + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_tos = RT_TOS(info->key.tos); + fl4.flowi4_mark = skb->mark; + fl4.flowi4_proto = IPPROTO_UDP; + fl4.daddr = info->key.u.ipv4.dst; + + rt = ip_route_output_key(vxlan->net, &fl4); + if (IS_ERR(rt)) + return PTR_ERR(rt); + ip_rt_put(rt); + + info->key.u.ipv4.src = fl4.saddr; + info->key.tp_src = sport; + info->key.tp_dst = dport; + return 0; +} + +static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct ip_tunnel_info *info = skb_tunnel_info(skb); + __be16 sport, dport; + + sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, + vxlan->cfg.port_max, true); + dport = info->key.tp_dst ? 
: vxlan->cfg.dst_port; + + if (ip_tunnel_info_af(info) == AF_INET) + return egress_ipv4_tun_info(dev, skb, info, sport, dport); + return -EINVAL; +} + static const struct net_device_ops vxlan_netdev_ops = { .ndo_init = vxlan_init, .ndo_uninit = vxlan_uninit, @@ -2311,6 +2391,7 @@ static const struct net_device_ops vxlan_netdev_ops = { .ndo_fdb_add = vxlan_fdb_add, .ndo_fdb_del = vxlan_fdb_delete, .ndo_fdb_dump = vxlan_fdb_dump, + .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, }; /* Info for udev, that this is a virtual tunnel endpoint */ @@ -2335,7 +2416,7 @@ void vxlan_get_rx_port(struct net_device *dev) for (i = 0; i < PORT_HASH_SIZE; ++i) { hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { port = inet_sk(vs->sock->sk)->inet_sport; - sa_family = vs->sock->sk->sk_family; + sa_family = vxlan_get_sk_family(vs); dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, port); } @@ -2352,16 +2433,11 @@ static void vxlan_setup(struct net_device *dev) eth_hw_addr_random(dev); ether_setup(dev); - if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6) - dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM; - else - dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM; dev->netdev_ops = &vxlan_netdev_ops; dev->destructor = free_netdev; SET_NETDEV_DEVTYPE(dev, &vxlan_type); - dev->tx_queue_len = 0; dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= NETIF_F_RXCSUM; @@ -2373,7 +2449,7 @@ static void vxlan_setup(struct net_device *dev) dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; netif_keep_dst(dev); - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; INIT_LIST_HEAD(&vxlan->next); spin_lock_init(&vxlan->hash_lock); @@ -2382,10 +2458,12 @@ static void vxlan_setup(struct net_device *dev) vxlan->age_timer.function = vxlan_cleanup; vxlan->age_timer.data = (unsigned long) vxlan; - vxlan->dst_port = htons(vxlan_port); + vxlan->cfg.dst_port = htons(vxlan_port); vxlan->dev = dev; + gro_cells_init(&vxlan->gro_cells, dev); + for (h = 0; h < FDB_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vxlan->fdb_head[h]); } @@ -2407,6 +2485,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, + [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 }, [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, @@ -2486,6 +2565,7 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, udp_conf.family = AF_INET6; udp_conf.use_udp6_rx_checksums = !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); + udp_conf.ipv6_v6only = 1; } else { udp_conf.family = AF_INET; } @@ -2502,7 +2582,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, /* Create new listen socket if needed */ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, - vxlan_rcv_t *rcv, void *data, u32 flags) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); @@ -2531,8 +2610,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, vs->sock = sock; atomic_set(&vs->refcnt, 1); - vs->rcv = rcv; - vs->data = data; vs->flags = (flags & VXLAN_F_RCV_FLAGS); /* Initialize the vxlan udp offloads structure */ @@ -2556,9 +2633,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, return vs; } -struct vxlan_sock 
*vxlan_sock_add(struct net *net, __be16 port, - vxlan_rcv_t *rcv, void *data, - bool no_share, u32 flags) +static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + bool no_share, u32 flags) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; @@ -2568,7 +2644,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, spin_lock(&vn->sock_lock); vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags); - if (vs && vs->rcv == rcv) { + if (vs) { if (!atomic_add_unless(&vs->refcnt, 1, 0)) vs = ERR_PTR(-EBUSY); spin_unlock(&vn->sock_lock); @@ -2577,58 +2653,41 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, spin_unlock(&vn->sock_lock); } - return vxlan_socket_create(net, port, rcv, data, flags); + return vxlan_socket_create(net, port, flags); } -EXPORT_SYMBOL_GPL(vxlan_sock_add); -static int vxlan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) +static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, + struct vxlan_config *conf) { struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; - __u32 vni; int err; bool use_ipv6 = false; - - if (!data[IFLA_VXLAN_ID]) - return -EINVAL; + __be16 default_port = vxlan->cfg.dst_port; vxlan->net = src_net; - vni = nla_get_u32(data[IFLA_VXLAN_ID]); - dst->remote_vni = vni; + dst->remote_vni = conf->vni; - /* Unless IPv6 is explicitly requested, assume IPv4 */ - dst->remote_ip.sa.sa_family = AF_INET; - if (data[IFLA_VXLAN_GROUP]) { - dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); - } else if (data[IFLA_VXLAN_GROUP6]) { - if (!IS_ENABLED(CONFIG_IPV6)) - return -EPFNOSUPPORT; + memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip)); - dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); - dst->remote_ip.sa.sa_family = AF_INET6; - use_ipv6 = true; - } + /* Unless IPv6 is explicitly requested, assume IPv4 */ + if (!dst->remote_ip.sa.sa_family) + dst->remote_ip.sa.sa_family = AF_INET; - if (data[IFLA_VXLAN_LOCAL]) { - vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); - vxlan->saddr.sa.sa_family = AF_INET; - } else if (data[IFLA_VXLAN_LOCAL6]) { + if (dst->remote_ip.sa.sa_family == AF_INET6 || + vxlan->cfg.saddr.sa.sa_family == AF_INET6) { if (!IS_ENABLED(CONFIG_IPV6)) return -EPFNOSUPPORT; - - /* TODO: respect scope id */ - vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); - vxlan->saddr.sa.sa_family = AF_INET6; use_ipv6 = true; } - if (data[IFLA_VXLAN_LINK] && - (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { + if (conf->remote_ifindex) { struct net_device *lowerdev - = __dev_get_by_index(src_net, dst->remote_ifindex); + = __dev_get_by_index(src_net, conf->remote_ifindex); + + dst->remote_ifindex = conf->remote_ifindex; if (!lowerdev) { pr_info("ifindex %d does not exist\n", dst->remote_ifindex); @@ -2646,109 +2705,199 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, } #endif - if (!tb[IFLA_MTU]) + if (!conf->mtu) dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); dev->needed_headroom = lowerdev->hard_header_len + (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); - } else if (use_ipv6) + } else if (use_ipv6) { vxlan->flags |= VXLAN_F_IPV6; + dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM; + } else { + dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM; + } + + memcpy(&vxlan->cfg, conf, sizeof(*conf)); + if (!vxlan->cfg.dst_port) + vxlan->cfg.dst_port = default_port; + vxlan->flags |= conf->flags; + + if (!vxlan->cfg.age_interval) + vxlan->cfg.age_interval = FDB_AGE_DEFAULT; + + if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET, + vxlan->cfg.dst_port, vxlan->flags)) + return -EEXIST; + + dev->ethtool_ops = &vxlan_ethtool_ops; + + /* create an fdb entry for a valid default destination */ + if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { + err = vxlan_fdb_create(vxlan, all_zeros_mac, + &vxlan->default_dst.remote_ip, + NUD_REACHABLE|NUD_PERMANENT, + NLM_F_EXCL|NLM_F_CREATE, + vxlan->cfg.dst_port, + vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_ifindex, + NTF_SELF); + if (err) + return err; + } + + err = register_netdevice(dev); + if (err) { + vxlan_fdb_delete_default(vxlan); + return err; + } + + list_add(&vxlan->next, &vn->vxlan_list); + + return 0; +} + +struct net_device *vxlan_dev_create(struct net *net, const char *name, + u8 name_assign_type, struct vxlan_config *conf) +{ + struct nlattr *tb[IFLA_MAX+1]; + struct net_device *dev; + int err; + + memset(&tb, 0, sizeof(tb)); + + dev = rtnl_create_link(net, name, name_assign_type, + &vxlan_link_ops, tb); + if (IS_ERR(dev)) + return dev; + + err = vxlan_dev_configure(net, dev, conf); + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } + + return dev; +} +EXPORT_SYMBOL_GPL(vxlan_dev_create); + +static int vxlan_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct vxlan_config conf; + int err; + + memset(&conf, 0, sizeof(conf)); + + if (data[IFLA_VXLAN_ID]) + conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); + + if (data[IFLA_VXLAN_GROUP]) { + conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); + } else if (data[IFLA_VXLAN_GROUP6]) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; + + conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); + conf.remote_ip.sa.sa_family = AF_INET6; + } + + if (data[IFLA_VXLAN_LOCAL]) { + conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); + conf.saddr.sa.sa_family = AF_INET; + } else if (data[IFLA_VXLAN_LOCAL6]) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; + + /* TODO: respect scope id */ + conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); + conf.saddr.sa.sa_family = AF_INET6; + } + + if (data[IFLA_VXLAN_LINK]) + conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); if (data[IFLA_VXLAN_TOS]) - vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); + conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]); if (data[IFLA_VXLAN_TTL]) - vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); + conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) - vxlan->flags |= VXLAN_F_LEARN; + conf.flags |= VXLAN_F_LEARN; if (data[IFLA_VXLAN_AGEING]) - vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); - else - vxlan->age_interval = FDB_AGE_DEFAULT; + conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY])) - vxlan->flags |= VXLAN_F_PROXY; + conf.flags |= VXLAN_F_PROXY; if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC])) 
- vxlan->flags |= VXLAN_F_RSC; + conf.flags |= VXLAN_F_RSC; if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS])) - vxlan->flags |= VXLAN_F_L2MISS; + conf.flags |= VXLAN_F_L2MISS; if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS])) - vxlan->flags |= VXLAN_F_L3MISS; + conf.flags |= VXLAN_F_L3MISS; if (data[IFLA_VXLAN_LIMIT]) - vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + + if (data[IFLA_VXLAN_COLLECT_METADATA] && + nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) + conf.flags |= VXLAN_F_COLLECT_METADATA; if (data[IFLA_VXLAN_PORT_RANGE]) { const struct ifla_vxlan_port_range *p = nla_data(data[IFLA_VXLAN_PORT_RANGE]); - vxlan->port_min = ntohs(p->low); - vxlan->port_max = ntohs(p->high); + conf.port_min = ntohs(p->low); + conf.port_max = ntohs(p->high); } if (data[IFLA_VXLAN_PORT]) - vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); + conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) - vxlan->flags |= VXLAN_F_UDP_CSUM; + conf.flags |= VXLAN_F_UDP_CSUM; if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] && nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) - vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; + conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] && nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) - vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; + conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; if (data[IFLA_VXLAN_REMCSUM_TX] && nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) - vxlan->flags |= VXLAN_F_REMCSUM_TX; + conf.flags |= VXLAN_F_REMCSUM_TX; if (data[IFLA_VXLAN_REMCSUM_RX] && nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) - vxlan->flags |= VXLAN_F_REMCSUM_RX; + conf.flags |= VXLAN_F_REMCSUM_RX; if (data[IFLA_VXLAN_GBP]) - vxlan->flags |= VXLAN_F_GBP; + conf.flags |= VXLAN_F_GBP; if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) - vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL; + conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; - if (vxlan_find_vni(src_net, vni, use_ipv6 ? 
AF_INET6 : AF_INET, - vxlan->dst_port, vxlan->flags)) { - pr_info("duplicate VNI %u\n", vni); - return -EEXIST; - } - - dev->ethtool_ops = &vxlan_ethtool_ops; + err = vxlan_dev_configure(src_net, dev, &conf); + switch (err) { + case -ENODEV: + pr_info("ifindex %d does not exist\n", conf.remote_ifindex); + break; - /* create an fdb entry for a valid default destination */ - if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { - err = vxlan_fdb_create(vxlan, all_zeros_mac, - &vxlan->default_dst.remote_ip, - NUD_REACHABLE|NUD_PERMANENT, - NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_ifindex, - NTF_SELF); - if (err) - return err; - } + case -EPERM: + pr_info("IPv6 is disabled via sysctl\n"); + break; - err = register_netdevice(dev); - if (err) { - vxlan_fdb_delete_default(vxlan); - return err; + case -EEXIST: + pr_info("duplicate VNI %u\n", conf.vni); + break; } - list_add(&vxlan->next, &vn->vxlan_list); - - return 0; + return err; } static void vxlan_dellink(struct net_device *dev, struct list_head *head) @@ -2761,6 +2910,7 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) hlist_del_rcu(&vxlan->hlist); spin_unlock(&vn->sock_lock); + gro_cells_destroy(&vxlan->gro_cells); list_del(&vxlan->next); unregister_netdevice_queue(dev, head); } @@ -2779,6 +2929,7 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ nla_total_size(sizeof(struct ifla_vxlan_port_range)) + @@ -2796,8 +2947,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) const struct vxlan_dev *vxlan = netdev_priv(dev); const struct vxlan_rdst *dst = &vxlan->default_dst; struct ifla_vxlan_port_range ports = { - .low = htons(vxlan->port_min), - .high = htons(vxlan->port_max), + .low = htons(vxlan->cfg.port_min), + .high = htons(vxlan->cfg.port_max), }; if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) @@ -2820,22 +2971,22 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) goto nla_put_failure; - if (!vxlan_addr_any(&vxlan->saddr)) { - if (vxlan->saddr.sa.sa_family == AF_INET) { + if (!vxlan_addr_any(&vxlan->cfg.saddr)) { + if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL, - vxlan->saddr.sin.sin_addr.s_addr)) + vxlan->cfg.saddr.sin.sin_addr.s_addr)) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) } else { if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6, - &vxlan->saddr.sin6.sin6_addr)) + &vxlan->cfg.saddr.sin6.sin6_addr)) goto nla_put_failure; #endif } } - if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || - nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) || + if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || + nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || nla_put_u8(skb, IFLA_VXLAN_LEARNING, !!(vxlan->flags & VXLAN_F_LEARN)) || nla_put_u8(skb, IFLA_VXLAN_PROXY, @@ -2845,9 +2996,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) !!(vxlan->flags & VXLAN_F_L2MISS)) || nla_put_u8(skb, IFLA_VXLAN_L3MISS, !!(vxlan->flags & VXLAN_F_L3MISS)) || - nla_put_u32(skb, IFLA_VXLAN_AGEING, 
vxlan->age_interval) || - nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) || - nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) || + nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA, + !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) || + nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || + nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || + nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM, !!(vxlan->flags & VXLAN_F_UDP_CSUM)) || nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, @@ -2966,8 +3119,10 @@ static void __net_exit vxlan_exit_net(struct net *net) /* If vxlan->dev is in the same netns, it has already been added * to the list by the previous loop. */ - if (!net_eq(dev_net(vxlan->dev), net)) + if (!net_eq(dev_net(vxlan->dev), net)) { + gro_cells_destroy(&vxlan->gro_cells); unregister_netdevice_queue(vxlan->dev, &list); + } } unregister_netdevice_many(&list); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 3ebed1c40..e92aaf615 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1096,7 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) } dev->netdev_ops = &pvc_ops; dev->mtu = HDLC_MAX_MTU; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->ml_priv = pvc; if (register_netdevice(dev) != 0) { diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 758c4ba1e..8fef8d834 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -1358,6 +1358,8 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) if( !slave_dev || !(slave_dev->flags & IFF_UP) ) { netdev_err(dev, "trying to enslave non-active device %s\n", slave_name); + if (slave_dev) + dev_put(slave_dev); return -EPERM; } diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index 9729e6941..c04fb00e7 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -11,7 +11,8 @@ ath10k_core-y += mac.o \ wmi-tlv.o \ bmi.o \ hw.o \ - p2p.o + p2p.o \ + swap.o ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 31a990635..df7c76165 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -178,7 +178,7 @@ struct bmi_target_info { }; /* in msec */ -#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ) +#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ) #define BMI_CE_NUM_TO_TARG 0 #define BMI_CE_NUM_TO_HOST 1 diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index e508c65b6..cf28fbeba 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -452,6 +452,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, { struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; + struct ath10k *ar = ce_state->ar; unsigned int sw_index = dest_ring->sw_index; struct ce_desc *base = dest_ring->base_addr_owner_space; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 0eddb204d..5c903e15d 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -21,7 +21,7 @@ #include "hif.h" /* Maximum number of Copy Engine's supported */ -#define CE_COUNT_MAX 8 +#define CE_COUNT_MAX 12 #define 
CE_HTT_H2T_MSG_SRC_NENTRIES 4096 /* Descriptor rings must be aligned to this boundary */ @@ -38,8 +38,13 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_GATHER (1 << 0) #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) -#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC -#define CE_DESC_FLAGS_META_DATA_LSB 2 + +/* Following desc flags are used in QCA99X0 */ +#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2) +#define CE_DESC_FLAGS_TGT_INT_DIS (1 << 3) + +#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask +#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb struct ce_desc { __le32 addr; @@ -423,8 +428,10 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) -#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 -#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + ar->regs->ce_wrap_intr_sum_host_msi_lsb +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + ar->regs->ce_wrap_intr_sum_host_msi_mask #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \ (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index e566ae99c..b04111c19 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -31,16 +31,19 @@ #include "wmi-ops.h" unsigned int ath10k_debug_mask; +static unsigned int ath10k_cryptmode_param; static bool uart_print; static bool skip_otp; module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); +module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644); module_param(uart_print, bool, 0644); module_param(skip_otp, bool, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); MODULE_PARM_DESC(uart_print, "Uart target debugging"); MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); +MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { @@ -49,6 +52,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, .has_shifted_cc_wraparound = true, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, .fw = QCA988X_HW_2_0_FW_FILE, @@ -63,6 +68,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, .fw = QCA6174_HW_2_1_FW_FILE, @@ -77,6 +84,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw3.0", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, .fw = QCA6174_HW_3_0_FW_FILE, @@ -91,6 +100,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw3.2", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, .fw = { /* uses same binaries as hw3.0 */ .dir = QCA6174_HW_3_0_FW_DIR, @@ -101,8 +112,74 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, }, + { + .id = QCA99X0_HW_2_0_DEV_VERSION, + 
.name = "qca99x0 hw2.0", + .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .channel_counters_freq_hz = 150000, + .fw = { + .dir = QCA99X0_HW_2_0_FW_DIR, + .fw = QCA99X0_HW_2_0_FW_FILE, + .otp = QCA99X0_HW_2_0_OTP_FILE, + .board = QCA99X0_HW_2_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + }, +}; + +static const char *const ath10k_core_fw_feature_str[] = { + [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx", + [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x", + [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx", + [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p", + [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2", + [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps", + [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan", + [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp", + [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad", + [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init", + [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode", }; +static unsigned int ath10k_core_get_fw_feature_str(char *buf, + size_t buf_len, + enum ath10k_fw_features feat) +{ + /* make sure that ath10k_core_fw_feature_str[] gets updated */ + BUILD_BUG_ON(ARRAY_SIZE(ath10k_core_fw_feature_str) != + ATH10K_FW_FEATURE_COUNT); + + if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) || + WARN_ON(!ath10k_core_fw_feature_str[feat])) { + return scnprintf(buf, buf_len, "bit%d", feat); + } + + return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]); +} + +void ath10k_core_get_fw_features_str(struct ath10k *ar, + char *buf, + size_t buf_len) +{ + unsigned int len = 0; + int i; + + for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { + if (test_bit(i, ar->fw_features)) { + if (len > 0) + len += scnprintf(buf + len, buf_len - len, ","); + + len += ath10k_core_get_fw_feature_str(buf + len, + buf_len - len, + i); + } + } +} + static void ath10k_send_suspend_complete(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n"); @@ -164,6 +241,17 @@ static int ath10k_init_configure_target(struct ath10k *ar) return ret; } + /* Some devices have a special sanity check that verifies the PCI + * Device ID is written to this host interest var. It is known to be + * required to boot QCA6164. 
+ */ + ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext, + ar->dev_id); + if (ret) { + ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret); + return ret; + } + return 0; } @@ -355,6 +443,7 @@ out: static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; + u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param; int ret; ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len); @@ -380,7 +469,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) return ret; } - ret = ath10k_bmi_execute(ar, address, 0, &result); + ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); if (ret) { ath10k_err(ar, "could not execute otp (%d)\n", ret); return ret; @@ -412,6 +501,13 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) data = ar->firmware_data; data_len = ar->firmware_len; mode_name = "normal"; + ret = ath10k_swap_code_seg_configure(ar, + ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW); + if (ret) { + ath10k_err(ar, "failed to configure fw code swap: %d\n", + ret); + return ret; + } break; case ATH10K_FIRMWARE_MODE_UTF: data = ar->testmode.utf->data; @@ -451,6 +547,8 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) if (!IS_ERR(ar->cal_file)) release_firmware(ar->cal_file); + ath10k_swap_code_seg_release(ar); + ar->board = NULL; ar->board_data = NULL; ar->board_len = 0; @@ -464,6 +562,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) ar->firmware_len = 0; ar->cal_file = NULL; + } static int ath10k_fetch_cal_file(struct ath10k *ar) @@ -737,6 +836,13 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n", ar->htt.op_version); break; + case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE: + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found fw code swap image ie (%zd B)\n", + ie_len); + ar->swap.firmware_codeswap_data = data; + ar->swap.firmware_codeswap_len = ie_len; + break; default: ath10k_warn(ar, "Unknown FW IE: %u\n", le32_to_cpu(hdr->id)); @@ -991,6 +1097,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) return -EINVAL; } + ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI; + switch (ath10k_cryptmode_param) { + case ATH10K_CRYPT_MODE_HW: + clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); + clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags); + break; + case ATH10K_CRYPT_MODE_SW: + if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, + ar->fw_features)) { + ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware"); + return -EINVAL; + } + + set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); + set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags); + break; + default: + ath10k_info(ar, "invalid cryptmode: %d\n", + ath10k_cryptmode_param); + return -EINVAL; + } + + ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT; + ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT; + + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW; + + /* Workaround: + * + * Firmware A-MSDU aggregation breaks with RAW Tx encap mode + * and causes enormous performance issues (malformed frames, + * etc). + * + * Disabling A-MSDU makes RAW mode stable with heavy traffic + * albeit a bit slower compared to regular operation. + */ + ar->htt.max_num_amsdu = 1; + } + /* Backwards compatibility for firmwares without * ATH10K_FW_IE_WMI_OP_VERSION. 
*/ @@ -1014,6 +1160,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC; ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | WMI_STAT_PEER; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: @@ -1023,6 +1170,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; ar->fw_stats_req_mask = WMI_STAT_PEER; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_TLV: ar->max_num_peers = TARGET_TLV_NUM_PEERS; @@ -1033,6 +1181,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS; ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | WMI_STAT_PEER; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; + break; + case ATH10K_FW_WMI_OP_VERSION_10_4: + ar->max_num_peers = TARGET_10_4_NUM_PEERS; + ar->max_num_stations = TARGET_10_4_NUM_STATIONS; + ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; + ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; + ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; + ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; + ar->fw_stats_req_mask = WMI_STAT_PEER; + ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: @@ -1056,6 +1215,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_TLV: ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV; break; + case ATH10K_FW_WMI_OP_VERSION_10_4: case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: WARN_ON(1); @@ -1272,13 +1432,13 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) void ath10k_core_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); + ath10k_debug_stop(ar); /* try to suspend target */ if (ar->state != ATH10K_STATE_RESTARTING && ar->state != ATH10K_STATE_UTF) ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); - ath10k_debug_stop(ar); ath10k_hif_stop(ar); ath10k_htt_tx_free(&ar->htt); ath10k_htt_rx_free(&ar->htt); @@ -1330,6 +1490,13 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } + ret = ath10k_swap_code_seg_init(ar); + if (ret) { + ath10k_err(ar, "failed to initialize code swap segment: %d\n", + ret); + goto err_free_firmware_files; + } + mutex_lock(&ar->conf_mutex); ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); @@ -1470,9 +1637,15 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, switch (hw_rev) { case ATH10K_HW_QCA988X: ar->regs = &qca988x_regs; + ar->hw_values = &qca988x_values; break; case ATH10K_HW_QCA6174: ar->regs = &qca6174_regs; + ar->hw_values = &qca6174_values; + break; + case ATH10K_HW_QCA99X0: + ar->regs = &qca99x0_regs; + ar->hw_values = &qca99x0_values; break; default: ath10k_err(ar, "unsupported core hardware revision %d\n", @@ -1497,6 +1670,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, if (!ar->workqueue) goto err_free_mac; + ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq"); + if (!ar->workqueue_aux) + goto err_free_wq; + mutex_init(&ar->conf_mutex); spin_lock_init(&ar->data_lock); @@ -1517,10 +1694,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ret = ath10k_debug_create(ar); if (ret) - goto err_free_wq; + goto 
err_free_aux_wq; return ar; +err_free_aux_wq: + destroy_workqueue(ar->workqueue_aux); err_free_wq: destroy_workqueue(ar->workqueue); @@ -1536,6 +1715,9 @@ void ath10k_core_destroy(struct ath10k *ar) flush_workqueue(ar->workqueue); destroy_workqueue(ar->workqueue); + flush_workqueue(ar->workqueue_aux); + destroy_workqueue(ar->workqueue_aux); + ath10k_debug_destroy(ar); ath10k_mac_destroy(ar); } diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 78094f23c..12542144f 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -36,6 +36,7 @@ #include "spectral.h" #include "thermal.h" #include "wow.h" +#include "swap.h" #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -91,6 +92,7 @@ struct ath10k_skb_cb { u8 tid; u16 freq; bool is_offchan; + bool nohwcrypt; struct ath10k_htt_txbuf *txbuf; u32 txbuf_paddr; } __packed htt; @@ -151,6 +153,7 @@ struct ath10k_wmi { const struct wmi_ops *ops; u32 num_mem_chunks; + u32 rx_decap_mode; struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS]; }; @@ -327,8 +330,8 @@ struct ath10k_vif { u32 uapsd; } sta; struct { - /* 127 stations; wmi limit */ - u8 tim_bitmap[16]; + /* 512 stations */ + u8 tim_bitmap[64]; u8 tim_len; u32 ssid_len; u8 ssid[IEEE80211_MAX_SSID_LEN]; @@ -340,6 +343,7 @@ struct ath10k_vif { } u; bool use_cts_prot; + bool nohwcrypt; int num_legacy_stations; int txpower; struct wmi_wmm_params_all_arg wmm_params; @@ -381,9 +385,6 @@ struct ath10k_debug { u32 reg_addr; u32 nf_cal_period; - u8 htt_max_amsdu; - u8 htt_max_ampdu; - struct ath10k_fw_crash_data *fw_crash_data; }; @@ -452,16 +453,21 @@ enum ath10k_fw_features { ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6, /* Don't trust error code from otp.bin */ - ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7, /* Some firmware revisions pad 4th hw address to 4 byte boundary making * it 8 bytes long in Native Wifi Rx decap. */ - ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, + ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8, /* Firmware supports bypassing PLL setting on init. */ ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9, + /* Raw mode support. If supported, FW supports receiving and transmitting + * frames in raw mode. + */ + ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -475,6 +481,15 @@ enum ath10k_dev_flags { * waiters should immediately cancel instead of waiting for a time out. */ ATH10K_FLAG_CRASH_FLUSH, + + /* Use Raw mode instead of native WiFi Tx/Rx encap mode. + * Raw mode supports both hardware and software crypto. Native WiFi only + * supports hardware crypto.
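+	 * (This flag is set together with ATH10K_FLAG_HW_CRYPTO_DISABLED when
+	 * the new cryptmode=1 module parameter selects software crypto.)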
+ */ + ATH10K_FLAG_RAW_MODE, + + /* Disable HW crypto engine */ + ATH10K_FLAG_HW_CRYPTO_DISABLED, }; enum ath10k_cal_mode { @@ -483,6 +498,13 @@ enum ath10k_cal_mode { ATH10K_CAL_MODE_DT, }; +enum ath10k_crypt_mode { + /* Only use hardware crypto engine */ + ATH10K_CRYPT_MODE_HW, + /* Only use software crypto engine */ + ATH10K_CRYPT_MODE_SW, +}; + static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) { switch (mode) { @@ -532,6 +554,7 @@ struct ath10k { u8 mac_addr[ETH_ALEN]; enum ath10k_hw_rev hw_rev; + u16 dev_id; u32 chip_id; u32 target_version; u8 fw_version_major; @@ -545,6 +568,7 @@ struct ath10k { u32 ht_cap_info; u32 vht_cap_info; u32 num_rf_chains; + u32 max_spatial_stream; /* protected by conf_mutex */ bool ani_enabled; @@ -560,6 +584,7 @@ struct ath10k { struct completion target_suspend; const struct ath10k_hw_regs *regs; + const struct ath10k_hw_values *hw_values; struct ath10k_bmi bmi; struct ath10k_wmi wmi; struct ath10k_htc htc; @@ -570,6 +595,7 @@ struct ath10k { const char *name; u32 patch_load_addr; int uart_pin; + u32 otp_exe_param; /* This is true if given HW chip has a quirky Cycle Counter * wraparound which resets to 0x7fffffff instead of 0. All */ bool has_shifted_cc_wraparound; + /* Some chips expect the fragment descriptor to be contiguous + * memory for any TX operation. Set the continuous_frag_desc flag + * for hardware with this requirement. + */ + bool continuous_frag_desc; + + u32 channel_counters_freq_hz; + struct ath10k_hw_params_fw { const char *dir; const char *fw; @@ -602,6 +636,12 @@ struct ath10k { const struct firmware *cal_file; + struct { + const void *firmware_codeswap_data; + size_t firmware_codeswap_len; + struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info; + } swap; + char spec_board_id[100]; bool spec_board_loaded; @@ -617,6 +657,7 @@ struct ath10k { bool is_roc; int vdev_id; int roc_freq; + bool roc_notify; } scan; struct { @@ -656,6 +697,8 @@ struct ath10k { struct completion vdev_setup_done; struct workqueue_struct *workqueue; + /* Auxiliary workqueue */ + struct workqueue_struct *workqueue_aux; /* prevents concurrent FW reconfiguration */ struct mutex conf_mutex; @@ -675,6 +718,11 @@ struct ath10k { int max_num_stations; int max_num_vdevs; int max_num_tdls_vdevs; + int num_active_peers; + int num_tids; + + struct work_struct svc_rdy_work; + struct sk_buff *svc_rdy_skb; struct work_struct offchan_tx_work; struct sk_buff_head offchan_tx_queue; @@ -749,6 +797,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, enum ath10k_hw_rev hw_rev, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); +void ath10k_core_get_fw_features_str(struct ath10k *ar, + char *buf, + size_t max_len); int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode); int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 8fa606a9c..bf033f46f 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -124,7 +124,11 @@ EXPORT_SYMBOL(ath10k_info); void ath10k_print_driver_info(struct ath10k *ar) { - ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n", + char fw_features[128] = {}; + + ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); + + ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d
cal %s max-sta %d raw %d hwcrypto %d features %s\n", ar->hw_params.name, ar->target_version, ar->chip_id, @@ -137,8 +141,12 @@ void ath10k_print_driver_info(struct ath10k *ar) ar->htt.target_version_major, ar->htt.target_version_minor, ar->wmi.op_version, + ar->htt.op_version, ath10k_cal_mode_str(ar->cal_mode), - ar->max_num_stations); + ar->max_num_stations, + test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags), + !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags), + fw_features); ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", config_enabled(CONFIG_ATH10K_DEBUG), config_enabled(CONFIG_ATH10K_DEBUGFS), @@ -315,7 +323,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); if (ret) { ath10k_warn(ar, "failed to pull fw stats: %d\n", ret); - goto unlock; + goto free; } /* Stat data may exceed htc-wmi buffer limit. In such case firmware @@ -378,7 +386,6 @@ free: ath10k_debug_fw_stats_vdevs_free(&stats.vdevs); ath10k_debug_fw_stats_peers_free(&stats.peers); -unlock: spin_unlock_bh(&ar->data_lock); } @@ -1357,12 +1364,8 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, mutex_lock(&ar->conf_mutex); - if (ar->debug.htt_max_amsdu) - amsdu = ar->debug.htt_max_amsdu; - - if (ar->debug.htt_max_ampdu) - ampdu = ar->debug.htt_max_ampdu; - + amsdu = ar->htt.max_num_amsdu; + ampdu = ar->htt.max_num_ampdu; mutex_unlock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu); @@ -1396,8 +1399,8 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file, goto out; res = count; - ar->debug.htt_max_amsdu = amsdu; - ar->debug.htt_max_ampdu = ampdu; + ar->htt.max_num_amsdu = amsdu; + ar->htt.max_num_ampdu = ampdu; out: mutex_unlock(&ar->conf_mutex); @@ -1899,9 +1902,6 @@ void ath10k_debug_stop(struct ath10k *ar) if (ar->debug.htt_stats_mask != 0) cancel_delayed_work(&ar->debug.htt_stats_dwork); - ar->debug.htt_max_amsdu = 0; - ar->debug.htt_max_ampdu = 0; - ath10k_wmi_pdev_pktlog_disable(ar); } diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 6da6ef261..3e6ba63df 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -102,6 +102,43 @@ static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = { [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST, }; +static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = { + [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF, + [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH, + [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP, + [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP, + [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA, + [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA, + [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND, + [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG, + [HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF, + [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND, + [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND, + [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND, + [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] = + HTT_T2H_MSG_TYPE_TX_INSPECT_IND, + [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] = + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION, + [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = 
HTT_T2H_MSG_TYPE_CHAN_CHANGE, + [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] = + HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] = + HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND, + [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST, + [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS, + [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF, + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] = + HTT_T2H_MSG_TYPE_TX_FETCH_IND, + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] = + HTT_T2H_MSG_TYPE_TX_FETCH_CONF, + [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] = + HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, + [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] = + HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, +}; + int ath10k_htt_connect(struct ath10k_htt *htt) { struct ath10k_htc_svc_conn_req conn_req; @@ -147,6 +184,10 @@ int ath10k_htt_init(struct ath10k *ar) 2; /* ip4 dscp or ip6 priority */ switch (ar->htt.op_version) { + case ATH10K_FW_HTT_OP_VERSION_10_4: + ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types; + ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS; + break; case ATH10K_FW_HTT_OP_VERSION_10_1: ar->htt.t2h_msg_types = htt_10x_t2h_msg_types; ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS; @@ -205,8 +246,31 @@ int ath10k_htt_setup(struct ath10k_htt *htt) } status = ath10k_htt_verify_version(htt); + if (status) { + ath10k_warn(ar, "failed to verify htt version: %d\n", + status); + return status; + } + + status = ath10k_htt_send_frag_desc_bank_cfg(htt); if (status) return status; - return ath10k_htt_send_rx_ring_cfg_ll(htt); + status = ath10k_htt_send_rx_ring_cfg_ll(htt); + if (status) { + ath10k_warn(ar, "failed to setup rx ring: %d\n", + status); + return status; + } + + status = ath10k_htt_h2t_aggr_cfg_msg(htt, + htt->max_num_ampdu, + htt->max_num_amsdu); + if (status) { + ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n", + status); + return status; + } + + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 7e8a0d835..573187512 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -83,10 +83,39 @@ struct htt_ver_req { * around the mask + shift defs. 
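 * Note: the union below adds a three-word addressing variant
 * (paddr_lo/paddr_hi/len_16) used for hardware that requires contiguous
 * fragment descriptors (continuous_frag_desc, e.g. qca99x0).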
*/ struct htt_data_tx_desc_frag { - __le32 paddr; - __le32 len; + union { + struct double_word_addr { + __le32 paddr; + __le32 len; + } __packed dword_addr; + struct triple_word_addr { + __le32 paddr_lo; + __le16 paddr_hi; + __le16 len_16; + } __packed tword_addr; + } __packed; } __packed; +struct htt_msdu_ext_desc { + __le32 tso_flag[3]; + __le16 ip_identification; + u8 flags; + u8 reserved; + struct htt_data_tx_desc_frag frags[6]; +}; + +#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4) + +#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE) + enum htt_data_tx_desc_flags0 { HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, @@ -255,6 +284,9 @@ struct htt_aggr_conf { } __packed; #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32 +struct htt_mgmt_tx_desc_qca99x0 { + __le32 rate; +} __packed; struct htt_mgmt_tx_desc { u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)]; @@ -263,6 +295,9 @@ struct htt_mgmt_tx_desc { __le32 len; __le32 vdev_id; u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN]; + union { + struct htt_mgmt_tx_desc_qca99x0 qca99x0; + } __packed; } __packed; enum htt_mgmt_tx_status { @@ -349,6 +384,38 @@ enum htt_tlv_t2h_msg_type { HTT_TLV_T2H_NUM_MSGS }; +enum htt_10_4_t2h_msg_type { + HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0, + HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1, + HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2, + HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3, + HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4, + HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5, + HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6, + HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, + HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8, + HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9, + HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, + HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb, + HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, + HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, + HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe, + HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf, + HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10, + HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11, + HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12, + HTT_10_4_T2H_MSG_TYPE_TEST = 0x13, + HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14, + HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15, + HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16, + HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17, + HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18, + /* 0x19 to 0x2f are reserved */ + HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30, + /* keep this last */ + HTT_10_4_T2H_NUM_MSGS +}; + enum htt_t2h_msg_type { HTT_T2H_MSG_TYPE_VERSION_CONF, HTT_T2H_MSG_TYPE_RX_IND, @@ -375,6 +442,10 @@ enum htt_t2h_msg_type { HTT_T2H_MSG_TYPE_AGGR_CONF, HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, HTT_T2H_MSG_TYPE_TEST, + HTT_T2H_MSG_TYPE_EN_STATS, + HTT_T2H_MSG_TYPE_TX_FETCH_IND, + HTT_T2H_MSG_TYPE_TX_FETCH_CONF, + HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, /* keep this last */ HTT_T2H_NUM_MSGS }; @@ -1325,6 +1396,8 @@ struct ath10k_htt { u8 target_version_minor; struct completion target_version_received; enum ath10k_fw_htt_op_version op_version; + u8 max_num_amsdu; + u8 max_num_ampdu; const enum htt_t2h_msg_type *t2h_msg_types; u32 
t2h_msg_types_max; @@ -1430,6 +1503,11 @@ struct ath10k_htt { /* rx_status template */ struct ieee80211_rx_status rx_status; + + struct { + dma_addr_t paddr; + struct htt_msdu_ext_desc *vaddr; + } frag_desc; }; #define RX_HTT_HDR_STATUS_LEN 64 @@ -1482,6 +1560,12 @@ struct htt_rx_desc { #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) +/* These values are default in most firmware revisions and apparently are a + * sweet spot performance wise. + */ +#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3 +#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64 + int ath10k_htt_connect(struct ath10k_htt *htt); int ath10k_htt_init(struct ath10k *ar); int ath10k_htt_setup(struct ath10k_htt *htt); @@ -1497,6 +1581,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie); +int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt); int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt); int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 89eb16b30..1b7a04366 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -368,7 +368,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); - msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), + msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0), RX_MSDU_START_INFO0_MSDU_LENGTH); msdu_chained = rx_desc->frag_info.ring2_more_count; @@ -394,7 +394,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, msdu_chaining = 1; } - last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & + last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) & RX_MSDU_END_INFO0_LAST_MSDU; trace_ath10k_htt_rx_desc(ar, &rx_desc->attention, @@ -740,7 +740,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID)) return NULL; - if (!(rxd->msdu_end.info0 & + if (!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU))) return NULL; @@ -991,9 +991,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, bool is_last; rxd = (void *)msdu->data - sizeof(*rxd); - is_first = !!(rxd->msdu_end.info0 & + is_first = !!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); - is_last = !!(rxd->msdu_end.info0 & + is_last = !!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); /* Delivered decapped frame: @@ -1017,9 +1017,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, skb_trim(msdu, msdu->len - FCS_LEN); /* In most cases this will be true for sniffed frames. It makes sense - * to deliver them as-is without stripping the crypto param. This would - * also make sense for software based decryption (which is not - * implemented in ath10k). + * to deliver them as-is without stripping the crypto param. This is + * necessary for software based decryption. * * If there's no error then the frame is decrypted. At least that is * the case for frames that come in via fragmented rx indication. 
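All of the msdu_start/msdu_end field reads in this hunk go through the driver's MS() mask+shift helper (visible as context in the core.h hunk earlier). A minimal self-contained illustration; the EXAMPLE_* field layout here is hypothetical, only the macro itself comes from the driver:

	#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)

	/* hypothetical field: low 14 bits of info0 carry the MSDU length */
	#define EXAMPLE_MSDU_LENGTH_MASK 0x00003fff
	#define EXAMPLE_MSDU_LENGTH_LSB  0

	static inline u32 example_msdu_length(__le32 info0)
	{
		/* expands to (__le32_to_cpu(info0) & 0x00003fff) >> 0 */
		return MS(__le32_to_cpu(info0), EXAMPLE_MSDU_LENGTH);
	}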
@@ -1104,9 +1103,9 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, rxd = (void *)msdu->data - sizeof(*rxd); hdr = (void *)rxd->rx_hdr_status; - is_first = !!(rxd->msdu_end.info0 & + is_first = !!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); - is_last = !!(rxd->msdu_end.info0 & + is_last = !!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); is_amsdu = !(is_first && is_last); @@ -1201,7 +1200,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar, { struct htt_rx_desc *rxd; enum rx_msdu_decap_format decap; - struct ieee80211_hdr *hdr; /* First msdu's decapped header: * [802.11 header] <-- padded to 4 bytes long @@ -1215,8 +1213,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar, */ rxd = (void *)msdu->data - sizeof(*rxd); - hdr = (void *)rxd->rx_hdr_status; - decap = MS(__le32_to_cpu(rxd->msdu_start.info1), + decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), RX_MSDU_START_INFO1_DECAP_FORMAT); switch (decap) { @@ -1246,7 +1243,7 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) rxd = (void *)skb->data - sizeof(*rxd); flags = __le32_to_cpu(rxd->attention.flags); - info = __le32_to_cpu(rxd->msdu_start.info1); + info = __le32_to_cpu(rxd->msdu_start.common.info1); is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); @@ -1439,7 +1436,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar, first = skb_peek(amsdu); rxd = (void *)first->data - sizeof(*rxd); - decap = MS(__le32_to_cpu(rxd->msdu_start.info1), + decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), RX_MSDU_START_INFO1_DECAP_FORMAT); if (!chained) @@ -1633,8 +1630,6 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, __le16 msdu_id; int i; - lockdep_assert_held(&htt->tx_lock); - switch (status) { case HTT_DATA_TX_STATUS_NO_ACK: tx_done.no_ack = true; @@ -1759,14 +1754,14 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, __skb_queue_tail(amsdu, msdu); rxd = (void *)msdu->data - sizeof(*rxd); - if (rxd->msdu_end.info0 & + if (rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) break; } msdu = skb_peek_tail(amsdu); rxd = (void *)msdu->data - sizeof(*rxd); - if (!(rxd->msdu_end.info0 & + if (!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { skb_queue_splice_init(amsdu, list); return -EAGAIN; @@ -2000,15 +1995,11 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } - spin_lock_bh(&htt->tx_lock); ath10k_txrx_tx_unref(htt, &tx_done); - spin_unlock_bh(&htt->tx_lock); break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: - spin_lock_bh(&htt->tx_lock); - __skb_queue_tail(&htt->tx_compl_q, skb); - spin_unlock_bh(&htt->tx_lock); + skb_queue_tail(&htt->tx_compl_q, skb); tasklet_schedule(&htt->txrx_compl_task); return; case HTT_T2H_MSG_TYPE_SEC_IND: { @@ -2074,6 +2065,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; case HTT_T2H_MSG_TYPE_CHAN_CHANGE: break; + case HTT_T2H_MSG_TYPE_AGGR_CONF: + break; + case HTT_T2H_MSG_TYPE_EN_STATS: + case HTT_T2H_MSG_TYPE_TX_FETCH_IND: + case HTT_T2H_MSG_TYPE_TX_FETCH_CONF: + case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND: default: ath10k_warn(ar, "htt event (%d) not handled\n", resp->hdr.msg_type); @@ -2093,12 +2090,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) struct htt_resp *resp; struct sk_buff *skb; - spin_lock_bh(&htt->tx_lock); - while ((skb = __skb_dequeue(&htt->tx_compl_q))) { + while ((skb = 
skb_dequeue(&htt->tx_compl_q))) { ath10k_htt_rx_frm_tx_compl(htt->ar, skb); dev_kfree_skb_any(skb); } - spin_unlock_bh(&htt->tx_lock); spin_lock_bh(&htt->rx_ring.lock); while ((skb = __skb_dequeue(&htt->rx_compl_q))) { diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 7be3ce6e0..43aa5e2d1 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -63,7 +63,8 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) lockdep_assert_held(&htt->tx_lock); - ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC); + ret = idr_alloc(&htt->pending_tx, skb, 0, + htt->max_num_pending_tx, GFP_ATOMIC); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); @@ -84,6 +85,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; + int ret, size; ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); @@ -94,11 +96,31 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, sizeof(struct ath10k_htt_txbuf), 4, 0); if (!htt->tx_pool) { - idr_destroy(&htt->pending_tx); - return -ENOMEM; + ret = -ENOMEM; + goto free_idr_pending_tx; + } + + if (!ar->hw_params.continuous_frag_desc) + goto skip_frag_desc_alloc; + + size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); + htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, + &htt->frag_desc.paddr, + GFP_DMA); + if (!htt->frag_desc.vaddr) { + ath10k_warn(ar, "failed to alloc fragment desc memory\n"); + ret = -ENOMEM; + goto free_tx_pool; } +skip_frag_desc_alloc: return 0; + +free_tx_pool: + dma_pool_destroy(htt->tx_pool); +free_idr_pending_tx: + idr_destroy(&htt->pending_tx); + return ret; } static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) @@ -112,18 +134,25 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) tx_done.discard = 1; tx_done.msdu_id = msdu_id; - spin_lock_bh(&htt->tx_lock); ath10k_txrx_tx_unref(htt, &tx_done); - spin_unlock_bh(&htt->tx_lock); return 0; } void ath10k_htt_tx_free(struct ath10k_htt *htt) { + int size; + idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); idr_destroy(&htt->pending_tx); dma_pool_destroy(htt->tx_pool); + + if (htt->frag_desc.vaddr) { + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc); + dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr, + htt->frag_desc.paddr); + } } void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) @@ -201,6 +230,49 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) return 0; } +int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct sk_buff *skb; + struct htt_cmd *cmd; + int ret, size; + + if (!ar->hw_params.continuous_frag_desc) + return 0; + + if (!htt->frag_desc.paddr) { + ath10k_warn(ar, "invalid frag desc memory\n"); + return -EINVAL; + } + + size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg); + skb = ath10k_htc_alloc_skb(ar, size); + if (!skb) + return -ENOMEM; + + skb_put(skb, size); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; + cmd->frag_desc_bank_cfg.info = 0; + cmd->frag_desc_bank_cfg.num_banks = 1; + cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc); + 
cmd->frag_desc_bank_cfg.bank_base_addrs[0] = + __cpu_to_le32(htt->frag_desc.paddr); + cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0; + cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id = + __cpu_to_le16(htt->max_num_pending_tx - 1); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", + ret); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; @@ -355,12 +427,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + spin_unlock_bh(&htt->tx_lock); if (res < 0) { - spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; - spin_unlock_bh(&htt->tx_lock); txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { @@ -378,6 +449,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; + memset(cmd, 0, len); + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); @@ -424,6 +497,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) u16 msdu_id, flags1 = 0; dma_addr_t paddr = 0; u32 frags_paddr = 0; + struct htt_msdu_ext_desc *ext_desc = NULL; res = ath10k_htt_tx_inc_pending(htt); if (res) @@ -431,12 +505,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + spin_unlock_bh(&htt->tx_lock); if (res < 0) { - spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; - spin_unlock_bh(&htt->tx_lock); prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); @@ -452,8 +525,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && - ieee80211_has_protected(hdr->frame_control)) + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } else if (!skb_cb->htt.nohwcrypt && + skb_cb->txmode == ATH10K_HW_TXRX_RAW) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); @@ -469,16 +546,30 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; /* pass through */ case ATH10K_HW_TXRX_ETHERNET: - frags = skb_cb->htt.txbuf->frags; - - frags[0].paddr = __cpu_to_le32(skb_cb->paddr); - frags[0].len = __cpu_to_le32(msdu->len); - frags[1].paddr = 0; - frags[1].len = 0; - + if (ar->hw_params.continuous_frag_desc) { + memset(&htt->frag_desc.vaddr[msdu_id], 0, + sizeof(struct htt_msdu_ext_desc)); + frags = (struct htt_data_tx_desc_frag *) + &htt->frag_desc.vaddr[msdu_id].frags; + ext_desc = &htt->frag_desc.vaddr[msdu_id]; + frags[0].tword_addr.paddr_lo = + __cpu_to_le32(skb_cb->paddr); + frags[0].tword_addr.paddr_hi = 0; + frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); + + frags_paddr = htt->frag_desc.paddr + + (sizeof(struct htt_msdu_ext_desc) * msdu_id); + } else { + frags = skb_cb->htt.txbuf->frags; + frags[0].dword_addr.paddr = + __cpu_to_le32(skb_cb->paddr); + frags[0].dword_addr.len = __cpu_to_le32(msdu->len); + frags[1].dword_addr.paddr = 0; + frags[1].dword_addr.len = 0; + + frags_paddr = 
skb_cb->htt.txbuf_paddr; + } flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); - - frags_paddr = skb_cb->htt.txbuf_paddr; break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, @@ -512,14 +603,20 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) prefetch_len); skb_cb->htt.txbuf->htc_hdr.flags = 0; + if (skb_cb->htt.nohwcrypt) + flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; + if (!skb_cb->is_protected) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); - if (msdu->ip_summed == CHECKSUM_PARTIAL) { + if (msdu->ip_summed == CHECKSUM_PARTIAL && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; + if (ar->hw_params.continuous_frag_desc) + ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; } /* Prevent firmware from sending up tx inspection requests. There's diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 5997f00af..7b84d08a5 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -34,8 +34,15 @@ const struct ath10k_hw_regs qca988x_regs = { .ce7_base_address = 0x00059000, .soc_reset_control_si0_rst_mask = 0x00000001, .soc_reset_control_ce_rst_mask = 0x00040000, - .soc_chip_id_address = 0x00ec, - .scratch_3_address = 0x0030, + .soc_chip_id_address = 0x000000ec, + .scratch_3_address = 0x00000030, + .fw_indicator_address = 0x00009030, + .pcie_local_base_address = 0x00080000, + .ce_wrap_intr_sum_host_msi_lsb = 0x00000008, + .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00, + .pcie_intr_fw_mask = 0x00000400, + .pcie_intr_ce_mask_all = 0x0007f800, + .pcie_intr_clr_address = 0x00000014, }; const struct ath10k_hw_regs qca6174_regs = { @@ -54,8 +61,79 @@ const struct ath10k_hw_regs qca6174_regs = { .ce7_base_address = 0x00036000, .soc_reset_control_si0_rst_mask = 0x00000000, .soc_reset_control_ce_rst_mask = 0x00000001, - .soc_chip_id_address = 0x000f0, - .scratch_3_address = 0x0028, + .soc_chip_id_address = 0x000000f0, + .scratch_3_address = 0x00000028, + .fw_indicator_address = 0x0003a028, + .pcie_local_base_address = 0x00080000, + .ce_wrap_intr_sum_host_msi_lsb = 0x00000008, + .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00, + .pcie_intr_fw_mask = 0x00000400, + .pcie_intr_ce_mask_all = 0x0007f800, + .pcie_intr_clr_address = 0x00000014, +}; + +const struct ath10k_hw_regs qca99x0_regs = { + .rtc_state_cold_reset_mask = 0x00000400, + .rtc_soc_base_address = 0x00080000, + .rtc_wmac_base_address = 0x00000000, + .soc_core_base_address = 0x00082000, + .ce_wrapper_base_address = 0x0004d000, + .ce0_base_address = 0x0004a000, + .ce1_base_address = 0x0004a400, + .ce2_base_address = 0x0004a800, + .ce3_base_address = 0x0004ac00, + .ce4_base_address = 0x0004b000, + .ce5_base_address = 0x0004b400, + .ce6_base_address = 0x0004b800, + .ce7_base_address = 0x0004bc00, + /* Note: qca99x0 supports up to 12 Copy Engines. Other than address of + * CE0 and CE1 no other copy engine is directly referred to in the code. + * It is not really necessary to assign addresses for newly supported + * CEs in this address table.
+ * Copy Engine Address + * CE8 0x0004c000 + * CE9 0x0004c400 + * CE10 0x0004c800 + * CE11 0x0004cc00 + */ + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00000100, + .soc_chip_id_address = 0x000000ec, + .scratch_3_address = 0x00040050, + .fw_indicator_address = 0x00040050, + .pcie_local_base_address = 0x00000000, + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, + .pcie_intr_fw_mask = 0x00100000, + .pcie_intr_ce_mask_all = 0x000fff00, + .pcie_intr_clr_address = 0x00000010, +}; + +const struct ath10k_hw_values qca988x_values = { + .rtc_state_val_on = 3, + .ce_count = 8, + .msi_assign_ce_max = 7, + .num_target_ce_config_wlan = 7, + .ce_desc_meta_data_mask = 0xFFFC, + .ce_desc_meta_data_lsb = 2, +}; + +const struct ath10k_hw_values qca6174_values = { + .rtc_state_val_on = 3, + .ce_count = 8, + .msi_assign_ce_max = 7, + .num_target_ce_config_wlan = 7, + .ce_desc_meta_data_mask = 0xFFFC, + .ce_desc_meta_data_lsb = 2, +}; + +const struct ath10k_hw_values qca99x0_values = { + .rtc_state_val_on = 5, + .ce_count = 12, + .msi_assign_ce_max = 12, + .num_target_ce_config_wlan = 10, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, }; void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, @@ -74,6 +152,6 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, cc -= cc_prev - cc_fix; rcc -= rcc_prev; - survey->time = CCNT_TO_MSEC(cc); - survey->time_busy = CCNT_TO_MSEC(rcc); + survey->time = CCNT_TO_MSEC(ar, cc); + survey->time_busy = CCNT_TO_MSEC(ar, rcc); } diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 9a0376dc6..47b486efd 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -72,6 +72,18 @@ enum qca6174_chip_id_rev { #define QCA6174_HW_3_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/" #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234 +/* QCA99X0 1.0 definitions (unsupported) */ +#define QCA99X0_HW_1_0_CHIP_ID_REV 0x0 + +/* QCA99X0 2.0 definitions */ +#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000 +#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1 +#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0" +#define QCA99X0_HW_2_0_FW_FILE "/*(DEBLOBBED)*/" +#define QCA99X0_HW_2_0_OTP_FILE "/*(DEBLOBBED)*/" +#define QCA99X0_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/" +#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 + #define ATH10K_FW_API2_FILE "/*(DEBLOBBED)*/" #define ATH10K_FW_API3_FILE "/*(DEBLOBBED)*/" @@ -112,6 +124,9 @@ enum ath10k_fw_ie_type { * FW API 5 and above. 
*/ ATH10K_FW_IE_HTT_OP_VERSION = 6, + + /* Code swap image for firmware binary */ + ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7, }; enum ath10k_fw_wmi_op_version { @@ -122,6 +137,7 @@ enum ath10k_fw_wmi_op_version { ATH10K_FW_WMI_OP_VERSION_10_2 = 3, ATH10K_FW_WMI_OP_VERSION_TLV = 4, ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5, + ATH10K_FW_WMI_OP_VERSION_10_4 = 6, /* keep last */ ATH10K_FW_WMI_OP_VERSION_MAX, @@ -137,6 +153,8 @@ enum ath10k_fw_htt_op_version { ATH10K_FW_HTT_OP_VERSION_TLV = 3, + ATH10K_FW_HTT_OP_VERSION_10_4 = 4, + /* keep last */ ATH10K_FW_HTT_OP_VERSION_MAX, }; @@ -144,6 +162,7 @@ enum ath10k_fw_htt_op_version { enum ath10k_hw_rev { ATH10K_HW_QCA988X, ATH10K_HW_QCA6174, + ATH10K_HW_QCA99X0, }; struct ath10k_hw_regs { @@ -164,26 +183,50 @@ struct ath10k_hw_regs { u32 soc_reset_control_ce_rst_mask; u32 soc_chip_id_address; u32 scratch_3_address; + u32 fw_indicator_address; + u32 pcie_local_base_address; + u32 ce_wrap_intr_sum_host_msi_lsb; + u32 ce_wrap_intr_sum_host_msi_mask; + u32 pcie_intr_fw_mask; + u32 pcie_intr_ce_mask_all; + u32 pcie_intr_clr_address; }; extern const struct ath10k_hw_regs qca988x_regs; extern const struct ath10k_hw_regs qca6174_regs; +extern const struct ath10k_hw_regs qca99x0_regs; + +struct ath10k_hw_values { + u32 rtc_state_val_on; + u8 ce_count; + u8 msi_assign_ce_max; + u8 num_target_ce_config_wlan; + u16 ce_desc_meta_data_mask; + u8 ce_desc_meta_data_lsb; +}; + +extern const struct ath10k_hw_values qca988x_values; +extern const struct ath10k_hw_values qca6174_values; +extern const struct ath10k_hw_values qca99x0_values; void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X) #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) +#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) /* Known peculiarities: - * - current FW doesn't support raw rx mode (last tested v599) - * - current FW dumps upon raw tx mode (last tested v599) * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap * - raw frames have FCS, nwifi doesn't * - ethernet frames have 802.11 header decapped and parts (base hdr, cipher * param, llc/snap) are aligned to 4-byte boundaries each */ enum ath10k_hw_txrx_mode { ATH10K_HW_TXRX_RAW = 0, + + /* Native Wifi decap mode is used to align IP frames to 4-byte + * boundaries and avoid a very expensive re-alignment in mac80211. + */ ATH10K_HW_TXRX_NATIVE_WIFI = 1, ATH10K_HW_TXRX_ETHERNET = 2, @@ -245,10 +288,6 @@ enum ath10k_hw_rate_cck { #define TARGET_RX_TIMEOUT_LO_PRI 100 #define TARGET_RX_TIMEOUT_HI_PRI 40 -/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and - * avoid a very expensive re-alignment in mac80211.
*/ -#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI - #define TARGET_SCAN_MAX_PENDING_REQS 4 #define TARGET_BMISS_OFFLOAD_MAX_VDEV 3 #define TARGET_ROAM_OFFLOAD_MAX_VDEV 3 @@ -283,7 +322,6 @@ enum ath10k_hw_rate_cck { #define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_TIMEOUT_LO_PRI 100 #define TARGET_10X_RX_TIMEOUT_HI_PRI 40 -#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI #define TARGET_10X_SCAN_MAX_PENDING_REQS 4 #define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2 #define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2 @@ -299,7 +337,7 @@ enum ath10k_hw_rate_cck { #define TARGET_10X_MAX_FRAG_ENTRIES 0 /* 10.2 parameters */ -#define TARGET_10_2_DMA_BURST_SIZE 1 +#define TARGET_10_2_DMA_BURST_SIZE 0 /* Target specific defines for WMI-TLV firmware */ #define TARGET_TLV_NUM_VDEVS 4 @@ -310,8 +348,70 @@ enum ath10k_hw_rate_cck { #define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) #define TARGET_TLV_NUM_WOW_PATTERNS 22 +/* Diagnostic Window */ +#define CE_DIAG_PIPE 7 + +#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan + +/* Target specific defines for 10.4 firmware */ +#define TARGET_10_4_NUM_VDEVS 16 +#define TARGET_10_4_NUM_STATIONS 32 +#define TARGET_10_4_NUM_PEERS ((TARGET_10_4_NUM_STATIONS) + \ + (TARGET_10_4_NUM_VDEVS)) +#define TARGET_10_4_ACTIVE_PEERS 0 + +#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512 +#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50 +#define TARGET_10_4_NUM_OFFLOAD_PEERS 0 +#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 +#define TARGET_10_4_NUM_PEER_KEYS 2 +#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2) +#define TARGET_10_4_AST_SKID_LIMIT 32 +#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \ + BIT(2) | BIT(3)) +#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \ + BIT(2) | BIT(3)) + +/* 100 ms for video, best-effort, and background */ +#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100 + +/* 40 ms for voice */ +#define TARGET_10_4_RX_TIMEOUT_HI_PRI 40 + +#define TARGET_10_4_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI +#define TARGET_10_4_SCAN_MAX_REQS 4 +#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES 8 + +/* Note: mcast to ucast is disabled by default */ +#define TARGET_10_4_NUM_MCAST_GROUPS 0 +#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS 0 +#define TARGET_10_4_MCAST2UCAST_MODE 0 + +#define TARGET_10_4_TX_DBG_LOG_SIZE 1024 +#define TARGET_10_4_NUM_WDS_ENTRIES 32 +#define TARGET_10_4_DMA_BURST_SIZE 0 +#define TARGET_10_4_MAC_AGGR_DELIM 0 +#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 +#define TARGET_10_4_VOW_CONFIG 0 +#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400) +#define TARGET_10_4_11AC_TX_MAX_FRAGS 2 +#define TARGET_10_4_MAX_PEER_EXT_STATS 16 +#define TARGET_10_4_SMART_ANT_CAP 0 +#define TARGET_10_4_BK_MIN_FREE 0 +#define TARGET_10_4_BE_MIN_FREE 0 +#define TARGET_10_4_VI_MIN_FREE 0 +#define TARGET_10_4_VO_MIN_FREE 0 +#define TARGET_10_4_RX_BATCH_MODE 1 +#define TARGET_10_4_THERMAL_THROTTLING_CONFIG 0 +#define TARGET_10_4_ATF_CONFIG 0 +#define TARGET_10_4_IPHDR_PAD_CONFIG 1 +#define TARGET_10_4_QWRAP_CONFIG 0 + /* Number of Copy Engines supported */ -#define CE_COUNT 8 +#define CE_COUNT ar->hw_values->ce_count /* * Total number of PCIe MSI interrupts requested for all interrupt sources. 
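The conversions in this header all follow one pattern: a former build-time constant (CE_COUNT above, RTC_STATE_V_ON and MSI_ASSIGN_CE_MAX below) now dereferences the per-chip table installed in ath10k_core_create(), so a single driver binary serves qca988x, qca6174 and qca99x0. A sketch of the effect; the function is illustrative only, the macros and fields are as defined in this header:

	static void example_dump_hw_values(struct ath10k *ar)
	{
		/* both macros dereference ar->hw_values, so they yield 8/3 on
		 * qca988x and qca6174 but 12/5 on qca99x0 (see the hw.c tables) */
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ce_count %d rtc_state_on %d\n",
			   CE_COUNT, RTC_STATE_V_ON);
	}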
@@ -335,10 +435,10 @@ enum ath10k_hw_rate_cck { /* MSIs for Copy Engines */ #define MSI_ASSIGN_CE_INITIAL 1 -#define MSI_ASSIGN_CE_MAX 7 +#define MSI_ASSIGN_CE_MAX ar->hw_values->msi_assign_ce_max /* as of IP3.7.1 */ -#define RTC_STATE_V_ON 3 +#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on #define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask #define RTC_STATE_V_LSB 0 @@ -374,7 +474,7 @@ enum ath10k_hw_rate_cck { #define CE7_BASE_ADDRESS ar->regs->ce7_base_address #define DBI_BASE_ADDRESS 0x00060000 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 -#define PCIE_LOCAL_BASE_ADDRESS 0x00080000 +#define PCIE_LOCAL_BASE_ADDRESS ar->regs->pcie_local_base_address #define SOC_RESET_CONTROL_ADDRESS 0x00000000 #define SOC_RESET_CONTROL_OFFSET 0x00000000 @@ -448,24 +548,25 @@ enum ath10k_hw_rate_cck { #define CORE_CTRL_ADDRESS 0x0000 #define PCIE_INTR_ENABLE_ADDRESS 0x0008 #define PCIE_INTR_CAUSE_ADDRESS 0x000c -#define PCIE_INTR_CLR_ADDRESS 0x0014 +#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address #define SCRATCH_3_ADDRESS ar->regs->scratch_3_address #define CPU_INTR_ADDRESS 0x0010 -/* Cycle counters are running at 88MHz */ -#define CCNT_TO_MSEC(x) ((x) / 88000) +#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz) /* Firmware indications to the Host via SCRATCH_3 register. */ -#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS) +#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address #define FW_IND_EVENT_PENDING 1 #define FW_IND_INITIALIZED 2 /* HOST_REG interrupt from firmware */ -#define PCIE_INTR_FIRMWARE_MASK 0x00000400 -#define PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask +#define PCIE_INTR_CE_MASK_ALL ar->regs->pcie_intr_ce_mask_all #define DRAM_BASE_ADDRESS 0x00400000 +#define PCIE_BAR_REG_ADDRESS 0x40030 + #define MISSING 0 #define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 0d3c474ff..e1d32de8b 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -197,6 +197,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif, return -EOPNOTSUPP; } + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + if (cmd == DISABLE_KEY) { arg.key_cipher = WMI_CIPHER_NONE; arg.key_data = NULL; @@ -218,6 +222,9 @@ static int ath10k_install_key(struct ath10k_vif *arvif, reinit_completion(&ar->install_key_done); + if (arvif->nohwcrypt) + return 1; + ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); if (ret) return ret; @@ -240,6 +247,10 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, lockdep_assert_held(&ar->conf_mutex); + if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && + arvif->vif->type != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, arvif->vdev_id, addr); spin_unlock_bh(&ar->data_lock); @@ -251,21 +262,34 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, if (arvif->wep_keys[i] == NULL) continue; - flags = 0; - flags |= WMI_KEY_PAIRWISE; + switch (arvif->vif->type) { + case NL80211_IFTYPE_AP: + flags = WMI_KEY_PAIRWISE; - ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, - addr, flags); - if (ret) - return ret; + if (arvif->def_wep_key_idx == i) + flags |= WMI_KEY_TX_USAGE; - flags = 0; - flags |= WMI_KEY_GROUP; + ret = ath10k_install_key(arvif, arvif->wep_keys[i], + 
SET_KEY, addr, flags); + if (ret < 0) + return ret; + break; + case NL80211_IFTYPE_ADHOC: + ret = ath10k_install_key(arvif, arvif->wep_keys[i], + SET_KEY, addr, + WMI_KEY_PAIRWISE); + if (ret < 0) + return ret; - ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, - addr, flags); - if (ret) - return ret; + ret = ath10k_install_key(arvif, arvif->wep_keys[i], + SET_KEY, addr, WMI_KEY_GROUP); + if (ret < 0) + return ret; + break; + default: + WARN_ON(1); + return -EINVAL; + } spin_lock_bh(&ar->data_lock); peer->keys[i] = arvif->wep_keys[i]; @@ -280,6 +304,9 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, * * FIXME: Revisit. Perhaps this can be done in a less hacky way. */ + if (arvif->vif->type != NL80211_IFTYPE_ADHOC) + return 0; + if (arvif->def_wep_key_idx == -1) return 0; @@ -322,10 +349,10 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, /* key flags are not required to delete the key */ ret = ath10k_install_key(arvif, peer->keys[i], DISABLE_KEY, addr, flags); - if (ret && first_errno == 0) + if (ret < 0 && first_errno == 0) first_errno = ret; - if (ret) + if (ret < 0) ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", i, ret); @@ -398,7 +425,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, break; /* key flags are not required to delete the key */ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags); - if (ret && first_errno == 0) + if (ret < 0 && first_errno == 0) first_errno = ret; if (ret) @@ -679,20 +706,6 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); } -static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value) -{ - struct ath10k *ar = arvif->ar; - u32 vdev_param; - - if (value != 0xFFFFFFFF) - value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold, - ATH10K_FRAGMT_THRESHOLD_MIN, - ATH10K_FRAGMT_THRESHOLD_MAX); - - vdev_param = ar->wmi.vdev_param->fragmentation_threshold; - return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); -} - static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) { int ret; @@ -844,7 +857,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar) static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) { struct cfg80211_chan_def *chandef = NULL; - struct ieee80211_channel *channel = chandef->chan; + struct ieee80211_channel *channel = NULL; struct wmi_vdev_start_request_arg arg = {}; int ret = 0; @@ -1676,7 +1689,7 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) return 0; } -static int ath10k_mac_ps_vif_count(struct ath10k *ar) +static int ath10k_mac_num_vifs_started(struct ath10k *ar) { struct ath10k_vif *arvif; int num = 0; @@ -1684,7 +1697,7 @@ static int ath10k_mac_ps_vif_count(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) - if (arvif->ps) + if (arvif->is_started) num++; return num; @@ -1708,7 +1721,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) enable_ps = arvif->ps; - if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 && + if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, ar->fw_features)) { ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", @@ -2070,7 +2083,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, enum ieee80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; - int i, n, max_nss; + int i, n; + u8 max_nss; 
u32 stbc; lockdep_assert_held(&ar->conf_mutex); @@ -2155,7 +2169,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_ht_rates.rates[i] = i; } else { arg->peer_ht_rates.num_rates = n; - arg->peer_num_spatial_streams = max_nss; + arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); } ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", @@ -2510,6 +2524,9 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, u32 param; u32 value; + if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) + return 0; + if (!(ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | @@ -3159,13 +3176,30 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif, * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for * NativeWifi txmode - it selects AP key instead of peer key. It seems * to work with Ethernet txmode so use it. + * + * FIXME: Check if raw mode works with TDLS. */ if (ieee80211_is_data_present(fc) && sta && sta->tdls) return ATH10K_HW_TXRX_ETHERNET; + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + return ATH10K_HW_TXRX_RAW; + return ATH10K_HW_TXRX_NATIVE_WIFI; } +static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, + struct sk_buff *skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_CTL_INJECTED; + if ((info->flags & mask) == mask) + return false; + if (vif) + return !ath10k_vif_to_arvif(vif)->nohwcrypt; + return true; +} + /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS * Control in the header. */ @@ -3332,6 +3366,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) int vdev_id; int ret; unsigned long time_left; + bool tmp_peer_created = false; /* FW requirement: We must create a peer before FW will send out * an offchannel frame. 
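
/* Illustrative sketch, not driver code: the ath10k_tx_h_use_hwcrypto() helper
 * added above bypasses hardware crypto only when BOTH flags are set, which is
 * why it tests (flags & mask) == mask rather than (flags & mask). Standalone
 * C with hypothetical flag bit positions:
 */
#include <stdbool.h>
#include <stdint.h>

#define TX_INTFL_DONT_ENCRYPT (1u << 0)   /* hypothetical stand-ins */
#define TX_CTL_INJECTED       (1u << 1)

static bool use_hwcrypto(uint32_t flags, bool vif_nohwcrypt)
{
        const uint32_t mask = TX_INTFL_DONT_ENCRYPT | TX_CTL_INJECTED;

        if ((flags & mask) == mask)     /* injected AND marked "don't encrypt" */
                return false;
        return !vif_nohwcrypt;          /* otherwise follow the vif setting */
}
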
Otherwise the frame will be stuck and @@ -3369,6 +3404,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) if (ret) ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", peer_addr, vdev_id, ret); + tmp_peer_created = (ret == 0); } spin_lock_bh(&ar->data_lock); @@ -3384,7 +3420,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", skb); - if (!peer) { + if (!peer && tmp_peer_created) { ret = ath10k_peer_delete(ar, vdev_id, peer_addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", @@ -3440,14 +3476,13 @@ void __ath10k_scan_finish(struct ath10k *ar) case ATH10K_SCAN_IDLE: break; case ATH10K_SCAN_RUNNING: - if (ar->scan.is_roc) - ieee80211_remain_on_channel_expired(ar->hw); - /* fall through */ case ATH10K_SCAN_ABORTING: if (!ar->scan.is_roc) ieee80211_scan_completed(ar->hw, (ar->scan.state == ATH10K_SCAN_ABORTING)); + else if (ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(ar->hw); /* fall through */ case ATH10K_SCAN_STARTING: ar->scan.state = ATH10K_SCAN_IDLE; @@ -3611,6 +3646,7 @@ static void ath10k_tx(struct ieee80211_hw *hw, ATH10K_SKB_CB(skb)->htt.is_offchan = false; ATH10K_SKB_CB(skb)->htt.freq = 0; ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr); + ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb); ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif); ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb); ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc); @@ -3626,12 +3662,11 @@ static void ath10k_tx(struct ieee80211_hw *hw, ath10k_tx_h_8023(skb); break; case ATH10K_HW_TXRX_RAW: - /* FIXME: Packet injection isn't implemented. It should be - * doable with firmware 10.2 on qca988x. 
- */ - WARN_ON_ONCE(1); - ieee80211_free_txskb(hw, skb); - return; + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + WARN_ON_ONCE(1); + ieee80211_free_txskb(hw, skb); + return; + } } if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { @@ -4021,7 +4056,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) static u32 get_nss_from_chainmask(u16 chain_mask) { - if ((chain_mask & 0x15) == 0x15) + if ((chain_mask & 0xf) == 0xf) return 4; else if ((chain_mask & 0x7) == 0x7) return 3; @@ -4030,6 +4065,43 @@ static u32 get_nss_from_chainmask(u16 chain_mask) return 1; } +static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) +{ + u32 value = 0; + struct ath10k *ar = arvif->ar; + + if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) + return 0; + + if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) + value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET); + + if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) + value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET); + + if (!value) + return 0; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) + value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | + WMI_VDEV_PARAM_TXBF_SU_TX_BFER); + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) + value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | + WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + ar->wmi.vdev_param->txbf, value); +} + /* * TODO: * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, @@ -4073,7 +4145,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, if (ar->num_peers >= ar->max_num_peers) { ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); - return -ENOBUFS; + ret = -ENOBUFS; + goto err; } if (ar->free_vdev_map == 0) { @@ -4155,6 +4228,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err; } } + if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) + arvif->nohwcrypt = true; + + if (arvif->nohwcrypt && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); + goto err; + } ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, @@ -4253,16 +4334,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, } } - ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); + ret = ath10k_mac_set_txbf_conf(arvif); if (ret) { - ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } - ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); + ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); if (ret) { - ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -4642,9 +4723,6 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.vdev_id = arvif->vdev_id; arg.scan_id = ATH10K_SCAN_ID; - if (!req->no_cck) - arg.scan_ctrl_flags |= 
WMI_SCAN_ADD_CCK_RATES; - if (req->ie_len) { arg.ie_len = req->ie_len; memcpy(arg.ie, req->ie, arg.ie_len); @@ -4752,6 +4830,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) return 1; + if (arvif->nohwcrypt) + return 1; + if (key->keyidx > WMI_MAX_KEY_INDEX) return -ENOSPC; @@ -4821,6 +4902,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); if (ret) { + WARN_ON(ret > 0); ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret); goto exit; @@ -4836,13 +4918,16 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); if (ret) { + WARN_ON(ret > 0); ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret); ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, peer_addr, flags); - if (ret2) + if (ret2) { + WARN_ON(ret2 > 0); ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret2); + } goto exit; } } @@ -5463,6 +5548,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, ar->scan.is_roc = true; ar->scan.vdev_id = arvif->vdev_id; ar->scan.roc_freq = chan->center_freq; + ar->scan.roc_notify = true; ret = 0; break; case ATH10K_SCAN_STARTING: @@ -5526,7 +5612,13 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + ar->scan.roc_notify = false; + spin_unlock_bh(&ar->data_lock); + ath10k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); @@ -5582,7 +5674,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct ath10k *ar = hw->priv; bool skip; - int ret; + long time_left; /* mac80211 doesn't care if we really xmit queued frames or not * we'll collect those frames either way if we stop/delete vdevs */ @@ -5594,7 +5686,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (ar->state == ATH10K_STATE_WEDGED) goto skip; - ret = wait_event_timeout(ar->htt.empty_tx_wq, ({ + time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ bool empty; spin_lock_bh(&ar->htt.tx_lock); @@ -5608,9 +5700,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, (empty || skip); }), ATH10K_FLUSH_TIMEOUT_HZ); - if (ret <= 0 || skip) - ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n", - skip, ar->state, ret); + if (time_left == 0 || skip) + ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", + skip, ar->state, time_left); skip: mutex_unlock(&ar->conf_mutex); @@ -6235,6 +6327,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_started = true; + ret = ath10k_mac_vif_setup_ps(arvif); + if (ret) { + ath10k_warn(ar, "failed to update vdev %i ps: %d\n", + arvif->vdev_id, ret); + goto err_stop; + } + if (vif->type == NL80211_IFTYPE_MONITOR) { ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); if (ret) { @@ -6252,6 +6351,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, err_stop: ath10k_vdev_stop(arvif); arvif->is_started = false; + ath10k_mac_vif_setup_ps(arvif); err: mutex_unlock(&ar->conf_mutex); @@ -6582,8 +6682,11 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { static const 
struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { { .max = 2, - .types = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), }, @@ -6593,6 +6696,26 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { }, }; +static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; + static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = { { .max = 1, @@ -6611,7 +6734,7 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { { .limits = ath10k_tlv_if_limit, .num_different_channels = 1, - .max_interfaces = 3, + .max_interfaces = 4, .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), }, { @@ -6625,10 +6748,16 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { { .limits = ath10k_tlv_if_limit, - .num_different_channels = 2, - .max_interfaces = 3, + .num_different_channels = 1, + .max_interfaces = 4, .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), }, + { + .limits = ath10k_tlv_qcs_if_limit, + .num_different_channels = 2, + .max_interfaces = 4, + .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), + }, { .limits = ath10k_tlv_if_limit_ibss, .num_different_channels = 1, @@ -6637,6 +6766,33 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { }, }; +static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 16, + .types = BIT(NL80211_IFTYPE_AP) + }, +}; + +static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { + { + .limits = ath10k_10_4_if_limits, + .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), + .max_interfaces = 16, + .num_different_channels = 1, + .beacon_int_infra_match = true, +#ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), +#endif + }, +}; + static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) { struct ieee80211_sta_vht_cap vht_cap = {0}; @@ -6861,7 +7017,6 @@ int ath10k_mac_register(struct ath10k *ar) ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); ieee80211_hw_set(ar->hw, AP_LINK_PS); ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); - ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); @@ -6869,6 +7024,9 @@ int ath10k_mac_register(struct ath10k *ar) ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); ieee80211_hw_set(ar->hw, QUEUE_CONTROL); + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); + ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -6919,6 +7077,8 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + /* * on LL hardware queues are managed entirely by the FW * so we only advertise to mac we can do the queues thing @@ -6958,6 +7118,11 @@ int 
ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath10k_10x_if_comb); break; + case ATH10K_FW_WMI_OP_VERSION_10_4: + ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_10_4_if_comb); + break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: WARN_ON(1); @@ -6965,7 +7130,8 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } - ar->hw->netdev_features = NETIF_F_HW_CSUM; + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + ar->hw->netdev_features = NETIF_F_HW_CSUM; if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { /* Init ath dfs pattern detector */ diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 79b707138..1ae298b84 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -58,11 +58,15 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 #define QCA988X_2_0_DEVICE_ID (0x003c) +#define QCA6164_2_1_DEVICE_ID (0x0041) #define QCA6174_2_1_DEVICE_ID (0x003e) +#define QCA99X0_2_0_DEVICE_ID (0x0040) static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ + { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ + { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */ {0} }; @@ -72,16 +76,25 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { * because of that. */ { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, + + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, + { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, + { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, + + { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, }; static void ath10k_pci_buffer_cleanup(struct ath10k *ar); static int ath10k_pci_cold_reset(struct ath10k *ar); -static int ath10k_pci_warm_reset(struct ath10k *ar); +static int ath10k_pci_safe_chip_reset(struct ath10k *ar); static int ath10k_pci_wait_for_target_init(struct ath10k *ar); static int ath10k_pci_init_irq(struct ath10k *ar); static int ath10k_pci_deinit_irq(struct ath10k *ar); @@ -90,6 +103,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar); static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, struct ath10k_ce_pipe *rx_pipe, struct bmi_xfer *xfer); +static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); static const struct ce_attr host_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ @@ -155,6 +169,38 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_sz_max = DIAG_TRANSFER_LIMIT, .dest_nentries = 2, }, + + /* CE8: target->host pktlog */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + }, + + /* CE9 target autonomous qcache memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE10: target autonomous hif 
memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE11: target autonomous hif memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
 };

 /* Target firmware's Copy Engine configuration. */
@@ -232,6 +278,38 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
        },

        /* CE7 used only by Host */
+       {
+               .pipenum = __cpu_to_le32(7),
+               .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+               .nentries = __cpu_to_le32(0),
+               .nbytes_max = __cpu_to_le32(0),
+               .flags = __cpu_to_le32(0),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* CE8: target->host pktlog */
+       {
+               .pipenum = __cpu_to_le32(8),
+               .pipedir = __cpu_to_le32(PIPEDIR_IN),
+               .nentries = __cpu_to_le32(64),
+               .nbytes_max = __cpu_to_le32(2048),
+               .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* CE9: target autonomous qcache memcpy */
+       {
+               .pipenum = __cpu_to_le32(9),
+               .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+               .nentries = __cpu_to_le32(32),
+               .nbytes_max = __cpu_to_le32(2048),
+               .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* It is not necessary to send the target a wlan configuration for
+        * CE10 & CE11, as these CEs are not actively used in the target.
+        */
 };

 /*
@@ -479,6 +557,12 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

+       if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+               ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+                           offset, offset + sizeof(value), ar_pci->mem_len);
+               return;
+       }
+
        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@@ -496,6 +580,12 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
        u32 val;
        int ret;

+       if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+               ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+                           offset, offset + sizeof(val), ar_pci->mem_len);
+               return 0;
+       }
+
        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
@@ -678,6 +768,26 @@ static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
        ath10k_pci_rx_post(ar);
 }

+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+       u32 val = 0;
+
+       switch (ar->hw_rev) {
+       case ATH10K_HW_QCA988X:
+       case ATH10K_HW_QCA6174:
+               val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                        CORE_CTRL_ADDRESS) &
+                      0x7ff) << 21;
+               break;
+       case ATH10K_HW_QCA99X0:
+               val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+               break;
+       }
+
+       val |= 0x100000 | (addr & 0xfffff);
+       return val;
+}
+
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
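
/* Illustrative sketch, not driver code: the read32/write32 hunks above clamp
 * every MMIO access against the mapped BAR length saved at probe time
 * (ar_pci->mem_len). The standalone guard below shows the same idea as a
 * slightly stricter variant that also avoids wrap in the offset arithmetic
 * (the in-tree check compares offset + sizeof(value) directly). Names are
 * hypothetical:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool mmio_access_ok(uint32_t offset, size_t access_sz, size_t mem_len)
{
        if (access_sz > mem_len)
                return false;
        /* compare against mem_len - access_sz so the sum cannot overflow */
        return offset <= mem_len - access_sz;
}
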
* Caller must guarantee proper alignment, when applicable, and single user @@ -740,8 +850,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * convert it from Target CPU virtual address space * to CE address space */ - address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, - address); + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, 0); @@ -899,7 +1008,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * to * CE address space */ - address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); remaining_bytes = orig_nbytes; ce_data = ce_data_base; @@ -1331,20 +1440,42 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) { u32 val; - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); - val &= ~CORE_CTRL_PCIE_REG_31_MASK; - - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); + switch (ar->hw_rev) { + case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA6174: + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS); + val &= ~CORE_CTRL_PCIE_REG_31_MASK; + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS, val); + break; + case ATH10K_HW_QCA99X0: + /* TODO: Find appropriate register configuration for QCA99X0 + * to mask irq/MSI. + */ + break; + } } static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) { u32 val; - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); - val |= CORE_CTRL_PCIE_REG_31_MASK; - - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); + switch (ar->hw_rev) { + case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA6174: + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS); + val |= CORE_CTRL_PCIE_REG_31_MASK; + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS, val); + break; + case ATH10K_HW_QCA99X0: + /* TODO: Find appropriate register configuration for QCA99X0 + * to unmask irq/MSI. + */ + break; + } } static void ath10k_pci_irq_disable(struct ath10k *ar) @@ -1506,7 +1637,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) * masked. To prevent the device from asserting the interrupt reset it * before proceeding with cleanup. 
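
/* Illustrative sketch, not driver code: the ath10k_pci_targ_cpu_to_ce_addr()
 * helper added above, which the diag read/write paths now call, composes a
 * CE-space address from the low 20 bits of the target CPU address plus
 * chip-specific high bits (CORE_CTRL window bits for QCA988X/QCA6174, the
 * PCIE BAR register for QCA99X0). A standalone model with the register read
 * already performed:
 */
#include <stdint.h>

static uint32_t high_bits_from_core_ctrl(uint32_t core_ctrl)
{
        return (core_ctrl & 0x7ff) << 21;  /* 11 window bits into bits 21..31 */
}

static uint32_t targ_cpu_to_ce_addr(uint32_t high_bits, uint32_t addr)
{
        return high_bits | 0x100000 | (addr & 0xfffff);
}
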
 */
-       ath10k_pci_warm_reset(ar);
+       ath10k_pci_safe_chip_reset(ar);

        ath10k_pci_irq_disable(ar);
        ath10k_pci_irq_sync(ar);
@@ -1691,7 +1822,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)

        switch (ar_pci->pdev->device) {
        case QCA988X_2_0_DEVICE_ID:
+       case QCA99X0_2_0_DEVICE_ID:
                return 1;
+       case QCA6164_2_1_DEVICE_ID:
        case QCA6174_2_1_DEVICE_ID:
                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
                case QCA6174_HW_1_0_CHIP_ID_REV:
@@ -1761,7 +1894,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)

        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                        target_ce_config_wlan,
-                                       sizeof(target_ce_config_wlan));
+                                       sizeof(struct ce_pipe_config) *
+                                       NUM_TARGET_CE_CONFIG_WLAN);

        if (ret != 0) {
                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
@@ -1875,7 +2009,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
                }

                /* Last CE is Diagnostic Window */
-               if (i == CE_COUNT - 1) {
+               if (i == CE_DIAG_PIPE) {
                        ar_pci->ce_diag = pipe->ce_hdl;
                        continue;
                }
@@ -2020,6 +2154,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
        return 0;
 }

+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+       if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
+               return ath10k_pci_warm_reset(ar);
+       } else if (QCA_REV_99X0(ar)) {
+               ath10k_pci_irq_disable(ar);
+               return ath10k_pci_qca99x0_chip_reset(ar);
+       } else {
+               return -ENOTSUPP;
+       }
+}
+
 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
 {
        int i, ret;
@@ -2126,12 +2272,38 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
        return 0;
 }

+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+       ret = ath10k_pci_cold_reset(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_pci_wait_for_target_init(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+                           ret);
+               return ret;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+       return 0;
+}
+
 static int ath10k_pci_chip_reset(struct ath10k *ar)
 {
        if (QCA_REV_988X(ar))
                return ath10k_pci_qca988x_chip_reset(ar);
        else if (QCA_REV_6174(ar))
                return ath10k_pci_qca6174_chip_reset(ar);
+       else if (QCA_REV_99X0(ar))
+               return ath10k_pci_qca99x0_chip_reset(ar);
        else
                return -ENOTSUPP;
 }
@@ -2606,7 +2778,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)

 static int ath10k_pci_cold_reset(struct ath10k *ar)
 {
-       int i;
        u32 val;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
@@ -2622,23 +2793,18 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
        val |= 1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

-       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-                   RTC_STATE_COLD_RESET_MASK)
-                       break;
-               msleep(1);
-       }
+       /* After writing into SOC_GLOBAL_RESET to put the device into reset,
+        * and when pulling it back out of reset, PCIe may not be stable for
+        * immediate register accesses and can raise bus errors. Add a delay
+        * before any PCIe access to avoid this.
+        */
+       msleep(20);

        /* Pull Target, including PCIe, out of RESET.
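
/* Illustrative sketch, not driver code: the cold-reset hunk above replaces
 * RTC_STATE polling with fixed 20 ms delays, because register reads over PCIe
 * are themselves unreliable right after the reset bit toggles, so a polling
 * loop could fault the bus. The sequence, modeled in standalone C with
 * stubbed register I/O:
 */
#include <stdint.h>

static uint32_t soc_global_reset;                  /* stand-in register */
static void reg_write(uint32_t v) { soc_global_reset = v; }
static uint32_t reg_read(void) { return soc_global_reset; }
static void sleep_ms(int ms) { (void)ms; /* msleep() in the kernel */ }

static void cold_reset(void)
{
        uint32_t val = reg_read();

        val |= 1;
        reg_write(val);   /* put target (incl. PCIe) into reset */
        sleep_ms(20);     /* no polling: a read now could raise a bus error */

        val &= ~1u;
        reg_write(val);   /* pull target out of reset */
        sleep_ms(20);
}
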
*/ val &= ~1; ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); - for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { - if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & - RTC_STATE_COLD_RESET_MASK)) - break; - msleep(1); - } + msleep(20); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); @@ -2683,6 +2849,7 @@ static int ath10k_pci_claim(struct ath10k *ar) pci_set_master(pdev); /* Arrange for access to Target SoC registers. */ + ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); if (!ar_pci->mem) { ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); @@ -2746,9 +2913,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev, case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; break; + case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: hw_rev = ATH10K_HW_QCA6174; break; + case QCA99X0_2_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA99X0; + break; default: WARN_ON(1); return -ENOTSUPP; @@ -2767,6 +2938,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->pdev = pdev; ar_pci->dev = &pdev->dev; ar_pci->ar = ar; + ar->dev_id = pci_dev->device; if (pdev->subsystem_vendor || pdev->subsystem_device) scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id), diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index d7696ddc0..8d364fb8f 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -162,6 +162,7 @@ struct ath10k_pci { struct device *dev; struct ath10k *ar; void __iomem *mem; + size_t mem_len; /* * Number of MSI interrupts granted, 0 --> using legacy PCI line @@ -236,18 +237,6 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) #define CDC_WAR_MAGIC_STR 0xceef0000 #define CDC_WAR_DATA_CE 4 -/* - * TODO: Should be a function call specific to each Target-type. - * This convoluted macro converts from Target CPU Virtual Address Space to CE - * Address Space. As part of this process, we conservatively fetch the current - * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space - * for this device; but that's not guaranteed. 
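
/* Illustrative sketch, not driver code: the *_MASK / *_LSB pairs added to
 * rx_desc.h, and the MS() macro already used in this patch (for example
 * MS(ar->chip_id, SOC_CHIP_ID_REV)), extract a bitfield by masking and then
 * shifting. Standalone C using one of the new QCA99X0 definitions; the MS()
 * definition below mirrors the driver's but is restated here as an
 * assumption:
 */
#include <stdint.h>
#include <stdio.h>

#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB  16

#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)

int main(void)
{
        uint32_t info2 = 0x00060000;    /* ip_proto = 6 (TCP), rest clear */

        printf("ip_proto=%u\n",
               (unsigned)MS(info2, RX_MSDU_START_INFO2_IP_PROTO_FIELD));
        return 0;
}
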
- */ -#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \ - (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \ - CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \ - 0x100000 | ((addr) & 0xfffff)) - /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ #define DIAG_ACCESS_CE_TIMEOUT_MS 10 diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index 492b5a5af..ca8d16884 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -422,6 +422,12 @@ struct rx_mpdu_end { #define RX_MSDU_START_INFO1_IP_FRAG (1 << 14) #define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15) +#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff +#define RX_MSDU_START_INFO2_DA_IDX_LSB 0 +#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000 +#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16 +#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11) + /* The decapped header (rx_hdr_status) contains the following: * a) 802.11 header * [padding to 4 bytes] @@ -449,12 +455,23 @@ enum rx_msdu_decap_format { RX_MSDU_DECAP_8023_SNAP_LLC = 3 }; -struct rx_msdu_start { +struct rx_msdu_start_common { __le32 info0; /* %RX_MSDU_START_INFO0_ */ __le32 flow_id_crc; __le32 info1; /* %RX_MSDU_START_INFO1_ */ } __packed; +struct rx_msdu_start_qca99x0 { + __le32 info2; /* %RX_MSDU_START_INFO2_ */ +} __packed; + +struct rx_msdu_start { + struct rx_msdu_start_common common; + union { + struct rx_msdu_start_qca99x0 qca99x0; + } __packed; +} __packed; + /* * msdu_length * MSDU length in bytes after decapsulation. This field is @@ -540,7 +557,7 @@ struct rx_msdu_start { #define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30) #define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31) -struct rx_msdu_end { +struct rx_msdu_end_common { __le16 ip_hdr_cksum; __le16 tcp_hdr_cksum; u8 key_id_octet; @@ -549,6 +566,36 @@ struct rx_msdu_end { __le32 info0; } __packed; +#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff +#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0 +#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00 +#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10 +#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000 +#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16 +#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9) + +#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f +#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0 +#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0 +#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6 +#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000 +#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12 + +struct rx_msdu_end_qca99x0 { + __le32 ipv6_crc; + __le32 tcp_seq_no; + __le32 tcp_ack_no; + __le32 info1; + __le32 info2; +} __packed; + +struct rx_msdu_end { + struct rx_msdu_end_common common; + union { + struct rx_msdu_end_qca99x0 qca99x0; + } __packed; +} __packed; + /* *ip_hdr_chksum * This can include the IP header checksum or the pseudo header @@ -870,7 +917,11 @@ struct rx_ppdu_start { #define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24) #define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25) -#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) +#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc +#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2 +#define RX_PPDU_END_INFO1_BB_DATA BIT(0) +#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1) +#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15) struct rx_ppdu_end_common { __le32 evm_p0; @@ -891,13 +942,13 @@ struct rx_ppdu_end_common { __le32 evm_p15; __le32 tsf_timestamp; __le32 wb_timestamp; +} __packed; + +struct rx_ppdu_end_qca988x { u8 
locationing_timestamp; u8 phy_err_code; __le16 flags; /* %RX_PPDU_END_FLAGS_ */ __le32 info0; /* %RX_PPDU_END_INFO0_ */ -} __packed; - -struct rx_ppdu_end_qca988x { __le16 bb_length; __le16 info1; /* %RX_PPDU_END_INFO1_ */ } __packed; @@ -909,16 +960,126 @@ struct rx_ppdu_end_qca988x { #define RX_PPDU_END_RTT_NORMAL_MODE BIT(31) struct rx_ppdu_end_qca6174 { + u8 locationing_timestamp; + u8 phy_err_code; + __le16 flags; /* %RX_PPDU_END_FLAGS_ */ + __le32 info0; /* %RX_PPDU_END_INFO0_ */ __le32 rtt; /* %RX_PPDU_END_RTT_ */ __le16 bb_length; __le16 info1; /* %RX_PPDU_END_INFO1_ */ } __packed; +#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0) +#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3) +#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4) +#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5) +#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6) +#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7) + +#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff +#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0 +#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000 +#define RX_LOCATION_INFO_FAC_STATUS_LSB 18 +#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000 +#define RX_LOCATION_INFO_PKT_BW_LSB 20 +#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000 +#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23 +#define RX_LOCATION_INFO_CIR_STATUS BIT(17) +#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25) +#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26) +#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30) +#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31) + +struct rx_pkt_end { + __le32 info0; /* %RX_PKT_END_INFO0_ */ + __le32 phy_timestamp_1; + __le32 phy_timestamp_2; + __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ +} __packed; + +enum rx_phy_ppdu_end_info0 { + RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2), + RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3), + RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11), + RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12), + RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13), + RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14), + RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15), + RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16), + RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17), + RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18), + RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19), + RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20), + RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21), + RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22), + RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23), + RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24), + RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25), + RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26), + RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27), + RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28), + RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29), + RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30), + RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31), +}; + +enum rx_phy_ppdu_end_info1 { + RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0), + RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4), + 
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5),
+       RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6),
+       RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7),
+       RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8),
+       RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9),
+       RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10),
+       RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11),
+       RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12),
+       RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+       __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+       __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB  0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK        0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB         0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK          BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID       BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID  BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL    BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC      BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE       BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+       struct rx_pkt_end rx_pkt_end;
+       struct rx_phy_ppdu_end rx_phy_ppdu_end;
+       __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+       __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+       __le16 bb_length;
+       __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
 struct rx_ppdu_end {
        struct rx_ppdu_end_common common;
        union {
                struct rx_ppdu_end_qca988x qca988x;
                struct rx_ppdu_end_qca6174 qca6174;
+               struct rx_ppdu_end_qca99x0 qca99x0;
        } __packed;
 } __packed;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 8dcd424aa..4671cfbcd 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -57,7 +57,7 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
 }

 int ath10k_spectral_process_fft(struct ath10k *ar,
-                               const struct wmi_phyerr *phyerr,
+                               struct wmi_phyerr_ev_arg *phyerr,
                                const struct phyerr_fft_report *fftr,
                                size_t bin_len, u64 tsf)
 {
@@ -73,6 +73,15 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
        if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
                return -EINVAL;

+       /* qca99x0 reports the bin size as 68 bytes (64 bytes + 4 bytes) in
+        * report mode 2. The first 64 bytes carry the inband tones (-32 to
+        * +31) and the last 4 bytes carry the band edge detection data (+32),
+        * mainly used for radar detection. Strip the last 4 bytes so the bin
+        * size is valid.
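
/* Illustrative sketch, not driver code: the spectral hunk above normalizes
 * the QCA99X0 68-byte FFT report (64 inband tone bins plus 4 band-edge
 * bytes) back to the 64-bin form the rest of the code expects. Standalone C;
 * the upper cap is a hypothetical stand-in for SPECTRAL_ATH10K_MAX_NUM_BINS:
 */
#include <stddef.h>

#define SPECTRAL_MIN_BINS 64
#define SPECTRAL_MAX_BINS 256   /* hypothetical cap */

static int normalize_bin_len(size_t *bin_len)
{
        if (*bin_len < SPECTRAL_MIN_BINS || *bin_len > SPECTRAL_MAX_BINS)
                return -1;      /* -EINVAL in the driver */
        if (*bin_len == 68)
                *bin_len -= 4;  /* drop band-edge bytes used for radar detection */
        return 0;
}
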
+ */ + if (bin_len == 68) + bin_len -= 4; + reg0 = __le32_to_cpu(fftr->reg0); reg1 = __le32_to_cpu(fftr->reg1); @@ -118,15 +127,14 @@ int ath10k_spectral_process_fft(struct ath10k *ar, fft_sample->total_gain_db = __cpu_to_be16(total_gain_db); fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db); - freq1 = __le16_to_cpu(phyerr->freq1); - freq2 = __le16_to_cpu(phyerr->freq2); + freq1 = phyerr->freq1; + freq2 = phyerr->freq2; fft_sample->freq1 = __cpu_to_be16(freq1); fft_sample->freq2 = __cpu_to_be16(freq2); chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX); - fft_sample->noise = __cpu_to_be16( - __le16_to_cpu(phyerr->nf_chains[chain_idx])); + fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]); bins = (u8 *)fftr; bins += sizeof(*fftr); diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h index 042f5b302..89b0ad769 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.h +++ b/drivers/net/wireless/ath/ath10k/spectral.h @@ -47,7 +47,7 @@ enum ath10k_spectral_mode { #ifdef CONFIG_ATH10K_DEBUGFS int ath10k_spectral_process_fft(struct ath10k *ar, - const struct wmi_phyerr *phyerr, + struct wmi_phyerr_ev_arg *phyerr, const struct phyerr_fft_report *fftr, size_t bin_len, u64 tsf); int ath10k_spectral_start(struct ath10k *ar); @@ -59,7 +59,7 @@ void ath10k_spectral_destroy(struct ath10k *ar); static inline int ath10k_spectral_process_fft(struct ath10k *ar, - const struct wmi_phyerr *phyerr, + struct wmi_phyerr_ev_arg *phyerr, const struct phyerr_fft_report *fftr, size_t bin_len, u64 tsf) { diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c new file mode 100644 index 000000000..3ca3fae40 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/swap.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This file has implementation for code swap logic. With code swap feature, + * target can run the fw binary with even smaller IRAM size by using host + * memory to store some of the code segments. + */ + +#include "core.h" +#include "bmi.h" +#include "debug.h" + +static int ath10k_swap_code_seg_fill(struct ath10k *ar, + struct ath10k_swap_code_seg_info *seg_info, + const void *data, size_t data_len) +{ + u8 *virt_addr = seg_info->virt_address[0]; + u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {}; + const u8 *fw_data = data; + union ath10k_swap_code_seg_item *swap_item; + u32 length = 0; + u32 payload_len; + u32 total_payload_len = 0; + u32 size_left = data_len; + + /* Parse swap bin and copy the content to host allocated memory. + * The format is Address, length and value. The last 4-bytes is + * target write address. Currently address field is not used. 
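
/* Illustrative sketch, not driver code: the code-swap parser introduced in
 * swap.c walks a stream of {address, length, data[]} TLVs and stops at a
 * zero-length tail record carrying the magic signature and the BMI write
 * address. A simplified standalone walk over the same layout (endianness and
 * the exact tail-size check are omitted here):
 */
#include <stdint.h>
#include <string.h>

struct seg_tlv { uint32_t address; uint32_t length; /* u8 data[] follows */ };

static int walk_swap_bin(const uint8_t *buf, size_t len)
{
        size_t off = 0;

        while (len - off >= sizeof(struct seg_tlv)) {
                struct seg_tlv tlv;

                memcpy(&tlv, buf + off, sizeof(tlv)); /* unaligned-safe read */
                if (tlv.length == 0)
                        return 0;  /* tail record: magic + bmi_write_addr */
                if (tlv.length > len - off - sizeof(tlv))
                        return -1; /* malformed: payload overruns buffer */
                /* ...copy tlv.length payload bytes to host memory here... */
                off += sizeof(tlv) + tlv.length;
        }
        return -1;                 /* no tail record found */
}
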
+ */ + seg_info->target_addr = -1; + while (size_left >= sizeof(*swap_item)) { + swap_item = (union ath10k_swap_code_seg_item *)fw_data; + payload_len = __le32_to_cpu(swap_item->tlv.length); + if ((payload_len > size_left) || + (payload_len == 0 && + size_left != sizeof(struct ath10k_swap_code_seg_tail))) { + ath10k_err(ar, "refusing to parse invalid tlv length %d\n", + payload_len); + return -EINVAL; + } + + if (payload_len == 0) { + if (memcmp(swap_item->tail.magic_signature, swap_magic, + ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) { + ath10k_err(ar, "refusing an invalid swap file\n"); + return -EINVAL; + } + seg_info->target_addr = + __le32_to_cpu(swap_item->tail.bmi_write_addr); + break; + } + + memcpy(virt_addr, swap_item->tlv.data, payload_len); + virt_addr += payload_len; + length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv); + size_left -= length; + fw_data += length; + total_payload_len += payload_len; + } + + if (seg_info->target_addr == -1) { + ath10k_err(ar, "failed to parse invalid swap file\n"); + return -EINVAL; + } + seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len); + + return 0; +} + +static void +ath10k_swap_code_seg_free(struct ath10k *ar, + struct ath10k_swap_code_seg_info *seg_info) +{ + u32 seg_size; + + if (!seg_info) + return; + + if (!seg_info->virt_address[0]) + return; + + seg_size = __le32_to_cpu(seg_info->seg_hw_info.size); + dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0], + seg_info->paddr[0]); +} + +static struct ath10k_swap_code_seg_info * +ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len) +{ + struct ath10k_swap_code_seg_info *seg_info; + void *virt_addr; + dma_addr_t paddr; + + swap_bin_len = roundup(swap_bin_len, 2); + if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) { + ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n", + swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX); + return NULL; + } + + seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL); + if (!seg_info) + return NULL; + + virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr, + GFP_KERNEL); + if (!virt_addr) { + ath10k_err(ar, "failed to allocate dma coherent memory\n"); + return NULL; + } + + seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr); + seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len); + seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len); + seg_info->seg_hw_info.num_segs = + __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED); + seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len)); + seg_info->virt_address[0] = virt_addr; + seg_info->paddr[0] = paddr; + + return seg_info; +} + +int ath10k_swap_code_seg_configure(struct ath10k *ar, + enum ath10k_swap_code_seg_bin_type type) +{ + int ret; + struct ath10k_swap_code_seg_info *seg_info = NULL; + + switch (type) { + case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW: + if (!ar->swap.firmware_swap_code_seg_info) + return 0; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n"); + seg_info = ar->swap.firmware_swap_code_seg_info; + break; + default: + case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP: + case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF: + ath10k_warn(ar, "ignoring unknown code swap binary type %d\n", + type); + return 0; + } + + ret = ath10k_bmi_write_memory(ar, seg_info->target_addr, + &seg_info->seg_hw_info, + sizeof(seg_info->seg_hw_info)); + if (ret) { + ath10k_err(ar, "failed to write Code swap segment information (%d)\n", + ret); + return ret; + } + + return 0; +} + +void 
ath10k_swap_code_seg_release(struct ath10k *ar) +{ + ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info); + ar->swap.firmware_codeswap_data = NULL; + ar->swap.firmware_codeswap_len = 0; + ar->swap.firmware_swap_code_seg_info = NULL; +} + +int ath10k_swap_code_seg_init(struct ath10k *ar) +{ + int ret; + struct ath10k_swap_code_seg_info *seg_info; + + if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data) + return 0; + + seg_info = ath10k_swap_code_seg_alloc(ar, + ar->swap.firmware_codeswap_len); + if (!seg_info) { + ath10k_err(ar, "failed to allocate fw code swap segment\n"); + return -ENOMEM; + } + + ret = ath10k_swap_code_seg_fill(ar, seg_info, + ar->swap.firmware_codeswap_data, + ar->swap.firmware_codeswap_len); + + if (ret) { + ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n", + ret); + ath10k_swap_code_seg_free(ar, seg_info); + return ret; + } + + ar->swap.firmware_swap_code_seg_info = seg_info; + + return 0; +} diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h new file mode 100644 index 000000000..5c89952dd --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/swap.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _SWAP_H_ +#define _SWAP_H_ + +#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX (512 * 1024) +#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ 12 +#define ATH10K_SWAP_CODE_SEG_NUM_MAX 16 +/* Currently only one swap segment is supported */ +#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1 + +struct ath10k_swap_code_seg_tlv { + __le32 address; + __le32 length; + u8 data[0]; +} __packed; + +struct ath10k_swap_code_seg_tail { + u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ]; + __le32 bmi_write_addr; +} __packed; + +union ath10k_swap_code_seg_item { + struct ath10k_swap_code_seg_tlv tlv; + struct ath10k_swap_code_seg_tail tail; +} __packed; + +enum ath10k_swap_code_seg_bin_type { + ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP, + ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW, + ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF, +}; + +struct ath10k_swap_code_seg_hw_info { + /* Swap binary image size */ + __le32 swap_size; + __le32 num_segs; + + /* Swap data size */ + __le32 size; + __le32 size_log2; + __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX]; + __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX]; +} __packed; + +struct ath10k_swap_code_seg_info { + struct ath10k_swap_code_seg_hw_info seg_hw_info; + void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED]; + u32 target_addr; + dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED]; +}; + +int ath10k_swap_code_seg_configure(struct ath10k *ar, + enum ath10k_swap_code_seg_bin_type type); +void ath10k_swap_code_seg_release(struct ath10k *ar); +int ath10k_swap_code_seg_init(struct ath10k *ar); + +#endif diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index a417aae52..768bef629 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -450,4 +450,7 @@ Fw Mode/SubMode Mask #define QCA6174_BOARD_DATA_SZ 8192 #define QCA6174_BOARD_EXT_DATA_SZ 0 +#define QCA99X0_BOARD_DATA_SZ 12288 +#define QCA99X0_BOARD_EXT_DATA_SZ 0 + #endif /* __TARGADDRS_H__ */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 826500bb2..e4a9c4c8d 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -53,8 +53,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct ath10k_skb_cb *skb_cb; struct sk_buff *msdu; - lockdep_assert_held(&htt->tx_lock); - ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d success %d\n", tx_done->msdu_id, !!tx_done->discard, @@ -66,12 +64,19 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, return; } + spin_lock_bh(&htt->tx_lock); msdu = idr_find(&htt->pending_tx, tx_done->msdu_id); if (!msdu) { ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n", tx_done->msdu_id); + spin_unlock_bh(&htt->tx_lock); return; } + ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); + __ath10k_htt_tx_dec_pending(htt); + if (htt->num_pending_tx == 0) + wake_up(&htt->empty_tx_wq); + spin_unlock_bh(&htt->tx_lock); skb_cb = ATH10K_SKB_CB(msdu); @@ -90,7 +95,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, if (tx_done->discard) { ieee80211_free_txskb(htt->ar->hw, msdu); - goto exit; + return; } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) @@ -104,12 +109,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ - -exit: - ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); - __ath10k_htt_tx_dec_pending(htt); - if (htt->num_pending_tx == 0) - wake_up(&htt->empty_tx_wq); } struct ath10k_peer 
*ath10k_peer_find(struct ath10k *ar, int vdev_id, @@ -147,9 +146,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id) static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, const u8 *addr, bool expect_mapped) { - int ret; + long time_left; - ret = wait_event_timeout(ar->peer_mapping_wq, ({ + time_left = wait_event_timeout(ar->peer_mapping_wq, ({ bool mapped; spin_lock_bh(&ar->data_lock); @@ -160,7 +159,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)); }), 3*HZ); - if (ret <= 0) + if (time_left == 0) return -ETIMEDOUT; return 0; diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 47fe2e756..248ffc3d6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -37,8 +37,10 @@ struct wmi_ops { struct wmi_peer_kick_ev_arg *arg); int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb, struct wmi_swba_ev_arg *arg); - int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb, - struct wmi_phyerr_ev_arg *arg); + int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg); + int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf, + int left_len, struct wmi_phyerr_ev_arg *arg); int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb, struct wmi_svc_rdy_ev_arg *arg); int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb, @@ -49,6 +51,7 @@ struct wmi_ops { struct wmi_roam_ev_arg *arg); int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb, struct wmi_wow_ev_arg *arg); + enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar); struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt); struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar); @@ -260,13 +263,23 @@ ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb, } static inline int -ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb, - struct wmi_phyerr_ev_arg *arg) +ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg) +{ + if (!ar->wmi.ops->pull_phyerr_hdr) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg); +} + +static inline int +ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf, + int left_len, struct wmi_phyerr_ev_arg *arg) { if (!ar->wmi.ops->pull_phyerr) return -EOPNOTSUPP; - return ar->wmi.ops->pull_phyerr(ar, skb, arg); + return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg); } static inline int @@ -319,6 +332,15 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb, return ar->wmi.ops->pull_wow_event(ar, skb, arg); } +static inline enum wmi_txbf_conf +ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) +{ + if (!ar->wmi.ops->get_txbf_conf_scheme) + return WMI_TXBF_CONF_UNSUPPORTED; + + return ar->wmi.ops->get_txbf_conf_scheme(ar); +} + static inline int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) { diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 6f477e830..b5849b3fd 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -519,7 +519,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) break; case WMI_TLV_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_TLV_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; @@ -731,6 +731,8 @@ static 
int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_swba_parse *swba = data; + struct wmi_tim_info_arg *tim_info_arg; + const struct wmi_tim_info *tim_info_ev = ptr; if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO) return -EPROTO; @@ -738,7 +740,21 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len, if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info)) return -ENOBUFS; - swba->arg->tim_info[swba->n_tim++] = ptr; + if (__le32_to_cpu(tim_info_ev->tim_len) > + sizeof(tim_info_ev->tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + tim_info_arg = &swba->arg->tim_info[swba->n_tim]; + tim_info_arg->tim_len = tim_info_ev->tim_len; + tim_info_arg->tim_mcast = tim_info_ev->tim_mcast; + tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap; + tim_info_arg->tim_changed = tim_info_ev->tim_changed; + tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending; + + swba->n_tim++; + return 0; } @@ -822,9 +838,9 @@ static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar, return 0; } -static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar, - struct sk_buff *skb, - struct wmi_phyerr_ev_arg *arg) +static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg) { const void **tb; const struct wmi_tlv_phyerr_ev *ev; @@ -846,10 +862,10 @@ static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar, return -EPROTO; } - arg->num_phyerrs = ev->num_phyerrs; - arg->tsf_l32 = ev->tsf_l32; - arg->tsf_u32 = ev->tsf_u32; - arg->buf_len = ev->buf_len; + arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs); + arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); + arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); + arg->buf_len = __le32_to_cpu(ev->buf_len); arg->phyerrs = phyerrs; kfree(tb); @@ -1263,6 +1279,11 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar, return skb; } +static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar) +{ + return WMI_TXBF_CONF_AFTER_ASSOC; +} + static struct sk_buff * ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id, u32 param_value) @@ -1357,7 +1378,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64); cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64); cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28); - cfg->rx_decap_mode = __cpu_to_le32(1); + cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); cfg->scan_max_pending_reqs = __cpu_to_le32(4); cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); @@ -3173,6 +3194,38 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = { .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID, .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID, .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = 
WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = { @@ -3226,6 +3279,48 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = { .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR, .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE, .cal_period = WMI_PDEV_PARAM_UNSUPPORTED, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct 
wmi_vdev_param_map wmi_tlv_vdev_param_map = { @@ -3284,6 +3379,22 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE, .ap_detect_out_of_sync_sleeping_sta_time_secs = WMI_TLV_VDEV_PARAM_UNSUPPORTED, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, }; static const struct wmi_ops wmi_tlv_ops = { @@ -3296,12 +3407,14 @@ static const struct wmi_ops wmi_tlv_ops = { .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev, .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev, - .pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev, + .pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr, + .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev, .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats, .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev, .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev, + .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme, .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend, .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 8dd84c160..ce01107ef 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -148,6 +148,48 @@ static struct wmi_cmd_map wmi_cmd_map = { .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = 
WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.X WMI cmd track */ @@ -271,6 +313,48 @@ static struct wmi_cmd_map wmi_10x_cmd_map = { .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.2.4 WMI cmd track */ @@ -393,6 +477,231 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { 
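+ /* Sketch of how these command tables are consumed (simplified, not a
+  * verbatim quote of the driver): common code never hardcodes firmware
+  * command numbers, it reads them through ar->wmi.cmd and treats
+  * WMI_CMD_UNSUPPORTED as "this firmware branch cannot do that",
+  * roughly:
+  *
+  *	u32 cmd_id = ar->wmi.cmd->fwtest_cmdid;
+  *
+  *	if (cmd_id == WMI_CMD_UNSUPPORTED)
+  *		return -EOPNOTSUPP;
+  *	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+  */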
.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, +}; + +/* 10.4 WMI cmd track */ +static struct wmi_cmd_map wmi_10_4_cmd_map = { + .init_cmdid = WMI_10_4_INIT_CMDID, + .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID, + .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = 
WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID, + .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID, + .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID, + .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID, + .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = 
WMI_10_4_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = + WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID, + .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID, + .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10_4_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID, + .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID, + .force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID, + .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID, + .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID, + .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED, + .tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED, + .tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED, + .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID, + .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID, + .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID, + .wlan_peer_caching_add_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID, + .wlan_peer_caching_evict_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID, + .wlan_peer_caching_restore_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID, + .wlan_peer_caching_print_all_peers_info_cmdid = + WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID, + .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID, + .peer_add_proxy_sta_entry_cmdid = + WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID, + .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID, + .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID, + .nan_cmdid = WMI_10_4_NAN_CMDID, + .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID, + .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID, + .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID, + .pdev_smart_ant_set_rx_antenna_cmdid = + WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + .peer_smart_ant_set_tx_antenna_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + .peer_smart_ant_set_train_info_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + .peer_smart_ant_set_node_config_ops_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + 
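/* Unlike the older maps above, which stub all of this out as
+ * WMI_CMD_UNSUPPORTED, the 10.4 map assigns real command ids throughout
+ * this block: smart antenna control, calibration and rate-power tables,
+ * ATF, and the pdev/vdev info queries that follow. */
+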
.pdev_set_antenna_switch_table_cmdid = + WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID, + .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID, + .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID, + .pdev_ratepwr_chainmsk_table_cmdid = + WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID, + .tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID, + .fwtest_cmdid = WMI_10_4_FWTEST_CMDID, + .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID, + .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID, + .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID, + .pdev_get_ani_ofdm_config_cmdid = + WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID, + .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID, + .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID, + .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID, + .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID, + .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID, + .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID, + .vdev_filter_neighbor_rx_packets_cmdid = + WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID, + .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID, + .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID, + .pdev_bss_chan_info_request_cmdid = + WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, }; /* MAIN WMI VDEV param map */ @@ -452,6 +761,22 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = { .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE, .ap_detect_out_of_sync_sleeping_sta_time_secs = WMI_VDEV_PARAM_UNSUPPORTED, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, }; /* 10.X WMI VDEV param map */ @@ -511,6 +836,22 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, .ap_detect_out_of_sync_sleeping_sta_time_secs = WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, 
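+ /* The vdev parameter tables follow the same pattern (sketch,
+  * simplified): callers look parameters up via ar->wmi.vdev_param, and
+  * the command generator refuses entries a firmware branch marks
+  * unsupported, roughly:
+  *
+  *	u32 param = ar->wmi.vdev_param->dtim_period;
+  *
+  *	if (param == WMI_VDEV_PARAM_UNSUPPORTED)
+  *		return -EOPNOTSUPP;
+  *	return ath10k_wmi_vdev_set_param(ar, vdev_id, param, value);
+  */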
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { @@ -569,6 +910,97 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, .ap_detect_out_of_sync_sleeping_sta_time_secs = WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, +}; + +static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { + .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_10_4_VDEV_PARAM_WDS, + .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT, + .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT, + .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_10_4_VDEV_PARAM_SGI, + .ldpc = WMI_10_4_VDEV_PARAM_LDPC, + .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID, + .nss = WMI_10_4_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + 
.ap_keepalive_max_unresponsive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET, + .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_10_4_VDEV_PARAM_TXBF, + .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE, + .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY, + .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES, + .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR, + .mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET, + .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE, + .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK, + .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK, + .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE, + .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM, + .early_rx_bmiss_sample_cycle = + WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE, + .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP, + .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP, + .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE, + .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA, + .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC, + .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, + .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, }; static struct wmi_pdev_param_map wmi_pdev_param_map = { @@ -621,6 +1053,48 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = { .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED, .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED, .cal_period = WMI_PDEV_PARAM_UNSUPPORTED, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + 
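/* Same sentinel convention for pdev parameters: the set-param path
+ * rejects ids that map to WMI_PDEV_PARAM_UNSUPPORTED with -EOPNOTSUPP
+ * instead of pushing an unknown parameter id to the firmware. */
+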
.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { @@ -674,6 +1148,48 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { @@ -727,6 +1243,48 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = 
WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, }; /* firmware 10.2 specific mappings */ @@ -849,6 +1407,139 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = { .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid 
= WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, +}; + +static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { + .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE, + .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO, + .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, + .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + .pdev_stats_update_period = + WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = + WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = + WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = + WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS, + .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE, + .dcs = WMI_10_4_PDEV_PARAM_DCS, + .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA, + .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG, + .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP, + .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET, + .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR, + .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE, + .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST, + .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE, + .smart_antenna_default_antenna = + WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE, + .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID, + .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN, + .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER, + .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID, + .proxy_sta_mode = 
WMI_10_4_PDEV_PARAM_PROXY_STA_MODE, + .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE, + .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + .remove_mcast2ucast_buffer = + WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, + .peer_sta_ps_statechg_enable = + WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE, + .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE, + .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS, + .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID, + .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID, + .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID, + .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID, + .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID, + .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID, + .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS, + .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY, + .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION, + .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD, + .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE, + .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO, + .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH, + .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION, + .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN, + .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT, + .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL, + .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G, + .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G, + .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU, + .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU, + .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD, + .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE, + .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET, + .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, + .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR, + .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR, }; void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, @@ -1232,6 +1923,8 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type, return "completed [preempted]"; case WMI_SCAN_REASON_TIMEDOUT: return "completed [timedout]"; + case WMI_SCAN_REASON_INTERNAL_FAILURE: + return "completed [internal err]"; case WMI_SCAN_REASON_MAX: break; } @@ -1246,6 +1939,10 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type, return "preempted"; case WMI_SCAN_EVENT_START_FAILED: return "start failed"; + case WMI_SCAN_EVENT_RESTARTED: + return "restarted"; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: + return "foreign channel exit"; default: return "unknown"; } @@ -1321,6 +2018,8 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) break; case WMI_SCAN_EVENT_DEQUEUED: case WMI_SCAN_EVENT_PREEMPTED: + case WMI_SCAN_EVENT_RESTARTED: + case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: default: break; } @@ -1433,6 +2132,40 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_mgmt_rx_ev_arg *arg) +{ + struct wmi_10_4_mgmt_rx_event *ev; + struct wmi_10_4_mgmt_rx_hdr *ev_hdr; + size_t pull_len; + u32 msdu_len; + + ev = (struct wmi_10_4_mgmt_rx_event *)skb->data; + ev_hdr = &ev->hdr; + pull_len = sizeof(*ev); + + if (skb->len < pull_len) + return -EPROTO; + + skb_pull(skb, pull_len); + arg->channel = 
ev_hdr->channel; + arg->buf_len = ev_hdr->buf_len; + arg->status = ev_hdr->status; + arg->snr = ev_hdr->snr; + arg->phy_mode = ev_hdr->phy_mode; + arg->rate = ev_hdr->rate; + + msdu_len = __le32_to_cpu(arg->buf_len); + if (skb->len < msdu_len) + return -EPROTO; + + /* Make sure bytes added for padding are removed. */ + skb_trim(skb, msdu_len); + + return 0; +} + int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_mgmt_rx_ev_arg arg = {}; @@ -1593,6 +2326,29 @@ static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_ch_info_ev_arg *arg) +{ + struct wmi_10_4_chan_info_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->err_code = ev->err_code; + arg->freq = ev->freq; + arg->cmd_flags = ev->cmd_flags; + arg->noise_floor = ev->noise_floor; + arg->rx_clear_count = ev->rx_clear_count; + arg->cycle_count = ev->cycle_count; + arg->chan_tx_pwr_range = ev->chan_tx_pwr_range; + arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; + arg->rx_frame_count = ev->rx_frame_count; + + return 0; +} + void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) { struct wmi_ch_info_ev_arg arg = {}; @@ -1656,8 +2412,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) ar->ch_info_can_report_survey = true; } - ar->survey_last_rx_clear_count = rx_clear_count; - ar->survey_last_cycle_count = cycle_count; + if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) { + ar->survey_last_rx_clear_count = rx_clear_count; + ar->survey_last_cycle_count = cycle_count; + } exit: spin_unlock_bh(&ar->data_lock); @@ -2149,33 +2907,42 @@ exit: static void ath10k_wmi_update_tim(struct ath10k *ar, struct ath10k_vif *arvif, struct sk_buff *bcn, - const struct wmi_tim_info *tim_info) + const struct wmi_tim_info_arg *tim_info) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data; struct ieee80211_tim_ie *tim; u8 *ies, *ie; u8 ie_len, pvm_len; __le32 t; - u32 v; + u32 v, tim_len; + + /* When FW reports 0 in tim_len, ensure at least the first byte + * in tim_bitmap is considered for pvm calculation. + */ + tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1; /* if next SWBA has no tim_changed the tim_bitmap is garbage. * we must copy the bitmap upon change and reuse it later */ if (__le32_to_cpu(tim_info->tim_changed)) { int i; - BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) != - sizeof(tim_info->tim_bitmap)); + if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) { + ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu", + tim_len, sizeof(arvif->u.ap.tim_bitmap)); + tim_len = sizeof(arvif->u.ap.tim_bitmap); + } - for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) { + for (i = 0; i < tim_len; i++) { t = tim_info->tim_bitmap[i / 4]; v = __le32_to_cpu(t); arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; } - /* FW reports either length 0 or 16 - * so we calculate this on our own */ + /* FW reports either length 0 or length based on max supported + * stations, 
so we calculate this on our own + */ arvif->u.ap.tim_len = 0; - for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) + for (i = 0; i < tim_len; i++) if (arvif->u.ap.tim_bitmap[i]) arvif->u.ap.tim_len = i; @@ -2199,7 +2966,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */ if (pvm_len < arvif->u.ap.tim_len) { - int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len; + int expand_size = tim_len - pvm_len; int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len); void *next_ie = ie + 2 + ie_len; @@ -2214,7 +2981,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, } } - if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { + if (pvm_len > tim_len) { ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len); return; } @@ -2278,7 +3045,21 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) break; - arg->tim_info[i] = &ev->bcn_info[i].tim_info; + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info; i++; } @@ -2286,12 +3067,74 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) +{ + struct wmi_10_4_host_swba_event *ev = (void *)skb->data; + u32 map, tim_len; + size_t i; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->vdev_map = ev->vdev_map; + + for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { + if (!(map & BIT(0))) + continue; + + /* If this happens there were some changes in firmware and + * ath10k should update the max size of tim_info array. + */ + if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) + break; + + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len); + if (tim_len) { + /* Exclude 4 byte guard length */ + tim_len -= 4; + arg->tim_info[i].tim_len = __cpu_to_le32(tim_len); + } else { + arg->tim_info[i].tim_len = 0; + } + + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + + /* 10.4 firmware doesn't have p2p support. notice of absence + * info can be ignored for now. 
+ */ + + i++; + } + + return 0; +} + +static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar) +{ + return WMI_TXBF_CONF_BEFORE_ASSOC; +} + void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) { struct wmi_swba_ev_arg arg = {}; u32 map; int i = -1; - const struct wmi_tim_info *tim_info; + const struct wmi_tim_info_arg *tim_info; const struct wmi_p2p_noa_info *noa_info; struct ath10k_vif *arvif; struct sk_buff *bcn; @@ -2320,7 +3163,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) break; } - tim_info = arg.tim_info[i]; + tim_info = &arg.tim_info[i]; noa_info = arg.noa_info[i]; ath10k_dbg(ar, ATH10K_DBG_MGMT, @@ -2335,6 +3178,10 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(tim_info->tim_bitmap[1]), __le32_to_cpu(tim_info->tim_bitmap[0])); + /* TODO: Only first 4 word from tim_bitmap is dumped. + * Extend debug code to dump full tim_bitmap. + */ + arvif = ath10k_get_arvif(ar, vdev_id); if (arvif == NULL) { ath10k_warn(ar, "no vif for vdev_id %d found\n", @@ -2425,7 +3272,7 @@ void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) } static void ath10k_dfs_radar_report(struct ath10k *ar, - const struct wmi_phyerr *phyerr, + struct wmi_phyerr_ev_arg *phyerr, const struct phyerr_radar_report *rr, u64 tsf) { @@ -2469,7 +3316,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, } /* report event to DFS pattern detector */ - tsf32l = __le32_to_cpu(phyerr->tsf_timestamp); + tsf32l = phyerr->tsf_timestamp; tsf64 = tsf & (~0xFFFFFFFFULL); tsf64 |= tsf32l; @@ -2514,7 +3361,7 @@ radar_detected: } static int ath10k_dfs_fft_report(struct ath10k *ar, - const struct wmi_phyerr *phyerr, + struct wmi_phyerr_ev_arg *phyerr, const struct phyerr_fft_report *fftr, u64 tsf) { @@ -2552,7 +3399,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, } void ath10k_wmi_event_dfs(struct ath10k *ar, - const struct wmi_phyerr *phyerr, + struct wmi_phyerr_ev_arg *phyerr, u64 tsf) { int buf_len, tlv_len, res, i = 0; @@ -2561,11 +3408,11 @@ void ath10k_wmi_event_dfs(struct ath10k *ar, const struct phyerr_fft_report *fftr; const u8 *tlv_buf; - buf_len = __le32_to_cpu(phyerr->buf_len); + buf_len = phyerr->buf_len; ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", phyerr->phy_err_code, phyerr->rssi_combined, - __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len); + phyerr->tsf_timestamp, tsf, buf_len); /* Skip event if DFS disabled */ if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) @@ -2617,7 +3464,7 @@ void ath10k_wmi_event_dfs(struct ath10k *ar, } void ath10k_wmi_event_spectral_scan(struct ath10k *ar, - const struct wmi_phyerr *phyerr, + struct wmi_phyerr_ev_arg *phyerr, u64 tsf) { int buf_len, tlv_len, res, i = 0; @@ -2626,7 +3473,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar, const struct phyerr_fft_report *fftr; size_t fftr_len; - buf_len = __le32_to_cpu(phyerr->buf_len); + buf_len = phyerr->buf_len; while (i < buf_len) { if (i + sizeof(*tlv) > buf_len) { @@ -2659,7 +3506,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar, fftr, fftr_len, tsf); if (res < 0) { - ath10k_warn(ar, "failed to process fft report: %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n", res); return; } @@ -2670,65 +3517,169 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar, } } -static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb, - struct wmi_phyerr_ev_arg *arg) +static 
int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg) { struct wmi_phyerr_event *ev = (void *)skb->data; if (skb->len < sizeof(*ev)) return -EPROTO; - arg->num_phyerrs = ev->num_phyerrs; - arg->tsf_l32 = ev->tsf_l32; - arg->tsf_u32 = ev->tsf_u32; - arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev)); + arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs); + arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); + arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); + arg->buf_len = skb->len - sizeof(*ev); arg->phyerrs = ev->phyerrs; return 0; } +static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_phyerr_hdr_arg *arg) +{ + struct wmi_10_4_phyerr_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + /* 10.4 firmware always reports only one phyerr */ + arg->num_phyerrs = 1; + + arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); + arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); + arg->buf_len = skb->len; + arg->phyerrs = skb->data; + + return 0; +} + +int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, + const void *phyerr_buf, + int left_len, + struct wmi_phyerr_ev_arg *arg) +{ + const struct wmi_phyerr *phyerr = phyerr_buf; + int i; + + if (left_len < sizeof(*phyerr)) { + ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n", + left_len, sizeof(*phyerr)); + return -EINVAL; + } + + arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp); + arg->freq1 = __le16_to_cpu(phyerr->freq1); + arg->freq2 = __le16_to_cpu(phyerr->freq2); + arg->rssi_combined = phyerr->rssi_combined; + arg->chan_width_mhz = phyerr->chan_width_mhz; + arg->buf_len = __le32_to_cpu(phyerr->buf_len); + arg->buf = phyerr->buf; + arg->hdr_len = sizeof(*phyerr); + + for (i = 0; i < 4; i++) + arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]); + + switch (phyerr->phy_err_code) { + case PHY_ERROR_GEN_SPECTRAL_SCAN: + arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN; + break; + case PHY_ERROR_GEN_FALSE_RADAR_EXT: + arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT; + break; + case PHY_ERROR_GEN_RADAR: + arg->phy_err_code = PHY_ERROR_RADAR; + break; + default: + arg->phy_err_code = PHY_ERROR_UNKNOWN; + break; + } + + return 0; +} + +static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar, + const void *phyerr_buf, + int left_len, + struct wmi_phyerr_ev_arg *arg) +{ + const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf; + u32 phy_err_mask; + int i; + + if (left_len < sizeof(*phyerr)) { + ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n", + left_len, sizeof(*phyerr)); + return -EINVAL; + } + + arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp); + arg->freq1 = __le16_to_cpu(phyerr->freq1); + arg->freq2 = __le16_to_cpu(phyerr->freq2); + arg->rssi_combined = phyerr->rssi_combined; + arg->chan_width_mhz = phyerr->chan_width_mhz; + arg->buf_len = __le32_to_cpu(phyerr->buf_len); + arg->buf = phyerr->buf; + arg->hdr_len = sizeof(*phyerr); + + for (i = 0; i < 4; i++) + arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]); + + phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]); + + if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK) + arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN; + else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK) + arg->phy_err_code = PHY_ERROR_RADAR; + else + arg->phy_err_code = PHY_ERROR_UNKNOWN; + + return 0; +} + void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_phyerr_ev_arg arg = {}; - const struct wmi_phyerr *phyerr; + struct 
wmi_phyerr_hdr_arg hdr_arg = {}; + struct wmi_phyerr_ev_arg phyerr_arg = {}; + const void *phyerr; u32 count, i, buf_len, phy_err_code; u64 tsf; int left_len, ret; ATH10K_DFS_STAT_INC(ar, phy_errors); - ret = ath10k_wmi_pull_phyerr(ar, skb, &arg); + ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg); if (ret) { - ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret); + ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret); return; } - left_len = __le32_to_cpu(arg.buf_len); - /* Check number of included events */ - count = __le32_to_cpu(arg.num_phyerrs); + count = hdr_arg.num_phyerrs; + + left_len = hdr_arg.buf_len; - tsf = __le32_to_cpu(arg.tsf_u32); + tsf = hdr_arg.tsf_u32; tsf <<= 32; - tsf |= __le32_to_cpu(arg.tsf_l32); + tsf |= hdr_arg.tsf_l32; ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event phyerr count %d tsf64 0x%llX\n", count, tsf); - phyerr = arg.phyerrs; + phyerr = hdr_arg.phyerrs; for (i = 0; i < count; i++) { - /* Check if we can read event header */ - if (left_len < sizeof(*phyerr)) { - ath10k_warn(ar, "single event (%d) wrong head len\n", + ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg); + if (ret) { + ath10k_warn(ar, "failed to parse phyerr event (%d)\n", i); return; } - left_len -= sizeof(*phyerr); - - buf_len = __le32_to_cpu(phyerr->buf_len); - phy_err_code = phyerr->phy_err_code; + left_len -= phyerr_arg.hdr_len; + buf_len = phyerr_arg.buf_len; + phy_err_code = phyerr_arg.phy_err_code; if (left_len < buf_len) { ath10k_warn(ar, "single event (%d) wrong buf len\n", i); @@ -2739,20 +3690,20 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) switch (phy_err_code) { case PHY_ERROR_RADAR: - ath10k_wmi_event_dfs(ar, phyerr, tsf); + ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf); break; case PHY_ERROR_SPECTRAL_SCAN: - ath10k_wmi_event_spectral_scan(ar, phyerr, tsf); + ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf); break; case PHY_ERROR_FALSE_RADAR_EXT: - ath10k_wmi_event_dfs(ar, phyerr, tsf); - ath10k_wmi_event_spectral_scan(ar, phyerr, tsf); + ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf); + ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf); break; default: break; } - phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len; + phyerr = phyerr + phyerr_arg.hdr_len + buf_len; } } @@ -2950,7 +3901,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, - GFP_ATOMIC); + GFP_KERNEL); if (!ar->wmi.mem_chunks[idx].vaddr) { ath10k_warn(ar, "failed to allocate memory chunk\n"); return -ENOMEM; @@ -3039,12 +3990,19 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } -void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_wmi_event_service_ready_work(struct work_struct *work) { + struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work); + struct sk_buff *skb = ar->svc_rdy_skb; struct wmi_svc_rdy_ev_arg arg = {}; u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; int ret; + if (!skb) { + ath10k_warn(ar, "invalid service ready event skb\n"); + return; + } + ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg); if (ret) { ath10k_warn(ar, "failed to parse service ready: %d\n", ret); @@ -3076,10 +4034,10 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) if (ar->fw_api == 1 && ar->fw_version_build > 636) set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); - if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { + if 
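/* Editor's note: SERVICE_READY parsing now runs from
 * ath10k_wmi_event_service_ready_work() above rather than directly in the
 * WMI rx path; the event handler (further below) only stashes the skb in
 * ar->svc_rdy_skb and queues ar->svc_rdy_work on the auxiliary workqueue.
 * Running in process context is also what makes it safe to switch the host
 * memory chunk allocation above from GFP_ATOMIC to GFP_KERNEL. The
 * spatial-stream clamp at this point likewise compares against a
 * per-firmware ar->max_spatial_stream (the WMI_MAX_SPATIAL_STREAM = 3 and
 * WMI_10_4_MAX_SPATIAL_STREAM = 4 defines appear later in wmi.h; where
 * ar->max_spatial_stream gets assigned is outside this hunk). */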
(ar->num_rf_chains > ar->max_spatial_stream) { ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", - ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); - ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; + ar->num_rf_chains, ar->max_spatial_stream); + ar->num_rf_chains = ar->max_spatial_stream; } ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1; @@ -3102,20 +4060,39 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) return; } + if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { + ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + + TARGET_10_4_NUM_VDEVS; + ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + + TARGET_10_4_NUM_VDEVS; + ar->num_tids = ar->num_active_peers * 2; + ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; + } + + /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE + * and WMI_SERVICE_IRAM_TIDS, etc. + */ + for (i = 0; i < num_mem_reqs; ++i) { req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id); num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units); unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size); num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info); - if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) + if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) { + if (ar->num_active_peers) + num_units = ar->num_active_peers + 1; + else + num_units = ar->max_num_peers + 1; + } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) { /* number of units to allocate is number of * peers, 1 extra for self peer on target */ /* this needs to be tied, host and target * can get out of sync */ - num_units = TARGET_10X_NUM_PEERS + 1; - else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) - num_units = TARGET_10X_NUM_VDEVS + 1; + num_units = ar->max_num_peers + 1; + } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) { + num_units = ar->max_num_vdevs + 1; + } ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", @@ -3145,9 +4122,17 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(arg.eeprom_rd), __le32_to_cpu(arg.num_mem_reqs)); + dev_kfree_skb(skb); + ar->svc_rdy_skb = NULL; complete(&ar->wmi.service_ready); } +void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) +{ + ar->svc_rdy_skb = skb; + queue_work(ar->workqueue_aux, &ar->svc_rdy_work); +} + static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_rdy_ev_arg *arg) { @@ -3319,7 +4304,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb) break; case WMI_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; @@ -3440,7 +4425,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb) break; case WMI_10X_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_10X_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; @@ -3551,7 +4536,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) break; case WMI_10_2_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_10_2_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; @@ -3577,6 +4562,76 @@ out: dev_kfree_skb(skb); } +static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_10_4_event_id id; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id 
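/* Editor's note: when 10.4 firmware advertises WMI_SERVICE_PEER_CACHING,
 * the peer limits above are re-derived from the qcache targets
 * (TARGET_10_4_NUM_QCACHE_PEERS_MAX plus vdevs, num_tids twice the active
 * peers), and the mem_reqs loop gains a matching flag. Condensed, the new
 * sizing rule is:
 *
 *	if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS)
 *		num_units = (ar->num_active_peers ?: ar->max_num_peers) + 1;
 *	else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
 *		num_units = ar->max_num_peers + 1;
 *	else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
 *		num_units = ar->max_num_vdevs + 1;
 *
 * i.e. per-firmware limits stored on struct ath10k replace the old
 * hardcoded TARGET_10X_NUM_PEERS / TARGET_10X_NUM_VDEVS constants. */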
= MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + + if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) + goto out; + + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + + switch (id) { + case WMI_10_4_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! */ + return; + case WMI_10_4_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_10_4_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + break; + case WMI_10_4_SERVICE_READY_EVENTID: + ath10k_wmi_event_service_ready(ar, skb); + return; + case WMI_10_4_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + break; + case WMI_10_4_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_10_4_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_10_4_READY_EVENTID: + ath10k_wmi_event_ready(ar, skb); + break; + case WMI_10_4_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_10_4_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_10_4_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + break; + case WMI_10_4_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + break; + case WMI_10_4_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + break; + case WMI_10_4_WOW_WAKEUP_HOST_EVENTID: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received event id %d not implemented\n", id); + break; + default: + ath10k_warn(ar, "Unknown eventid: %d\n", id); + break; + } + +out: + dev_kfree_skb(skb); +} + static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) { int ret; @@ -3763,8 +4818,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar) config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI); - config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE); - + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); config.scan_max_pending_reqs = __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS); @@ -3832,8 +4886,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar) config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); - config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); - + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); config.scan_max_pending_reqs = __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); @@ -3898,7 +4951,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); - config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); config.scan_max_pending_reqs = __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); @@ -3951,6 +5004,88 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) return buf; } +static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) +{ + struct wmi_init_cmd_10_4 *cmd; + struct sk_buff *buf; + struct wmi_resource_config_10_4 config = 
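/* Editor's note on ath10k_wmi_10_4_op_rx() above: the event id comes from
 * the usual MS(cmd_id, WMI_CMD_HDR_CMD_ID) mask-and-shift; MGMT_RX and
 * SERVICE_READY "return" rather than "break" because skb ownership passes
 * to their handlers; and only the event subset the driver currently
 * consumes is wired up, with WOW wakeups noted at debug level and anything
 * unrecognized hitting the "Unknown eventid" warning before the common
 * dev_kfree_skb() at the out label. */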
{}; + u32 len; + + config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs); + config.num_peers = __cpu_to_le32(ar->max_num_peers); + config.num_active_peers = __cpu_to_le32(ar->num_active_peers); + config.num_tids = __cpu_to_le32(ar->num_tids); + + config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS); + config.num_offload_reorder_buffs = + __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS); + config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS); + config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK); + config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK); + + config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI); + + config.rx_decap_mode = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE); + config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS); + config.bmiss_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV); + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV); + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES); + config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM); + + config.rx_skip_defrag_timeout_dup_detection_check = + __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK); + + config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG); + config.gtk_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV); + config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC); + config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS); + config.max_peer_ext_stats = + __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS); + config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP); + + config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE); + config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE); + config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE); + config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE); + + config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE); + config.tt_support = + __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG); + config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG); + config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG); + config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG); + + len = sizeof(*cmd) + + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); + + buf = ath10k_wmi_alloc_skb(ar, len); + if (!buf) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_init_cmd_10_4 *)buf->data; + memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n"); + return buf; +} + int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg) { if 
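/* Editor's note: ath10k_wmi_10_4_op_gen_init() above keeps the layout of
 * the older gen_init variants: a fixed struct wmi_resource_config_10_4
 * followed by the variable host memory chunk array, hence
 *
 *	len = sizeof(*cmd) +
 *	      sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks;
 *
 * with ath10k_wmi_put_host_mem_chunks() appending the chunks reserved in
 * response to the firmware's SERVICE_READY mem_reqs. Note that num_vdevs,
 * num_peers, num_active_peers and num_tids come from the (possibly
 * qcache-adjusted) fields on struct ath10k, not compile-time constants. */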
(arg->ie_len && !arg->ie) @@ -4173,7 +5308,6 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar, | WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHANNEL | WMI_SCAN_EVENT_DEQUEUED; - arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; arg->n_bssids = 1; arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; @@ -5171,6 +6305,7 @@ static const struct wmi_ops wmi_ops = { .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, @@ -5242,6 +6377,7 @@ static const struct wmi_ops wmi_10_1_ops = { .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, @@ -5307,6 +6443,7 @@ static const struct wmi_ops wmi_10_2_ops = { .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, @@ -5368,6 +6505,7 @@ static const struct wmi_ops wmi_10_2_4_ops = { .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, @@ -5413,9 +6551,73 @@ static const struct wmi_ops wmi_10_2_4_ops = { /* .gen_adaptive_qcs not implemented */ }; +static const struct wmi_ops wmi_10_4_ops = { + .rx = ath10k_wmi_10_4_op_rx, + .map_svc = wmi_10_4_svc_map, + + .pull_scan = ath10k_wmi_op_pull_scan_ev, + .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev, + .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev, + .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, + .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, + .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev, + .pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr, + .pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev, + .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, + .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme, + + .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, + .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, + .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, + .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, + .gen_init = ath10k_wmi_10_4_op_gen_init, + .gen_start_scan = ath10k_wmi_op_gen_start_scan, + .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, + .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, + .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, + .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, + .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, + .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, + .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, + .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, + .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, + .gen_vdev_spectral_conf = 
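/* Editor's note: wmi_10_4_ops mixes generic and 10.4-specific callbacks.
 * Only the paths whose wire format changed (rx dispatch, mgmt_rx, ch_info,
 * swba and phyerr pulls, gen_init, plus the new get_txbf_conf_scheme) get
 * dedicated implementations; the rest reuse the common ath10k_wmi_op_*
 * helpers, and gen_peer_assoc is explicitly shared with 10.2. As in the
 * other tables, any op left unset is reported as unsupported by the
 * wmi-ops wrappers. */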
ath10k_wmi_op_gen_vdev_spectral_conf, + .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, + .gen_peer_create = ath10k_wmi_op_gen_peer_create, + .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, + .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, + .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, + .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, + .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, + .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, + .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, + .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, + .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, + .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, + .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, + .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, + .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, + .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, + .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, + .gen_addba_send = ath10k_wmi_op_gen_addba_send, + .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, + .gen_delba_send = ath10k_wmi_op_gen_delba_send, + + /* shared with 10.2 */ + .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, +}; + int ath10k_wmi_attach(struct ath10k *ar) { switch (ar->wmi.op_version) { + case ATH10K_FW_WMI_OP_VERSION_10_4: + ar->wmi.ops = &wmi_10_4_ops; + ar->wmi.cmd = &wmi_10_4_cmd_map; + ar->wmi.vdev_param = &wmi_10_4_vdev_param_map; + ar->wmi.pdev_param = &wmi_10_4_pdev_param_map; + break; case ATH10K_FW_WMI_OP_VERSION_10_2_4: ar->wmi.cmd = &wmi_10_2_4_cmd_map; ar->wmi.ops = &wmi_10_2_4_ops; @@ -5453,6 +6655,8 @@ int ath10k_wmi_attach(struct ath10k *ar) init_completion(&ar->wmi.service_ready); init_completion(&ar->wmi.unified_ready); + INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work); + return 0; } @@ -5460,6 +6664,11 @@ void ath10k_wmi_detach(struct ath10k *ar) { int i; + cancel_work_sync(&ar->svc_rdy_work); + + if (ar->svc_rdy_skb) + dev_kfree_skb(ar->svc_rdy_skb); + /* free the host memory chunks requested by firmware */ for (i = 0; i < ar->wmi.num_mem_chunks; i++) { dma_free_coherent(ar->dev, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index cf44a3d08..52d35032d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -150,6 +150,12 @@ enum wmi_service { WMI_SERVICE_SAP_AUTH_OFFLOAD, WMI_SERVICE_ATF, WMI_SERVICE_COEX_GPIO, + WMI_SERVICE_ENHANCED_PROXY_STA, + WMI_SERVICE_TT, + WMI_SERVICE_PEER_CACHING, + WMI_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_BSS_CHANNEL_INFO_64, /* keep last */ WMI_SERVICE_MAX, @@ -218,6 +224,51 @@ enum wmi_main_service { WMI_MAIN_SERVICE_TX_ENCAP, }; +enum wmi_10_4_service { + WMI_10_4_SERVICE_BEACON_OFFLOAD = 0, + WMI_10_4_SERVICE_SCAN_OFFLOAD, + WMI_10_4_SERVICE_ROAM_OFFLOAD, + WMI_10_4_SERVICE_BCN_MISS_OFFLOAD, + WMI_10_4_SERVICE_STA_PWRSAVE, + WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_10_4_SERVICE_AP_UAPSD, + WMI_10_4_SERVICE_AP_DFS, + WMI_10_4_SERVICE_11AC, + WMI_10_4_SERVICE_BLOCKACK, + WMI_10_4_SERVICE_PHYERR, + WMI_10_4_SERVICE_BCN_FILTER, + WMI_10_4_SERVICE_RTT, + WMI_10_4_SERVICE_RATECTRL, + WMI_10_4_SERVICE_WOW, + WMI_10_4_SERVICE_RATECTRL_CACHE, + WMI_10_4_SERVICE_IRAM_TIDS, + WMI_10_4_SERVICE_BURST, + WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_10_4_SERVICE_GTK_OFFLOAD, + WMI_10_4_SERVICE_SCAN_SCH, + WMI_10_4_SERVICE_CSA_OFFLOAD, + WMI_10_4_SERVICE_CHATTER, + 
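/* Editor's note: these implicitly numbered enumerators are bit positions
 * in the service bitmap carried by the 10.4 SERVICE_READY event, so their
 * order is firmware ABI: entries may only be appended, never reordered.
 * wmi_10_4_svc_map() further below translates each firmware bit into the
 * firmware-agnostic WMI_SERVICE_* space used by the rest of the driver. */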
WMI_10_4_SERVICE_COEX_FREQAVOID, + WMI_10_4_SERVICE_PACKET_POWER_SAVE, + WMI_10_4_SERVICE_FORCE_FW_HANG, + WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_10_4_SERVICE_GPIO, + WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_10_4_SERVICE_STA_KEEP_ALIVE, + WMI_10_4_SERVICE_TX_ENCAP, + WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, + WMI_10_4_SERVICE_EARLY_RX, + WMI_10_4_SERVICE_ENHANCED_PROXY_STA, + WMI_10_4_SERVICE_TT, + WMI_10_4_SERVICE_ATF, + WMI_10_4_SERVICE_PEER_CACHING, + WMI_10_4_SERVICE_COEX_GPIO, + WMI_10_4_SERVICE_AUX_SPECTRAL_INTF, + WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, +}; + static inline char *wmi_service_name(int service_id) { #define SVCSTR(x) case x: return #x @@ -299,6 +350,12 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD); SVCSTR(WMI_SERVICE_ATF); SVCSTR(WMI_SERVICE_COEX_GPIO); + SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA); + SVCSTR(WMI_SERVICE_TT); + SVCSTR(WMI_SERVICE_PEER_CACHING); + SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF); + SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF); + SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64); default: return NULL; } @@ -437,6 +494,95 @@ static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_TX_ENCAP, len); } +static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, + size_t len) +{ + SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE, len); + SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); + SVCMAP(WMI_10_4_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD, len); + SVCMAP(WMI_10_4_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS, len); + SVCMAP(WMI_10_4_SERVICE_11AC, + WMI_SERVICE_11AC, len); + SVCMAP(WMI_10_4_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK, len); + SVCMAP(WMI_10_4_SERVICE_PHYERR, + WMI_SERVICE_PHYERR, len); + SVCMAP(WMI_10_4_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER, len); + SVCMAP(WMI_10_4_SERVICE_RTT, + WMI_SERVICE_RTT, len); + SVCMAP(WMI_10_4_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL, len); + SVCMAP(WMI_10_4_SERVICE_WOW, + WMI_SERVICE_WOW, len); + SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE, len); + SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS, len); + SVCMAP(WMI_10_4_SERVICE_BURST, + WMI_SERVICE_BURST, len); + SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_GTK_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_SCAN_SCH, + WMI_SERVICE_SCAN_SCH, len); + SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CSA_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_CHATTER, + WMI_SERVICE_CHATTER, len); + SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_COEX_FREQAVOID, len); + SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_PACKET_POWER_SAVE, len); + SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG, len); + SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_GPIO, + WMI_SERVICE_GPIO, len); + SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len); + 
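/* Editor's note: SVCMAP is defined earlier in this header and is not
 * visible in this hunk. Judging from the call sites it is a
 * bit-test-and-set along the lines of the following sketch (the exact
 * form of the length guard is an assumption here):
 *
 *	#define SVCMAP(x, y, len) \
 *		do { \
 *			if ((x) < (len) && \
 *			    (__le32_to_cpu(in[(x) / 32]) & BIT((x) % 32))) \
 *				__set_bit(y, out); \
 *		} while (0)
 *
 * i.e. if firmware service bit x is set in the __le32 bitmap "in", the
 * abstract service bit y is set in the unsigned long bitmap "out". */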
SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len); + SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_STA_KEEP_ALIVE, len); + SVCMAP(WMI_10_4_SERVICE_TX_ENCAP, + WMI_SERVICE_TX_ENCAP, len); + SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, + WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len); + SVCMAP(WMI_10_4_SERVICE_EARLY_RX, + WMI_SERVICE_EARLY_RX, len); + SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA, + WMI_SERVICE_ENHANCED_PROXY_STA, len); + SVCMAP(WMI_10_4_SERVICE_TT, + WMI_SERVICE_TT, len); + SVCMAP(WMI_10_4_SERVICE_ATF, + WMI_SERVICE_ATF, len); + SVCMAP(WMI_10_4_SERVICE_PEER_CACHING, + WMI_SERVICE_PEER_CACHING, len); + SVCMAP(WMI_10_4_SERVICE_COEX_GPIO, + WMI_SERVICE_COEX_GPIO, len); + SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_SPECTRAL_INTF, len); + SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, len); + SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_BSS_CHANNEL_INFO_64, len); +} + #undef SVCMAP /* 2 word representation of MAC addr */ @@ -565,6 +711,48 @@ struct wmi_cmd_map { u32 tdls_set_state_cmdid; u32 tdls_peer_update_cmdid; u32 adaptive_qcs_cmdid; + u32 scan_update_request_cmdid; + u32 vdev_standby_response_cmdid; + u32 vdev_resume_response_cmdid; + u32 wlan_peer_caching_add_peer_cmdid; + u32 wlan_peer_caching_evict_peer_cmdid; + u32 wlan_peer_caching_restore_peer_cmdid; + u32 wlan_peer_caching_print_all_peers_info_cmdid; + u32 peer_update_wds_entry_cmdid; + u32 peer_add_proxy_sta_entry_cmdid; + u32 rtt_keepalive_cmdid; + u32 oem_req_cmdid; + u32 nan_cmdid; + u32 vdev_ratemask_cmdid; + u32 qboost_cfg_cmdid; + u32 pdev_smart_ant_enable_cmdid; + u32 pdev_smart_ant_set_rx_antenna_cmdid; + u32 peer_smart_ant_set_tx_antenna_cmdid; + u32 peer_smart_ant_set_train_info_cmdid; + u32 peer_smart_ant_set_node_config_ops_cmdid; + u32 pdev_set_antenna_switch_table_cmdid; + u32 pdev_set_ctl_table_cmdid; + u32 pdev_set_mimogain_table_cmdid; + u32 pdev_ratepwr_table_cmdid; + u32 pdev_ratepwr_chainmsk_table_cmdid; + u32 pdev_fips_cmdid; + u32 tt_set_conf_cmdid; + u32 fwtest_cmdid; + u32 vdev_atf_request_cmdid; + u32 peer_atf_request_cmdid; + u32 pdev_get_ani_cck_config_cmdid; + u32 pdev_get_ani_ofdm_config_cmdid; + u32 pdev_reserve_ast_entry_cmdid; + u32 pdev_get_nfcal_power_cmdid; + u32 pdev_get_tpc_cmdid; + u32 pdev_get_ast_info_cmdid; + u32 vdev_set_dscp_tid_map_cmdid; + u32 pdev_get_info_cmdid; + u32 vdev_get_info_cmdid; + u32 vdev_filter_neighbor_rx_packets_cmdid; + u32 mu_cal_start_cmdid; + u32 set_cca_params_cmdid; + u32 pdev_bss_chan_info_request_cmdid; }; /* @@ -1220,6 +1408,216 @@ enum wmi_10_2_event_id { WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1, }; +enum wmi_10_4_cmd_id { + WMI_10_4_START_CMDID = 0x9000, + WMI_10_4_END_CMDID = 0x9FFF, + WMI_10_4_INIT_CMDID, + WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID, + WMI_10_4_STOP_SCAN_CMDID, + WMI_10_4_SCAN_CHAN_LIST_CMDID, + WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID, + WMI_10_4_SCAN_UPDATE_REQUEST_CMDID, + WMI_10_4_ECHO_CMDID, + WMI_10_4_PDEV_SET_REGDOMAIN_CMDID, + WMI_10_4_PDEV_SET_CHANNEL_CMDID, + WMI_10_4_PDEV_SET_PARAM_CMDID, + WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID, + WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID, + WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID, + WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID, + WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID, + WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_4_PDEV_SET_QUIET_MODE_CMDID, + WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID, + WMI_10_4_VDEV_CREATE_CMDID, 
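/* Editor's note on the numbering above: WMI_10_4_INIT_CMDID is declared
 * immediately after WMI_10_4_END_CMDID (0x9FFF), so plain enum increment
 * places it at 0xA000, deliberately outside the 0x9000..0x9FFF range; the
 * in-range command ids then restart with WMI_10_4_START_SCAN_CMDID =
 * WMI_10_4_START_CMDID (0x9000) and count up implicitly from there. As
 * with the service enum, the order of the unvalued entries is firmware
 * ABI and must not be rearranged. */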
+ WMI_10_4_VDEV_DELETE_CMDID, + WMI_10_4_VDEV_START_REQUEST_CMDID, + WMI_10_4_VDEV_RESTART_REQUEST_CMDID, + WMI_10_4_VDEV_UP_CMDID, + WMI_10_4_VDEV_STOP_CMDID, + WMI_10_4_VDEV_DOWN_CMDID, + WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID, + WMI_10_4_VDEV_RESUME_RESPONSE_CMDID, + WMI_10_4_VDEV_SET_PARAM_CMDID, + WMI_10_4_VDEV_INSTALL_KEY_CMDID, + WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID, + WMI_10_4_PEER_CREATE_CMDID, + WMI_10_4_PEER_DELETE_CMDID, + WMI_10_4_PEER_FLUSH_TIDS_CMDID, + WMI_10_4_PEER_SET_PARAM_CMDID, + WMI_10_4_PEER_ASSOC_CMDID, + WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID, + WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID, + WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID, + WMI_10_4_PEER_MCAST_GROUP_CMDID, + WMI_10_4_BCN_TX_CMDID, + WMI_10_4_PDEV_SEND_BCN_CMDID, + WMI_10_4_BCN_PRB_TMPL_CMDID, + WMI_10_4_BCN_FILTER_RX_CMDID, + WMI_10_4_PRB_REQ_FILTER_RX_CMDID, + WMI_10_4_MGMT_TX_CMDID, + WMI_10_4_PRB_TMPL_CMDID, + WMI_10_4_ADDBA_CLEAR_RESP_CMDID, + WMI_10_4_ADDBA_SEND_CMDID, + WMI_10_4_ADDBA_STATUS_CMDID, + WMI_10_4_DELBA_SEND_CMDID, + WMI_10_4_ADDBA_SET_RESP_CMDID, + WMI_10_4_SEND_SINGLEAMSDU_CMDID, + WMI_10_4_STA_POWERSAVE_MODE_CMDID, + WMI_10_4_STA_POWERSAVE_PARAM_CMDID, + WMI_10_4_STA_MIMO_PS_MODE_CMDID, + WMI_10_4_DBGLOG_CFG_CMDID, + WMI_10_4_PDEV_DFS_ENABLE_CMDID, + WMI_10_4_PDEV_DFS_DISABLE_CMDID, + WMI_10_4_PDEV_QVIT_CMDID, + WMI_10_4_ROAM_SCAN_MODE, + WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD, + WMI_10_4_ROAM_SCAN_PERIOD, + WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_10_4_ROAM_AP_PROFILE, + WMI_10_4_OFL_SCAN_ADD_AP_PROFILE, + WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_10_4_OFL_SCAN_PERIOD, + WMI_10_4_P2P_DEV_SET_DEVICE_INFO, + WMI_10_4_P2P_DEV_SET_DISCOVERABILITY, + WMI_10_4_P2P_GO_SET_BEACON_IE, + WMI_10_4_P2P_GO_SET_PROBE_RESP_IE, + WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID, + WMI_10_4_AP_PS_PEER_PARAM_CMDID, + WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID, + WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID, + WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID, + WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + WMI_10_4_PDEV_SUSPEND_CMDID, + WMI_10_4_PDEV_RESUME_CMDID, + WMI_10_4_ADD_BCN_FILTER_CMDID, + WMI_10_4_RMV_BCN_FILTER_CMDID, + WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID, + WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_10_4_WOW_ENABLE_CMDID, + WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + WMI_10_4_RTT_MEASREQ_CMDID, + WMI_10_4_RTT_TSF_CMDID, + WMI_10_4_RTT_KEEPALIVE_CMDID, + WMI_10_4_OEM_REQ_CMDID, + WMI_10_4_NAN_CMDID, + WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_10_4_REQUEST_STATS_CMDID, + WMI_10_4_GPIO_CONFIG_CMDID, + WMI_10_4_GPIO_OUTPUT_CMDID, + WMI_10_4_VDEV_RATEMASK_CMDID, + WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID, + WMI_10_4_GTK_OFFLOAD_CMDID, + WMI_10_4_QBOOST_CFG_CMDID, + WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID, + WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID, + WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + WMI_10_4_VDEV_SET_KEEPALIVE_CMDID, + WMI_10_4_VDEV_GET_KEEPALIVE_CMDID, + WMI_10_4_FORCE_FW_HANG_CMDID, + WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + 
WMI_10_4_PDEV_SET_CTL_TABLE_CMDID, + WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID, + WMI_10_4_PDEV_RATEPWR_TABLE_CMDID, + WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + WMI_10_4_PDEV_FIPS_CMDID, + WMI_10_4_TT_SET_CONF_CMDID, + WMI_10_4_FWTEST_CMDID, + WMI_10_4_VDEV_ATF_REQUEST_CMDID, + WMI_10_4_PEER_ATF_REQUEST_CMDID, + WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID, + WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID, + WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID, + WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID, + WMI_10_4_PDEV_GET_TPC_CMDID, + WMI_10_4_PDEV_GET_AST_INFO_CMDID, + WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_4_PDEV_GET_TEMPERATURE_CMDID, + WMI_10_4_PDEV_GET_INFO_CMDID, + WMI_10_4_VDEV_GET_INFO_CMDID, + WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID, + WMI_10_4_MU_CAL_START_CMDID, + WMI_10_4_SET_CCA_PARAMS_CMDID, + WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, +}; + +enum wmi_10_4_event_id { + WMI_10_4_SERVICE_READY_EVENTID = 0x8000, + WMI_10_4_READY_EVENTID, + WMI_10_4_DEBUG_MESG_EVENTID, + WMI_10_4_START_EVENTID = 0x9000, + WMI_10_4_END_EVENTID = 0x9FFF, + WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID, + WMI_10_4_ECHO_EVENTID, + WMI_10_4_UPDATE_STATS_EVENTID, + WMI_10_4_INST_RSSI_STATS_EVENTID, + WMI_10_4_VDEV_START_RESP_EVENTID, + WMI_10_4_VDEV_STANDBY_REQ_EVENTID, + WMI_10_4_VDEV_RESUME_REQ_EVENTID, + WMI_10_4_VDEV_STOPPED_EVENTID, + WMI_10_4_PEER_STA_KICKOUT_EVENTID, + WMI_10_4_HOST_SWBA_EVENTID, + WMI_10_4_TBTTOFFSET_UPDATE_EVENTID, + WMI_10_4_MGMT_RX_EVENTID, + WMI_10_4_CHAN_INFO_EVENTID, + WMI_10_4_PHYERR_EVENTID, + WMI_10_4_ROAM_EVENTID, + WMI_10_4_PROFILE_MATCH, + WMI_10_4_DEBUG_PRINT_EVENTID, + WMI_10_4_PDEV_QVIT_EVENTID, + WMI_10_4_WLAN_PROFILE_DATA_EVENTID, + WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_RTT_ERROR_REPORT_EVENTID, + WMI_10_4_RTT_KEEPALIVE_EVENTID, + WMI_10_4_OEM_CAPABILITY_EVENTID, + WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_OEM_ERROR_REPORT_EVENTID, + WMI_10_4_NAN_EVENTID, + WMI_10_4_WOW_WAKEUP_HOST_EVENTID, + WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID, + WMI_10_4_GTK_REKEY_FAIL_EVENTID, + WMI_10_4_DCS_INTERFERENCE_EVENTID, + WMI_10_4_PDEV_TPC_CONFIG_EVENTID, + WMI_10_4_CSA_HANDLING_EVENTID, + WMI_10_4_GPIO_INPUT_EVENTID, + WMI_10_4_PEER_RATECODE_LIST_EVENTID, + WMI_10_4_GENERIC_BUFFER_EVENTID, + WMI_10_4_MCAST_BUF_RELEASE_EVENTID, + WMI_10_4_MCAST_LIST_AGEOUT_EVENTID, + WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID, + WMI_10_4_WDS_PEER_EVENTID, + WMI_10_4_PEER_STA_PS_STATECHG_EVENTID, + WMI_10_4_PDEV_FIPS_EVENTID, + WMI_10_4_TT_STATS_EVENTID, + WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID, + WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID, + WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID, + WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID, + WMI_10_4_PDEV_NFCAL_POWER_EVENTID, + WMI_10_4_PDEV_TPC_EVENTID, + WMI_10_4_PDEV_GET_AST_INFO_EVENTID, + WMI_10_4_PDEV_TEMPERATURE_EVENTID, + WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID, + WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID, + WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, +}; + enum wmi_phy_mode { MODE_11A = 0, /* 11a Mode */ MODE_11G = 1, /* 11b/g Mode */ @@ -1349,7 +1747,8 @@ enum wmi_channel_change_cause { /* Indicate reason for channel switch */ #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13) -#define WMI_MAX_SPATIAL_STREAM 3 +#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */ +#define WMI_10_4_MAX_SPATIAL_STREAM 4 /* HT Capabilities*/ #define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ @@ -1979,8 +2378,224 @@ struct wmi_resource_config_10_2 { __le32 feature_mask; } 
__packed; -#define NUM_UNITS_IS_NUM_VDEVS 0x1 -#define NUM_UNITS_IS_NUM_PEERS 0x2 +#define NUM_UNITS_IS_NUM_VDEVS BIT(0) +#define NUM_UNITS_IS_NUM_PEERS BIT(1) +#define NUM_UNITS_IS_NUM_ACTIVE_PEERS BIT(2) + +struct wmi_resource_config_10_4 { + /* Number of virtual devices (VAPs) to support */ + __le32 num_vdevs; + + /* Number of peer nodes to support */ + __le32 num_peers; + + /* Number of active peer nodes to support */ + __le32 num_active_peers; + + /* In offload mode, target supports features like WOW, chatter and other + * protocol offloads. In order to support them some functionalities like + * reorder buffering, PN checking need to be done in target. + * This determines maximum number of peers supported by target in + * offload mode. + */ + __le32 num_offload_peers; + + /* Number of reorder buffers available for doing target based reorder + * Rx reorder buffering + */ + __le32 num_offload_reorder_buffs; + + /* Number of keys per peer */ + __le32 num_peer_keys; + + /* Total number of TX/RX data TIDs */ + __le32 num_tids; + + /* Max skid for resolving hash collisions. + * The address search table is sparse, so that if two MAC addresses + * result in the same hash value, the second of these conflicting + * entries can slide to the next index in the address search table, + * and use it, if it is unoccupied. This ast_skid_limit parameter + * specifies the upper bound on how many subsequent indices to search + * over to find an unoccupied space. + */ + __le32 ast_skid_limit; + + /* The nominal chain mask for transmit. + * The chain mask may be modified dynamically, e.g. to operate AP tx + * with a reduced number of chains if no clients are associated. + * This configuration parameter specifies the nominal chain-mask that + * should be used when not operating with a reduced set of tx chains. + */ + __le32 tx_chain_mask; + + /* The nominal chain mask for receive. + * The chain mask may be modified dynamically, e.g. for a client to use + * a reduced number of chains for receive if the traffic to the client + * is low enough that it doesn't require downlink MIMO or antenna + * diversity. This configuration parameter specifies the nominal + * chain-mask that should be used when not operating with a reduced + * set of rx chains. + */ + __le32 rx_chain_mask; + + /* What rx reorder timeout (ms) to use for the AC. + * Each WMM access class (voice, video, best-effort, background) will + * have its own timeout value to dictate how long to wait for missing + * rx MPDUs to arrive before flushing subsequent MPDUs that have already + * been received. This parameter specifies the timeout in milliseconds + * for each class. + */ + __le32 rx_timeout_pri[4]; + + /* What mode the rx should decap packets to. + * MAC can decap to RAW (no decap), native wifi or Ethernet types. + * This setting also determines the default TX behavior, however TX + * behavior can be modified on a per VAP basis during VAP init + */ + __le32 rx_decap_mode; + + __le32 scan_max_pending_req; + + __le32 bmiss_offload_max_vdev; + + __le32 roam_offload_max_vdev; + + __le32 roam_offload_max_ap_profiles; + + /* How many groups to use for mcast->ucast conversion. + * The target's WAL maintains a table to hold information regarding + * which peers belong to a given multicast group, so that if + * multicast->unicast conversion is enabled, the target can convert + * multicast tx frames to a series of unicast tx frames, to each peer + * within the multicast group. 
This num_mcast_groups configuration + * parameter tells the target how many multicast groups to provide + * storage for within its multicast group membership table. + */ + __le32 num_mcast_groups; + + /* Size to alloc for the mcast membership table. + * This num_mcast_table_elems configuration parameter tells the target + * how many peer elements it needs to provide storage for in its + * multicast group membership table. These multicast group membership + * table elements are shared by the multicast groups stored within + * the table. + */ + __le32 num_mcast_table_elems; + + /* Whether/how to do multicast->unicast conversion. + * This configuration parameter specifies whether the target should + * perform multicast --> unicast conversion on transmit, and if so, + * what to do if it finds no entries in its multicast group membership + * table for the multicast IP address in the tx frame. + * Configuration value: + * 0 -> Do not perform multicast to unicast conversion. + * 1 -> Convert multicast frames to unicast, if the IP multicast address + * from the tx frame is found in the multicast group membership + * table. If the IP multicast address is not found, drop the frame + * 2 -> Convert multicast frames to unicast, if the IP multicast address + * from the tx frame is found in the multicast group membership + * table. If the IP multicast address is not found, transmit the + * frame as multicast. + */ + __le32 mcast2ucast_mode; + + /* How much memory to allocate for a tx PPDU dbg log. + * This parameter controls how much memory the target will allocate to + * store a log of tx PPDU meta-information (how large the PPDU was, + * when it was sent, whether it was successful, etc.) + */ + __le32 tx_dbg_log_size; + + /* How many AST entries to be allocated for WDS */ + __le32 num_wds_entries; + + /* MAC DMA burst size. 0 -default, 1 -256B */ + __le32 dma_burst_size; + + /* Fixed delimiters to be inserted after every MPDU to account for + * interface latency to avoid underrun. + */ + __le32 mac_aggr_delim; + + /* Determine whether target is responsible for detecting duplicate + * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering + * is always performed on the target. + * + * 0: target responsible for frag timeout and dup checking + * 1: host responsible for frag timeout and dup checking + */ + __le32 rx_skip_defrag_timeout_dup_detection_check; + + /* Configuration for VoW : No of Video nodes to be supported and max + * no of descriptors for each video link (node). + */ + __le32 vow_config; + + /* Maximum vdev that could use gtk offload */ + __le32 gtk_offload_max_vdev; + + /* Number of msdu descriptors target should use */ + __le32 num_msdu_desc; + + /* Max number of tx fragments per MSDU. + * This parameter controls the max number of tx fragments per MSDU. + * This will passed by target as part of the WMI_SERVICE_READY event + * and is overridden by the OS shim as required. + */ + __le32 max_frag_entries; + + /* Max number of extended peer stats. + * This parameter controls the max number of peers for which extended + * statistics are supported by target + */ + __le32 max_peer_ext_stats; + + /* Smart antenna capabilities information. + * 1 - Smart antenna is enabled + * 0 - Smart antenna is disabled + * In future this can contain smart antenna specific capabilities. + */ + __le32 smart_ant_cap; + + /* User can configure the buffers allocated for each AC (BE, BK, VI, VO) + * during init. 
+ */ + __le32 bk_minfree; + __le32 be_minfree; + __le32 vi_minfree; + __le32 vo_minfree; + + /* Rx batch mode capability. + * 1 - Rx batch mode enabled + * 0 - Rx batch mode disabled + */ + __le32 rx_batchmode; + + /* Thermal throttling capability. + * 1 - Capable of thermal throttling + * 0 - Not capable of thermal throttling + */ + __le32 tt_support; + + /* ATF configuration. + * 1 - Enable ATF + * 0 - Disable ATF + */ + __le32 atf_config; + + /* Configure padding to manage IP header un-alignment + * 1 - Enable padding + * 0 - Disable padding + */ + __le32 iphdr_pad_config; + + /* qwrap configuration + * 1 - This is qwrap configuration + * 0 - This is not qwrap + */ + __le32 qwrap_config; +} __packed; /* strucutre describing host memory chunk. */ struct host_memory_chunk { @@ -2014,6 +2629,11 @@ struct wmi_init_cmd_10_2 { struct wmi_host_mem_chunks mem_chunks; } __packed; +struct wmi_init_cmd_10_4 { + struct wmi_resource_config_10_4 resource_config; + struct wmi_host_mem_chunks mem_chunks; +} __packed; + struct wmi_chan_list_entry { __le16 freq; u8 phy_mode; /* valid for 10.2 only */ @@ -2260,15 +2880,17 @@ enum wmi_bss_filter { }; enum wmi_scan_event_type { - WMI_SCAN_EVENT_STARTED = 0x1, - WMI_SCAN_EVENT_COMPLETED = 0x2, - WMI_SCAN_EVENT_BSS_CHANNEL = 0x4, - WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8, - WMI_SCAN_EVENT_DEQUEUED = 0x10, - WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */ - WMI_SCAN_EVENT_START_FAILED = 0x40, - WMI_SCAN_EVENT_RESTARTED = 0x80, - WMI_SCAN_EVENT_MAX = 0x8000 + WMI_SCAN_EVENT_STARTED = BIT(0), + WMI_SCAN_EVENT_COMPLETED = BIT(1), + WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2), + WMI_SCAN_EVENT_FOREIGN_CHANNEL = BIT(3), + WMI_SCAN_EVENT_DEQUEUED = BIT(4), + /* possibly by high-prio scan */ + WMI_SCAN_EVENT_PREEMPTED = BIT(5), + WMI_SCAN_EVENT_START_FAILED = BIT(6), + WMI_SCAN_EVENT_RESTARTED = BIT(7), + WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8), + WMI_SCAN_EVENT_MAX = BIT(15), }; enum wmi_scan_completion_reason { @@ -2276,6 +2898,7 @@ enum wmi_scan_completion_reason { WMI_SCAN_REASON_CANCELLED, WMI_SCAN_REASON_PREEMPTED, WMI_SCAN_REASON_TIMEDOUT, + WMI_SCAN_REASON_INTERNAL_FAILURE, WMI_SCAN_REASON_MAX, }; @@ -2329,15 +2952,40 @@ struct wmi_mgmt_rx_event_v2 { u8 buf[0]; } __packed; +struct wmi_10_4_mgmt_rx_hdr { + __le32 channel; + __le32 snr; + u8 rssi_ctl[4]; + __le32 rate; + __le32 phy_mode; + __le32 buf_len; + __le32 status; +} __packed; + +struct wmi_10_4_mgmt_rx_event { + struct wmi_10_4_mgmt_rx_hdr hdr; + u8 buf[0]; +} __packed; + #define WMI_RX_STATUS_OK 0x00 #define WMI_RX_STATUS_ERR_CRC 0x01 #define WMI_RX_STATUS_ERR_DECRYPT 0x08 #define WMI_RX_STATUS_ERR_MIC 0x10 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 -#define PHY_ERROR_SPECTRAL_SCAN 0x26 -#define PHY_ERROR_FALSE_RADAR_EXT 0x24 -#define PHY_ERROR_RADAR 0x05 +#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26 +#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24 +#define PHY_ERROR_GEN_RADAR 0x05 + +#define PHY_ERROR_10_4_RADAR_MASK 0x4 +#define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK 0x4000000 + +enum phy_err_type { + PHY_ERROR_UNKNOWN, + PHY_ERROR_SPECTRAL_SCAN, + PHY_ERROR_FALSE_RADAR_EXT, + PHY_ERROR_RADAR +}; struct wmi_phyerr { __le32 tsf_timestamp; @@ -2360,6 +3008,23 @@ struct wmi_phyerr_event { struct wmi_phyerr phyerrs[0]; } __packed; +struct wmi_10_4_phyerr_event { + __le32 tsf_l32; + __le32 tsf_u32; + __le16 freq1; + __le16 freq2; + u8 rssi_combined; + u8 chan_width_mhz; + u8 phy_err_code; + u8 rsvd0; + __le32 rssi_chains[4]; + __le16 nf_chains[4]; + __le32 phy_err_mask[2]; + __le32 tsf_timestamp; + 
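/* Editor's note: unlike the legacy layout, where a small wmi_phyerr_event
 * header is followed by an array of wmi_phyerr records, the 10.4 event is
 * a single self-contained structure: it carries the TSF halves itself,
 * reports the error type as the phy_err_mask[] bitmap rather than a
 * numeric code, and holds exactly one record per event. That is why the
 * 10.4 pull_phyerr_hdr op hardcodes num_phyerrs = 1 and points
 * arg->phyerrs at the start of the skb. */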
__le32 buf_len; + u8 buf[0]; +} __packed; + #define PHYERR_TLV_SIG 0xBB #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8 @@ -2613,6 +3278,48 @@ struct wmi_pdev_param_map { u32 burst_dur; u32 burst_enable; u32 cal_period; + u32 aggr_burst; + u32 rx_decap_mode; + u32 smart_antenna_default_antenna; + u32 igmpmld_override; + u32 igmpmld_tid; + u32 antenna_gain; + u32 rx_filter; + u32 set_mcast_to_ucast_tid; + u32 proxy_sta_mode; + u32 set_mcast2ucast_mode; + u32 set_mcast2ucast_buffer; + u32 remove_mcast2ucast_buffer; + u32 peer_sta_ps_statechg_enable; + u32 igmpmld_ac_override; + u32 block_interbss; + u32 set_disable_reset_cmdid; + u32 set_msdu_ttl_cmdid; + u32 set_ppdu_duration_cmdid; + u32 txbf_sound_period_cmdid; + u32 set_promisc_mode_cmdid; + u32 set_burst_mode_cmdid; + u32 en_stats; + u32 mu_group_policy; + u32 noise_detection; + u32 noise_threshold; + u32 dpd_enable; + u32 set_mcast_bcast_echo; + u32 atf_strict_sch; + u32 atf_sched_duration; + u32 ant_plzn; + u32 mgmt_retry_limit; + u32 sensitivity_level; + u32 signed_txpower_2g; + u32 signed_txpower_5g; + u32 enable_per_tid_amsdu; + u32 enable_per_tid_ampdu; + u32 cca_threshold; + u32 rts_fixed_rate; + u32 pdev_reset; + u32 wapi_mbssid_offset; + u32 arp_srcaddr; + u32 arp_dstaddr; }; #define WMI_PDEV_PARAM_UNSUPPORTED 0 @@ -2828,6 +3535,100 @@ enum wmi_10x_pdev_param { WMI_10X_PDEV_PARAM_CAL_PERIOD }; +enum wmi_10_4_pdev_param { + WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1, + WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK, + WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G, + WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G, + WMI_10_4_PDEV_PARAM_TXPOWER_SCALE, + WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE, + WMI_10_4_PDEV_PARAM_BEACON_TX_MODE, + WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + WMI_10_4_PDEV_PARAM_PROTECTION_MODE, + WMI_10_4_PDEV_PARAM_DYNAMIC_BW, + WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH, + WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH, + WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING, + WMI_10_4_PDEV_PARAM_LTR_ENABLE, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE, + WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + WMI_10_4_PDEV_PARAM_L1SS_ENABLE, + WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_PMF_QOS, + WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE, + WMI_10_4_PDEV_PARAM_DCS, + WMI_10_4_PDEV_PARAM_ANI_ENABLE, + WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD, + WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD, + WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL, + WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL, + WMI_10_4_PDEV_PARAM_DYNTXCHAIN, + WMI_10_4_PDEV_PARAM_PROXY_STA, + WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG, + WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP, + WMI_10_4_PDEV_PARAM_AGGR_BURST, + WMI_10_4_PDEV_PARAM_RX_DECAP_MODE, + WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET, + WMI_10_4_PDEV_PARAM_BURST_DUR, + WMI_10_4_PDEV_PARAM_BURST_ENABLE, + WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE, + 
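/* Editor's note: like the service and command enums, these per-version
 * pdev param values are firmware ABI, implicitly numbered from 0x1 with 0
 * reserved for WMI_PDEV_PARAM_UNSUPPORTED. The driver never uses them
 * directly: ath10k_wmi_attach() installs wmi_10_4_pdev_param_map, which
 * translates the abstract wmi_pdev_param_map fields added above into
 * these values, and a translation of 0 means the parameter is not
 * available on this firmware branch. */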
WMI_10_4_PDEV_PARAM_IGMPMLD_TID, + WMI_10_4_PDEV_PARAM_ANTENNA_GAIN, + WMI_10_4_PDEV_PARAM_RX_FILTER, + WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID, + WMI_10_4_PDEV_PARAM_PROXY_STA_MODE, + WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE, + WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, + WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE, + WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE, + WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS, + WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID, + WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID, + WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID, + WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID, + WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID, + WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID, + WMI_10_4_PDEV_PARAM_EN_STATS, + WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY, + WMI_10_4_PDEV_PARAM_NOISE_DETECTION, + WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD, + WMI_10_4_PDEV_PARAM_DPD_ENABLE, + WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO, + WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH, + WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION, + WMI_10_4_PDEV_PARAM_ANT_PLZN, + WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT, + WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL, + WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G, + WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G, + WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU, + WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU, + WMI_10_4_PDEV_PARAM_CCA_THRESHOLD, + WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE, + WMI_10_4_PDEV_PARAM_CAL_PERIOD, + WMI_10_4_PDEV_PARAM_PDEV_RESET, + WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, + WMI_10_4_PDEV_PARAM_ARP_SRCADDR, + WMI_10_4_PDEV_PARAM_ARP_DSTADDR, +}; + struct wmi_pdev_set_param_cmd { __le32 param_id; __le32 param_value; @@ -3506,6 +4307,22 @@ struct wmi_vdev_param_map { u32 drop_unencry; u32 tx_encap_type; u32 ap_detect_out_of_sync_sleeping_sta_time_secs; + u32 rc_num_retries; + u32 cabq_maxdur; + u32 mfptest_set; + u32 rts_fixed_rate; + u32 vht_sgimask; + u32 vht80_ratemask; + u32 early_rx_adjust_enable; + u32 early_rx_tgt_bmiss_num; + u32 early_rx_bmiss_sample_cycle; + u32 early_rx_slop_step; + u32 early_rx_init_slop; + u32 early_rx_adjust_pause; + u32 proxy_sta; + u32 meru_vc; + u32 rx_decap_type; + u32 bw_nss_ratemask; }; #define WMI_VDEV_PARAM_UNSUPPORTED 0 @@ -3764,11 +4581,85 @@ enum wmi_10x_vdev_param { WMI_10X_VDEV_PARAM_VHT80_RATEMASK, }; +enum wmi_10_4_vdev_param { + WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1, + WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + WMI_10_4_VDEV_PARAM_BEACON_INTERVAL, + WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL, + WMI_10_4_VDEV_PARAM_MULTICAST_RATE, + WMI_10_4_VDEV_PARAM_MGMT_TX_RATE, + WMI_10_4_VDEV_PARAM_SLOT_TIME, + WMI_10_4_VDEV_PARAM_PREAMBLE, + WMI_10_4_VDEV_PARAM_SWBA_TIME, + WMI_10_4_VDEV_STATS_UPDATE_PERIOD, + WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME, + WMI_10_4_VDEV_HOST_SWBA_INTERVAL, + WMI_10_4_VDEV_PARAM_DTIM_PERIOD, + WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + WMI_10_4_VDEV_PARAM_WDS, + WMI_10_4_VDEV_PARAM_ATIM_WINDOW, + WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX, + WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT, + WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT, + WMI_10_4_VDEV_PARAM_FEATURE_WMM, + WMI_10_4_VDEV_PARAM_CHWIDTH, + WMI_10_4_VDEV_PARAM_CHEXTOFFSET, + WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION, + WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT, + WMI_10_4_VDEV_PARAM_MGMT_RATE, + WMI_10_4_VDEV_PARAM_PROTECTION_MODE, + WMI_10_4_VDEV_PARAM_FIXED_RATE, + WMI_10_4_VDEV_PARAM_SGI, + WMI_10_4_VDEV_PARAM_LDPC, + WMI_10_4_VDEV_PARAM_TX_STBC, + WMI_10_4_VDEV_PARAM_RX_STBC, + WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD, + WMI_10_4_VDEV_PARAM_DEF_KEYID, + WMI_10_4_VDEV_PARAM_NSS, + 
WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE, + WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE, + WMI_10_4_VDEV_PARAM_MCAST_INDICATE, + WMI_10_4_VDEV_PARAM_DHCP_INDICATE, + WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS, + WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET, + WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS, + WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES, + WMI_10_4_VDEV_PARAM_TXBF, + WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE, + WMI_10_4_VDEV_PARAM_DROP_UNENCRY, + WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE, + WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + WMI_10_4_VDEV_PARAM_CABQ_MAXDUR, + WMI_10_4_VDEV_PARAM_MFPTEST_SET, + WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE, + WMI_10_4_VDEV_PARAM_VHT_SGIMASK, + WMI_10_4_VDEV_PARAM_VHT80_RATEMASK, + WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE, + WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM, + WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE, + WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP, + WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP, + WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE, + WMI_10_4_VDEV_PARAM_PROXY_STA, + WMI_10_4_VDEV_PARAM_MERU_VC, + WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, + WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, +}; + #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0) #define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1) #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2) #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) +#define WMI_TXBF_STS_CAP_OFFSET_LSB 4 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 +#define WMI_BF_SOUND_DIM_OFFSET_LSB 8 +#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 + /* slot time long */ #define WMI_VDEV_SLOT_TIME_LONG 0x1 /* slot time short */ @@ -4305,6 +5196,14 @@ struct wmi_tim_info { __le32 tim_num_ps_pending; } __packed; +struct wmi_tim_info_arg { + __le32 tim_len; + __le32 tim_mcast; + const __le32 *tim_bitmap; + __le32 tim_changed; + __le32 tim_num_ps_pending; +} __packed; + /* Maximum number of NOA Descriptors supported */ #define WMI_P2P_MAX_NOA_DESCRIPTORS 4 #define WMI_P2P_OPPPS_ENABLE_BIT BIT(0) @@ -4336,6 +5235,47 @@ struct wmi_host_swba_event { struct wmi_bcn_info bcn_info[0]; } __packed; +/* 16 words cover 512 clients, plus 1 guard word */ +#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17 + +struct wmi_10_4_tim_info { + __le32 tim_len; + __le32 tim_mcast; + __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE]; + __le32 tim_changed; + __le32 tim_num_ps_pending; +} __packed; + +#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1 + +struct wmi_10_4_p2p_noa_info { + /* Bit 0 - Flag to indicate an update in NOA schedule + * Bits 7-1 - Reserved + */ + u8 changed; + /* NOA index */ + u8 index; + /* Bit 0 - Opp PS state of the AP + * Bits 1-7 - Ctwindow in TUs + */ + u8 ctwindow_oppps; + /* Number of NOA descriptors */ + u8 num_descriptors; + + struct wmi_p2p_noa_descriptor + noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS]; +} __packed; + +struct wmi_10_4_bcn_info { + struct wmi_10_4_tim_info tim_info; + struct wmi_10_4_p2p_noa_info p2p_noa_info; +} __packed; + +struct wmi_10_4_host_swba_event { + __le32 vdev_map; + struct wmi_10_4_bcn_info bcn_info[0]; +} __packed; + #define WMI_MAX_AP_VDEV 16 struct wmi_tbtt_offset_event { @@ -4660,11 +5600,24 @@ struct wmi_chan_info_event { __le32 cycle_count; } __packed; +struct wmi_10_4_chan_info_event { + __le32 err_code; + __le32 freq; + __le32 cmd_flags; + __le32 noise_floor; + __le32 rx_clear_count; + __le32 cycle_count; + __le32 chan_tx_pwr_range; +
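/* Editor's note: the 10.4 chan info event extends the common layout with
 * three trailing fields (chan_tx_pwr_range, chan_tx_pwr_tp and
 * rx_frame_count; the exact semantics of the two tx-power fields are
 * firmware-defined). The same three members are mirrored into
 * wmi_ch_info_ev_arg below so the shared channel survey code can carry
 * them regardless of firmware version. */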
__le32 chan_tx_pwr_tp; + __le32 rx_frame_count; +} __packed; + struct wmi_peer_sta_kickout_event { struct wmi_mac_addr peer_macaddr; } __packed; #define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0) +#define WMI_CHAN_INFO_FLAG_PRE_COMPLETE BIT(1) /* Beacon filter wmi command info */ #define BCN_FLT_MAX_SUPPORTED_IES 256 @@ -4840,6 +5793,9 @@ struct wmi_ch_info_ev_arg { __le32 noise_floor; __le32 rx_clear_count; __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; }; struct wmi_vdev_start_ev_arg { @@ -4855,16 +5811,29 @@ struct wmi_peer_kick_ev_arg { struct wmi_swba_ev_arg { __le32 vdev_map; - const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV]; + struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV]; const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV]; }; struct wmi_phyerr_ev_arg { - __le32 num_phyerrs; - __le32 tsf_l32; - __le32 tsf_u32; - __le32 buf_len; - const struct wmi_phyerr *phyerrs; + u32 tsf_timestamp; + u16 freq1; + u16 freq2; + u8 rssi_combined; + u8 chan_width_mhz; + u8 phy_err_code; + u16 nf_chains[4]; + u32 buf_len; + const u8 *buf; + u8 hdr_len; +}; + +struct wmi_phyerr_hdr_arg { + u32 num_phyerrs; + u32 tsf_l32; + u32 tsf_u32; + u32 buf_len; + const void *phyerrs; }; struct wmi_svc_rdy_ev_arg { @@ -5085,6 +6054,12 @@ struct wmi_tdls_peer_capab_arg { u32 pref_offchan_bw; }; +enum wmi_txbf_conf { + WMI_TXBF_CONF_UNSUPPORTED, + WMI_TXBF_CONF_BEFORE_ASSOC, + WMI_TXBF_CONF_AFTER_ASSOC, +}; + struct ath10k; struct ath10k_vif; struct ath10k_fw_stats_pdev; @@ -5136,9 +6111,9 @@ void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_dfs(struct ath10k *ar, - const struct wmi_phyerr *phyerr, u64 tsf); + struct wmi_phyerr_ev_arg *phyerr, u64 tsf); void ath10k_wmi_event_spectral_scan(struct ath10k *ar, - const struct wmi_phyerr *phyerr, + struct wmi_phyerr_ev_arg *phyerr, u64 tsf); void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb); @@ -5167,5 +6142,6 @@ void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb); - +int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf, + int left_len, struct wmi_phyerr_ev_arg *arg); #endif /* _WMI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index a68d8fd85..8e02b3819 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -301,8 +301,26 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw) ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret); exit: + if (ret) { + switch (ar->state) { + case ATH10K_STATE_ON: + ar->state = ATH10K_STATE_RESTARTING; + ret = 1; + break; + case ATH10K_STATE_OFF: + case ATH10K_STATE_RESTARTING: + case ATH10K_STATE_RESTARTED: + case ATH10K_STATE_UTF: + case ATH10K_STATE_WEDGED: + ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n", + ar->state); + ret = -EIO; + break; + } + } + mutex_unlock(&ar->conf_mutex); - return ret ? 
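/* Editor's note: the reworked exit path above folds the old
 * "return ret ? 1 : 0" into explicit state handling. On a wakeup failure
 * with the device ON, the state moves to ATH10K_STATE_RESTARTING and 1 is
 * returned, which mac80211 takes as a request to fully reconfigure the
 * hardware on resume; any other state is treated as unrecoverable and
 * reported as -EIO. */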
1 : 0; + return ret; } int ath10k_wow_init(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig index 2399a3921..b1278f9f2 100644 --- a/drivers/net/wireless/ath/ath5k/Kconfig +++ b/drivers/net/wireless/ath/ath5k/Kconfig @@ -5,7 +5,6 @@ config ATH5K select MAC80211_LEDS select LEDS_CLASS select NEW_LEDS - select AVERAGE select ATH5K_AHB if ATH25 select ATH5K_PCI if !ATH25 ---help--- diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c index 5c0087576..38be2702c 100644 --- a/drivers/net/wireless/ath/ath5k/ani.c +++ b/drivers/net/wireless/ath/ath5k/ani.c @@ -223,7 +223,7 @@ static void ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, bool ofdm_trigger) { - int rssi = ewma_read(&ah->ah_beacon_rssi_avg); + int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg); ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)", ofdm_trigger ? "ODFM" : "CCK"); @@ -309,7 +309,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, static void ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as) { - int rssi = ewma_read(&ah->ah_beacon_rssi_avg); + int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg); ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity"); diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index e22b0e778..fa6e89e5c 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1252,6 +1252,8 @@ struct ath5k_statistics { #define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */ #define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */ +DECLARE_EWMA(beacon_rssi, 1024, 8) + /* Driver state associated with an instance of a device */ struct ath5k_hw { struct ath_common common; @@ -1432,7 +1434,7 @@ struct ath5k_hw { struct ath5k_nfcal_hist ah_nfcal_hist; /* average beacon RSSI in our BSS (used by ANI) */ - struct ewma ah_beacon_rssi_avg; + struct ewma_beacon_rssi ah_beacon_rssi_avg; /* noise floor from last periodic calibration */ s32 ah_noise_floor; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 23552f43d..342563a37 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1430,7 +1430,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, trace_ath5k_rx(ah, skb); if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) { - ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi); + ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi); /* check beacons in IBSS mode */ if (ah->opmode == NL80211_IFTYPE_ADHOC) @@ -2936,7 +2936,7 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, ah->ah_cal_next_short = jiffies + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); - ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8); + ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg); /* clear survey data and cycle counters */ memset(&ah->survey, 0, sizeof(ah->survey)); diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index c70782e8f..654a1e33f 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -722,7 +722,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf, st->mib_intr); len += snprintf(buf + len, sizeof(buf) - len, "beacon RSSI average:\t%d\n", - (int)ewma_read(&ah->ah_beacon_rssi_avg)); + 
(int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg)); #define CC_PRINT(_struct, _field) \ _struct._field, \ diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h index 14cab1403..112d8a9b8 100644 --- a/drivers/net/wireless/ath/ath6kl/htc.h +++ b/drivers/net/wireless/ath/ath6kl/htc.h @@ -427,7 +427,7 @@ struct htc_endpoint_credit_dist { }; /* - * credit distibution code that is passed into the distrbution function, + * credit distribution code that is passed into the distribution function, * there are mandatory and optional codes that must be handled */ enum htc_credit_dist_reason { diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index ba5087d59..d4c56e9fd 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -712,6 +712,7 @@ static bool check_device_tree(struct ath6kl *ar) board_filename, ret); continue; } + of_node_put(node); return true; } return false; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index b921005ad..a5e1de75a 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -154,7 +154,7 @@ struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx) } /* Performs DIX to 802.3 encapsulation for transmit packets. - * Assumes the entire DIX header is contigous and that there is + * Assumes the entire DIX header is contiguous and that there is * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers. */ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb) @@ -449,7 +449,7 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb) /* * Performs 802.3 to DIX encapsulation for received packets. - * Assumes the entire 802.3 header is contigous. + * Assumes the entire 802.3 header is contiguous. */ int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb) { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index fc595b92a..c5f8bc4b5 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -455,7 +455,7 @@ #define AR_PHY_MODE (AR_SM_BASE + 0x8) #define AR_PHY_ACTIVE (AR_SM_BASE + 0xc) #define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20)) -#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24) +#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24)) #define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28) #define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c) #define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30) @@ -495,7 +495,7 @@ #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0 -#define AR_PHY_TEST (AR_SM_BASE + 0x160) +#define AR_PHY_TEST (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160)) #define AR_PHY_TEST_BBB_OBS_SEL 0x780000 #define AR_PHY_TEST_BBB_OBS_SEL_S 19 @@ -521,24 +521,29 @@ #define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29 -#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168) +#define AR_PHY_TSTDAC (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168)) -#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c) +#define AR_PHY_CHAN_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c)) #define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 
0x16c : 0x170)) #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3 -#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174) -#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178) -#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c) -#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180) -#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190) -#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194) +#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174)) +#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178)) +#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c)) +#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180)) +#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190)) +#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194)) #define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4)) #define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8) #define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac) +#define AR_PHY_HEAVYCLIP_1 (AR_SM_BASE + 0x19c) +#define AR_PHY_HEAVYCLIP_2 (AR_SM_BASE + 0x1a0) +#define AR_PHY_HEAVYCLIP_3 (AR_SM_BASE + 0x1a4) +#define AR_PHY_HEAVYCLIP_4 (AR_SM_BASE + 0x1a8) +#define AR_PHY_HEAVYCLIP_5 (AR_SM_BASE + 0x1ac) #define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0) #define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2)) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index a7a81b396..c85c47978 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -172,14 +172,6 @@ struct ath_txq { struct sk_buff_head complete_q; }; -struct ath_atx_ac { - struct ath_txq *txq; - struct list_head list; - struct list_head tid_q; - bool clear_ps_filter; - bool sched; -}; - struct ath_frame_info { struct ath_buf *bf; u16 framelen; @@ -242,7 +234,7 @@ struct ath_atx_tid { struct sk_buff_head buf_q; struct sk_buff_head retry_q; struct ath_node *an; - struct ath_atx_ac *ac; + struct ath_txq *txq; unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; u16 seq_start; u16 seq_next; @@ -252,8 +244,8 @@ struct ath_atx_tid { int baw_tail; /* next unused tx buffer slot */ s8 bar_index; - bool sched; bool active; + bool clear_ps_filter; }; struct ath_node { @@ -261,7 +253,6 @@ struct ath_node { struct ieee80211_sta *sta; /* station struct we're part of */ struct ieee80211_vif *vif; /* interface with which we're associated */ struct ath_atx_tid tid[IEEE80211_NUM_TIDS]; - struct ath_atx_ac ac[IEEE80211_NUM_ACS]; u16 maxampdu; u8 mpdudensity; @@ -410,6 +401,12 @@ enum ath_offchannel_state { ATH_OFFCHANNEL_ROC_DONE, }; +enum ath_roc_complete_reason { + ATH_ROC_COMPLETE_EXPIRE, + ATH_ROC_COMPLETE_ABORT, + ATH_ROC_COMPLETE_CANCEL, +}; + struct ath_offchannel { struct ath_chanctx chan; struct timer_list timer; @@ -471,7 +468,8 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, void ath_chanctx_set_next(struct ath_softc *sc, bool force); void ath_offchannel_next(struct ath_softc *sc); void ath_scan_complete(struct ath_softc *sc, bool abort); -void ath_roc_complete(struct ath_softc *sc, bool abort); +void ath_roc_complete(struct ath_softc *sc, + enum ath_roc_complete_reason reason); struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc); #else diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 206665059..90f5773a1 
100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -915,18 +915,27 @@ void ath_offchannel_next(struct ath_softc *sc) } } -void ath_roc_complete(struct ath_softc *sc, bool abort) +void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); - if (abort) + sc->offchannel.roc_vif = NULL; + sc->offchannel.roc_chan = NULL; + + switch (reason) { + case ATH_ROC_COMPLETE_ABORT: ath_dbg(common, CHAN_CTX, "RoC aborted\n"); - else + ieee80211_remain_on_channel_expired(sc->hw); + break; + case ATH_ROC_COMPLETE_EXPIRE: ath_dbg(common, CHAN_CTX, "RoC expired\n"); + ieee80211_remain_on_channel_expired(sc->hw); + break; + case ATH_ROC_COMPLETE_CANCEL: + ath_dbg(common, CHAN_CTX, "RoC canceled\n"); + break; + } - sc->offchannel.roc_vif = NULL; - sc->offchannel.roc_chan = NULL; - ieee80211_remain_on_channel_expired(sc->hw); ath_offchannel_next(sc); ath9k_ps_restore(sc); } @@ -1058,7 +1067,7 @@ static void ath_offchannel_timer(unsigned long data) case ATH_OFFCHANNEL_ROC_START: case ATH_OFFCHANNEL_ROC_WAIT: sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE; - ath_roc_complete(sc, false); + ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE); break; default: break; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index dbf8f4959..da32c8faa 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -765,6 +765,8 @@ static int read_file_reset(struct seq_file *file, void *data) [RESET_TYPE_BEACON_STUCK] = "Stuck Beacon", [RESET_TYPE_MCI] = "MCI Reset", [RESET_TYPE_CALIBRATION] = "Calibration error", + [RESET_TX_DMA_ERROR] = "Tx DMA stop error", + [RESET_RX_DMA_ERROR] = "Rx DMA stop error", }; int i; diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index a8e931995..cd68c5f0e 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -50,6 +50,8 @@ enum ath_reset_type { RESET_TYPE_BEACON_STUCK, RESET_TYPE_MCI, RESET_TYPE_CALIBRATION, + RESET_TX_DMA_ERROR, + RESET_RX_DMA_ERROR, __RESET_TYPE_MAX }; diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index ffca918ff..c2ca57a2e 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -26,12 +26,11 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, struct ath_node *an = file->private_data; struct ath_softc *sc = an->sc; struct ath_atx_tid *tid; - struct ath_atx_ac *ac; struct ath_txq *txq; u32 len = 0, size = 4096; char *buf; size_t retval; - int tidno, acno; + int tidno; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) @@ -48,19 +47,6 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n", an->mpdudensity); - len += scnprintf(buf + len, size - len, - "%2s%7s\n", "AC", "SCHED"); - - for (acno = 0, ac = &an->ac[acno]; - acno < IEEE80211_NUM_ACS; acno++, ac++) { - txq = ac->txq; - ath_txq_lock(sc, txq); - len += scnprintf(buf + len, size - len, - "%2d%7d\n", - acno, ac->sched); - ath_txq_unlock(sc, txq); - } - len += scnprintf(buf + len, size - len, "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n", "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE", @@ -68,7 +54,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, for (tidno = 0, tid = &an->tid[tidno]; tidno < 
IEEE80211_NUM_TIDS; tidno++, tid++) { - txq = tid->ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); if (tid->active) { len += scnprintf(buf + len, size - len, @@ -80,7 +66,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, tid->baw_head, tid->baw_tail, tid->bar_index, - tid->sched); + !list_empty(&tid->list)); } ath_txq_unlock(sc, txq); } diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c index e98a9eaba..1ece42c24 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.c +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -30,6 +30,157 @@ struct ath_radar_data { u8 pulse_length_pri; }; +/**** begin: CHIRP ************************************************************/ + +/* min and max gradients for defined FCC chirping pulses, given by + * - 20MHz chirp width over a pulse width of 50us + * - 5MHz chirp width over a pulse width of 100us + */ +static const int BIN_DELTA_MIN = 1; +static const int BIN_DELTA_MAX = 10; + +/* we need at least 3 deltas / 4 samples for a reliable chirp detection */ +#define NUM_DIFFS 3 +static const int FFT_NUM_SAMPLES = (NUM_DIFFS + 1); + +/* Threshold for difference of delta peaks */ +static const int MAX_DIFF = 2; + +/* width range to be checked for chirping */ +static const int MIN_CHIRP_PULSE_WIDTH = 20; +static const int MAX_CHIRP_PULSE_WIDTH = 110; + +struct ath9k_dfs_fft_20 { + u8 bin[28]; + u8 lower_bins[3]; +} __packed; +struct ath9k_dfs_fft_40 { + u8 bin[64]; + u8 lower_bins[3]; + u8 upper_bins[3]; +} __packed; + +static inline int fft_max_index(u8 *bins) +{ + return (bins[2] & 0xfc) >> 2; +} +static inline int fft_max_magnitude(u8 *bins) +{ + return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10; +} +static inline u8 fft_bitmap_weight(u8 *bins) +{ + return bins[0] & 0x3f; +} + +static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft, + bool is_ctl, bool is_ext) +{ + const int DFS_UPPER_BIN_OFFSET = 64; + /* if detected radar on both channels, select the significant one */ + if (is_ctl && is_ext) { + /* first check whether channels have 'strong' bins */ + is_ctl = fft_bitmap_weight(fft->lower_bins) != 0; + is_ext = fft_bitmap_weight(fft->upper_bins) != 0; + + /* if still unclear, take higher magnitude */ + if (is_ctl && is_ext) { + int mag_lower = fft_max_magnitude(fft->lower_bins); + int mag_upper = fft_max_magnitude(fft->upper_bins); + if (mag_upper > mag_lower) + is_ctl = false; + else + is_ext = false; + } + } + if (is_ctl) + return fft_max_index(fft->lower_bins); + return fft_max_index(fft->upper_bins) + DFS_UPPER_BIN_OFFSET; +} +static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data, + int datalen, bool is_ctl, bool is_ext) +{ + int i; + int max_bin[FFT_NUM_SAMPLES]; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + int prev_delta; + + if (IS_CHAN_HT40(ah->curchan)) { + struct ath9k_dfs_fft_40 *fft = (struct ath9k_dfs_fft_40 *) data; + int num_fft_packets = datalen / sizeof(*fft); + if (num_fft_packets == 0) + return false; + + ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n", + datalen, num_fft_packets); + if (num_fft_packets < (FFT_NUM_SAMPLES)) { + ath_dbg(common, DFS, "not enough packets for chirp\n"); + return false; + } + /* HW sometimes adds 2 garbage bytes in front of FFT samples */ + if ((datalen % sizeof(*fft)) == 2) { + fft = (struct ath9k_dfs_fft_40 *) (data + 2); + ath_dbg(common, DFS, "fixing datalen by 2\n"); + } + if (IS_CHAN_HT40MINUS(ah->curchan)) { + int temp = is_ctl; + is_ctl = is_ext; + 
is_ext = temp; + } + for (i = 0; i < FFT_NUM_SAMPLES; i++) + max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl, + is_ext); + } else { + struct ath9k_dfs_fft_20 *fft = (struct ath9k_dfs_fft_20 *) data; + int num_fft_packets = datalen / sizeof(*fft); + if (num_fft_packets == 0) + return false; + ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n", + datalen, num_fft_packets); + if (num_fft_packets < (FFT_NUM_SAMPLES)) { + ath_dbg(common, DFS, "not enough packets for chirp\n"); + return false; + } + /* in ht20, this is a 6-bit signed number => shift it to 0 */ + for (i = 0; i < FFT_NUM_SAMPLES; i++) + max_bin[i] = fft_max_index(fft[i].lower_bins) ^ 0x20; + } + ath_dbg(common, DFS, "bin_max = [%d, %d, %d, %d]\n", + max_bin[0], max_bin[1], max_bin[2], max_bin[3]); + + /* Check for chirp attributes within specs + * a) delta of adjacent max_bins is within range + * b) delta of adjacent deltas are within tolerance + */ + prev_delta = 0; + for (i = 0; i < NUM_DIFFS; i++) { + int ddelta = -1; + int delta = max_bin[i + 1] - max_bin[i]; + + /* ensure gradient is within valid range */ + if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX) { + ath_dbg(common, DFS, "CHIRP: invalid delta %d " + "in sample %d\n", delta, i); + return false; + } + if (i == 0) + goto done; + ddelta = delta - prev_delta; + if (abs(ddelta) > MAX_DIFF) { + ath_dbg(common, DFS, "CHIRP: ddelta %d too high\n", + ddelta); + return false; + } +done: + ath_dbg(common, DFS, "CHIRP - %d: delta=%d, ddelta=%d\n", + i, delta, ddelta); + prev_delta = delta; + } + return true; +} +/**** end: CHIRP **************************************************************/ + /* convert pulse duration to usecs, considering clock mode */ static u32 dur_to_usecs(struct ath_hw *ah, u32 dur) { @@ -113,12 +264,6 @@ ath9k_postprocess_radar_event(struct ath_softc *sc, return false; } - /* - * TODO: check chirping pulses - * checks for chirping are dependent on the DFS regulatory domain - * used, which is yet TBD - */ - /* convert duration to usecs */ pe->width = dur_to_usecs(sc->sc_ah, dur); pe->rssi = rssi; @@ -190,6 +335,16 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, if (!ath9k_postprocess_radar_event(sc, &ard, &pe)) return; + if (pe.width > MIN_CHIRP_PULSE_WIDTH && + pe.width < MAX_CHIRP_PULSE_WIDTH) { + bool is_ctl = !!(ard.pulse_bw_info & PRI_CH_RADAR_FOUND); + bool is_ext = !!(ard.pulse_bw_info & EXT_CH_RADAR_FOUND); + int clen = datalen - 3; + pe.chirp = ath9k_check_chirping(sc, data, clen, is_ctl, is_ext); + } else { + pe.chirp = false; + } + ath_dbg(common, DFS, "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, " "width=%d, rssi=%d, delta_ts=%llu\n", @@ -198,7 +353,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, sc->dfs_prev_pulse_ts = pe.ts; if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND) ath9k_dfs_process_radar_pulse(sc, &pe); - if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) { + if (IS_CHAN_HT40(ah->curchan) && + ard.pulse_bw_info & EXT_CH_RADAR_FOUND) { pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 
20 : -20; ath9k_dfs_process_radar_pulse(sc, &pe); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 39eaf9b6e..1e84882f8 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -74,7 +74,7 @@ static struct ath_ps_ops ath9k_htc_ps_ops = { static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv) { - int time_left; + unsigned long time_left; if (atomic_read(&priv->htc->tgt_ready) > 0) { atomic_dec(&priv->htc->tgt_ready); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index dab1323df..172a9ff4a 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -794,8 +794,11 @@ void ath9k_htc_ani_work(struct work_struct *work) common->ani.longcal_timer = timestamp; } - /* Short calibration applies only while caldone is false */ - if (!common->ani.caldone) { + /* + * Short calibration applies only while caldone + * is false or -ETIMEDOUT + */ + if (common->ani.caldone <= 0) { if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { shortcal = true; @@ -844,7 +847,11 @@ set_timer: */ cal_interval = ATH_LONG_CALINTERVAL; cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); - if (!common->ani.caldone) + /* + * Short calibration applies only while caldone + * is false or -ETIMEDOUT + */ + if (common->ani.caldone <= 0) cal_interval = min(cal_interval, (u32)short_cal_interval); ieee80211_queue_delayed_work(common->hw, &priv->ani_work, diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index d2408da38..2294709ee 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -146,7 +146,8 @@ static int htc_config_pipe_credits(struct htc_target *target) { struct sk_buff *skb; struct htc_config_pipe_msg *cp_msg; - int ret, time_left; + int ret; + unsigned long time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { @@ -184,7 +185,8 @@ static int htc_setup_complete(struct htc_target *target) { struct sk_buff *skb; struct htc_comp_msg *comp_msg; - int ret = 0, time_left; + int ret = 0; + unsigned long time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { @@ -236,7 +238,8 @@ int htc_connect_service(struct htc_target *target, struct sk_buff *skb; struct htc_endpoint *endpoint; struct htc_conn_svc_msg *conn_msg; - int ret, time_left; + int ret; + unsigned long time_left; /* Find an available endpoint */ endpoint = get_next_avail_ep(target->endpoint); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index a31a6804d..1dd0339de 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -3186,6 +3186,7 @@ static struct { { AR_SREV_VERSION_9550, "9550" }, { AR_SREV_VERSION_9565, "9565" }, { AR_SREV_VERSION_9531, "9531" }, + { AR_SREV_VERSION_9561, "9561" }, }; /* For devices with external radios */ diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index eff0e5325..90eb75012 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -736,13 +736,14 @@ static const struct ieee80211_iface_limit if_limits_multi[] = { BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, + { .max = 1, .types = 
BIT(NL80211_IFTYPE_P2P_DEVICE) }, }; static const struct ieee80211_iface_combination if_comb_multi[] = { { .limits = if_limits_multi, .n_limits = ARRAY_SIZE(if_limits_multi), - .max_interfaces = 2, + .max_interfaces = 3, .num_different_channels = 2, .beacon_int_infra_match = true, }, @@ -826,6 +827,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); if (ath9k_ps_enable) ieee80211_hw_set(hw, SUPPORTS_PS); @@ -855,6 +857,10 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_WDS); + if (ath9k_is_chanctx_enabled()) + hw->wiphy->interface_modes |= + BIT(NL80211_IFTYPE_P2P_DEVICE); + hw->wiphy->iface_combinations = if_comb; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); } @@ -874,6 +880,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->max_rate_tries = 10; hw->sta_data_size = sizeof(struct ath_node); hw->vif_data_size = sizeof(struct ath_vif); + hw->extra_tx_headroom = 4; hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1; hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1; diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 90631d768..5ad0feeeb 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -172,7 +172,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_tx_control txctl; - int time_left; + unsigned long time_left; memset(&txctl, 0, sizeof(txctl)); txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE]; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index cfd45cb8c..c27143ba9 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1459,13 +1459,18 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, u64 multicast) { struct ath_softc *sc = hw->priv; + struct ath_chanctx *ctx; u32 rfilt; changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; spin_lock_bh(&sc->chan_lock); - sc->cur_chan->rxfilter = *total_flags; + ath_for_each_chanctx(sc, ctx) + ctx->rxfilter = *total_flags; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + sc->offchannel.chan.rxfilter = *total_flags; +#endif spin_unlock_bh(&sc->chan_lock); ath9k_ps_wakeup(sc); @@ -2246,7 +2251,7 @@ static void ath9k_cancel_pending_offchannel(struct ath_softc *sc) del_timer_sync(&sc->offchannel.timer); if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START) - ath_roc_complete(sc, true); + ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT); } if (test_bit(ATH_OP_SCANNING, &common->op_flags)) { @@ -2355,7 +2360,7 @@ static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw) if (sc->offchannel.roc_vif) { if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START) - ath_roc_complete(sc, true); + ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL); } mutex_unlock(&sc->mutex); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 6c75fb1ab..d3189daf9 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -491,10 +491,9 @@ bool ath_stoprecv(struct ath_softc *sc) if (!(ah->ah_flags & AH_UNPLUGGED) && unlikely(!stopped)) { - 
ath_err(ath9k_hw_common(sc->sc_ah), - "Could not stop RX, we could be " - "confusing the DMA engine when we start RX up\n"); - ATH_DBG_WARN_ON_ONCE(!stopped); + ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, + "Failed to stop Rx DMA\n"); + RESET_STAT_INC(sc, RESET_RX_DMA_ERROR); } return stopped && !reset; } diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index ca533b432..9c16e2a6d 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -299,7 +299,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, sizeof(struct wmi_cmd_hdr); struct sk_buff *skb; u8 *data; - int time_left, ret = 0; + unsigned long time_left; + int ret = 0; if (ah->ah_flags & AH_UNPLUGGED) return 0; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 3ad79bb4f..3e3dac3d7 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -106,7 +106,6 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid) { - struct ath_atx_ac *ac = tid->ac; struct list_head *list; struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv; struct ath_chanctx *ctx = avp->chanctx; @@ -114,19 +113,9 @@ static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq, if (!ctx) return; - if (tid->sched) - return; - - tid->sched = true; - list_add_tail(&tid->list, &ac->tid_q); - - if (ac->sched) - return; - - ac->sched = true; - list = &ctx->acq[TID_TO_WME_AC(tid->tidno)]; - list_add_tail(&ac->list, list); + if (list_empty(&tid->list)) + list_add_tail(&tid->list, list); } static struct ath_frame_info *get_frame_info(struct sk_buff *skb) @@ -208,7 +197,7 @@ static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid) static void ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid) { - struct ath_txq *txq = tid->ac->txq; + struct ath_txq *txq = tid->txq; struct ieee80211_tx_info *tx_info; struct sk_buff *skb, *tskb; struct ath_buf *bf; @@ -237,7 +226,7 @@ ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid) static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { - struct ath_txq *txq = tid->ac->txq; + struct ath_txq *txq = tid->txq; struct sk_buff *skb; struct ath_buf *bf; struct list_head bf_head; @@ -644,7 +633,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_queue_tid(sc, txq, tid); if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) - tid->ac->clear_ps_filter = true; + tid->clear_ps_filter = true; } } @@ -734,7 +723,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, struct ieee80211_tx_rate *rates; u32 max_4ms_framelen, frmlen; u16 aggr_limit, bt_aggr_limit, legacy = 0; - int q = tid->ac->txq->mac80211_qnum; + int q = tid->txq->mac80211_qnum; int i; skb = bf->bf_mpdu; @@ -1471,8 +1460,8 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, if (list_empty(&bf_q)) return false; - if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) { - tid->ac->clear_ps_filter = false; + if (tid->clear_ps_filter || tid->an->no_ps_filter) { + tid->clear_ps_filter = false; tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; } @@ -1491,7 +1480,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, an = (struct ath_node *)sta->drv_priv; txtid = ATH_AN_2_TID(an, tid); - txq = txtid->ac->txq; + txq = 
txtid->txq; ath_txq_lock(sc, txq); @@ -1525,7 +1514,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) { struct ath_node *an = (struct ath_node *)sta->drv_priv; struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); - struct ath_txq *txq = txtid->ac->txq; + struct ath_txq *txq = txtid->txq; ath_txq_lock(sc, txq); txtid->active = false; @@ -1538,7 +1527,6 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, struct ath_node *an) { struct ath_atx_tid *tid; - struct ath_atx_ac *ac; struct ath_txq *txq; bool buffered; int tidno; @@ -1546,25 +1534,18 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - ac = tid->ac; - txq = ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); - if (!tid->sched) { + if (list_empty(&tid->list)) { ath_txq_unlock(sc, txq); continue; } buffered = ath_tid_has_buffered(tid); - tid->sched = false; - list_del(&tid->list); - - if (ac->sched) { - ac->sched = false; - list_del(&ac->list); - } + list_del_init(&tid->list); ath_txq_unlock(sc, txq); @@ -1575,18 +1556,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) { struct ath_atx_tid *tid; - struct ath_atx_ac *ac; struct ath_txq *txq; int tidno; for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - ac = tid->ac; - txq = ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); - ac->clear_ps_filter = true; + tid->clear_ps_filter = true; if (ath_tid_has_buffered(tid)) { ath_tx_queue_tid(sc, txq, tid); @@ -1606,7 +1585,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, an = (struct ath_node *)sta->drv_priv; tid = ATH_AN_2_TID(an, tidno); - txq = tid->ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); @@ -1645,7 +1624,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, tid = ATH_AN_2_TID(an, i); - ath_txq_lock(sc, tid->ac->txq); + ath_txq_lock(sc, tid->txq); while (nframes > 0) { bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q); if (!bf) @@ -1669,7 +1648,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, if (an->sta && !ath_tid_has_buffered(tid)) ieee80211_sta_set_buffered(an->sta, i, false); } - ath_txq_unlock_complete(sc, tid->ac->txq); + ath_txq_unlock_complete(sc, tid->txq); } if (list_empty(&bf_q)) @@ -1883,8 +1862,11 @@ bool ath_drain_all_txq(struct ath_softc *sc) npend |= BIT(i); } - if (npend) - ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend); + if (npend) { + RESET_STAT_INC(sc, RESET_TX_DMA_ERROR); + ath_dbg(common, RESET, + "Failed to stop TX DMA, queues=0x%03x!\n", npend); + } for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { if (!ATH_TXQ_SETUP(sc, i)) @@ -1915,9 +1897,8 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_atx_ac *ac, *last_ac; struct ath_atx_tid *tid, *last_tid; - struct list_head *ac_list; + struct list_head *tid_list; bool sent = false; if (txq->mac80211_qnum < 0) @@ -1927,63 +1908,45 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) return; spin_lock_bh(&sc->chan_lock); - ac_list = &sc->cur_chan->acq[txq->mac80211_qnum]; + tid_list = &sc->cur_chan->acq[txq->mac80211_qnum]; - if (list_empty(ac_list)) { + if (list_empty(tid_list)) { spin_unlock_bh(&sc->chan_lock); return; } 
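/* [Editor's aside -- illustrative sketch, not part of the patch.] The
 * ath_txq_schedule() hunk here replaces the two-level AC->TID scheduler with
 * a single per-queue list of TIDs, using list_empty(&tid->list) as the
 * "is this TID queued?" predicate instead of a separate tid->sched flag that
 * had to be kept in sync with list membership. Below is a minimal,
 * self-contained userspace sketch of that round-robin pattern; the list
 * helpers are simplified stand-ins for <linux/list.h>, and queue_tid()
 * mirrors the patch's ath_tx_queue_tid() logic.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void init_list_head(struct list_head *h) { h->prev = h->next = h; }
static int list_is_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail_entry(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_and_reinit(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	init_list_head(n);
}

struct tid { struct list_head list; int id; int pending; };

/* queue a TID only when it is not already queued (cf. ath_tx_queue_tid) */
static void queue_tid(struct list_head *active, struct tid *t)
{
	if (list_is_empty(&t->list))
		list_add_tail_entry(&t->list, active);
}

int main(void)
{
	struct list_head active;
	struct tid t[3];
	int i;

	init_list_head(&active);
	for (i = 0; i < 3; i++) {
		init_list_head(&t[i].list);
		t[i].id = i;
		t[i].pending = i + 1;
		queue_tid(&active, &t[i]);
	}
	/* round-robin: pop the head, send one frame, requeue while work remains */
	while (!list_is_empty(&active)) {
		struct tid *cur = (struct tid *)((char *)active.next -
						 offsetof(struct tid, list));
		list_del_and_reinit(&cur->list);
		printf("tid %d sends a frame (%d left)\n",
		       cur->id, --cur->pending);
		if (cur->pending)
			queue_tid(&active, cur);
	}
	return 0;
}
/* Keying "scheduled" off list membership removes a state flag that could
 * drift out of sync with the list, which is what the tid->sched removal in
 * this patch accomplishes.
 */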
rcu_read_lock(); - last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list); - while (!list_empty(ac_list)) { + last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list); + while (!list_empty(tid_list)) { bool stop = false; if (sc->cur_chan->stopped) break; - ac = list_first_entry(ac_list, struct ath_atx_ac, list); - last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); - list_del(&ac->list); - ac->sched = false; - - while (!list_empty(&ac->tid_q)) { - - tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, - list); - list_del(&tid->list); - tid->sched = false; - - if (ath_tx_sched_aggr(sc, txq, tid, &stop)) - sent = true; - - /* - * add tid to round-robin queue if more frames - * are pending for the tid - */ - if (ath_tid_has_buffered(tid)) - ath_tx_queue_tid(sc, txq, tid); + tid = list_first_entry(tid_list, struct ath_atx_tid, list); + list_del_init(&tid->list); - if (stop || tid == last_tid) - break; - } + if (ath_tx_sched_aggr(sc, txq, tid, &stop)) + sent = true; - if (!list_empty(&ac->tid_q) && !ac->sched) { - ac->sched = true; - list_add_tail(&ac->list, ac_list); - } + /* + * add tid to round-robin queue if more frames + * are pending for the tid + */ + if (ath_tid_has_buffered(tid)) + ath_tx_queue_tid(sc, txq, tid); if (stop) break; - if (ac == last_ac) { + if (tid == last_tid) { if (!sent) break; sent = false; - last_ac = list_entry(ac_list->prev, - struct ath_atx_ac, list); + last_tid = list_entry(tid_list->prev, + struct ath_atx_tid, list); } } @@ -2373,10 +2336,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, txq = sc->tx.uapsdq; ath_txq_lock(sc, txq); } else if (txctl->an && queue) { - WARN_ON(tid->ac->txq != txctl->txq); + WARN_ON(tid->txq != txctl->txq); if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) - tid->ac->clear_ps_filter = true; + tid->clear_ps_filter = true; /* * Add this frame to software queue for scheduling later @@ -2470,8 +2433,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, bf = list_first_entry(&bf_q, struct ath_buf, list); hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; - if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) { - hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA; + if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) { + hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA); dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, sizeof(*hdr), DMA_TO_DEVICE); } @@ -2870,7 +2833,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs) void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) { struct ath_atx_tid *tid; - struct ath_atx_ac *ac; int tidno, acno; for (tidno = 0, tid = &an->tid[tidno]; @@ -2881,26 +2843,18 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->seq_start = tid->seq_next = 0; tid->baw_size = WME_MAX_BA; tid->baw_head = tid->baw_tail = 0; - tid->sched = false; tid->active = false; + tid->clear_ps_filter = true; __skb_queue_head_init(&tid->buf_q); __skb_queue_head_init(&tid->retry_q); + INIT_LIST_HEAD(&tid->list); acno = TID_TO_WME_AC(tidno); - tid->ac = &an->ac[acno]; - } - - for (acno = 0, ac = &an->ac[acno]; - acno < IEEE80211_NUM_ACS; acno++, ac++) { - ac->sched = false; - ac->clear_ps_filter = true; - ac->txq = sc->tx.txq_map[acno]; - INIT_LIST_HEAD(&ac->tid_q); + tid->txq = sc->tx.txq_map[acno]; } } void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) { - struct ath_atx_ac *ac; struct ath_atx_tid *tid; struct ath_txq *txq; int tidno; @@ -2908,20 +2862,12 @@ void ath_tx_node_cleanup(struct ath_softc 
*sc, struct ath_node *an) for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - ac = tid->ac; - txq = ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); - if (tid->sched) { - list_del(&tid->list); - tid->sched = false; - } - - if (ac->sched) { - list_del(&ac->list); - tid->ac->sched = false; - } + if (!list_empty(&tid->list)) + list_del_init(&tid->list); ath_tid_drain(sc, txq, tid); tid->active = false; diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c index 508eccf5d..d59d83e0c 100644 --- a/drivers/net/wireless/ath/debug.c +++ b/drivers/net/wireless/ath/debug.c @@ -40,6 +40,8 @@ const char *ath_opmode_to_string(enum nl80211_iftype opmode) return "P2P-CLIENT"; case NL80211_IFTYPE_P2P_GO: return "P2P-GO"; + case NL80211_IFTYPE_OCB: + return "OCB"; default: return "UNKNOWN"; } diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c index 1b5ad1965..cc5c592fc 100644 --- a/drivers/net/wireless/ath/dfs_pri_detector.c +++ b/drivers/net/wireless/ath/dfs_pri_detector.c @@ -273,7 +273,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde, tmp_false_count++; } } - if (ps.count < min_count) + if (ps.count <= min_count) /* did not reach minimum count, drop sequence */ continue; diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index 050506f84..64b432625 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -12,6 +12,7 @@ wil6210-y += debug.o wil6210-y += rx_reorder.o wil6210-y += ioctl.o wil6210-y += fw.o +wil6210-y += pm.o wil6210-y += pmc.o wil6210-$(CONFIG_WIL6210_TRACING) += trace.o wil6210-y += wil_platform.o diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h new file mode 100644 index 000000000..c131b5e12 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/boot_loader.h @@ -0,0 +1,61 @@ +/* Copyright (c) 2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This file contains the definitions for the boot loader + * for the Qualcomm "Sparrow" 60 Gigabit wireless solution. + */ +#ifndef BOOT_LOADER_EXPORT_H_ +#define BOOT_LOADER_EXPORT_H_ + +struct bl_dedicated_registers_v1 { + __le32 boot_loader_ready; /* 0x880A3C driver will poll + * this Dword until BL will + * set it to 1 (initial value + * should be 0) + */ + __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */ + __le16 rf_type; /* 0x880A44 connected RF ID */ + __le16 rf_status; /* 0x880A46 RF status, + * 0 is OK else error + */ + __le32 baseband_type; /* 0x880A48 board type ID */ + u8 mac_address[6]; /* 0x880A4c BL mac address */ + u8 bl_version_major; /* 0x880A52 BL ver. major */ + u8 bl_version_minor; /* 0x880A53 BL ver. 
minor */ + __le16 bl_version_subminor; /* 0x880A54 BL ver. subminor */ + __le16 bl_version_build; /* 0x880A56 BL ver. build */ + /* valid only for version 2 and above */ + __le32 bl_assert_code; /* 0x880A58 BL Assert code */ + __le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */ + __le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */ + __le32 bl_magic_number; /* 0x880AB8 BL Magic number */ +} __packed; + +/* the following struct is the version 0 struct */ + +struct bl_dedicated_registers_v0 { + __le32 boot_loader_ready; /* 0x880A3C driver will poll + * this Dword until BL will + * set it to 1 (initial value + * should be 0) + */ +#define BL_READY (1) /* ready indication */ + __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */ + __le32 rf_type; /* 0x880A44 connected RF ID */ + __le32 baseband_type; /* 0x880A48 board type ID */ + u8 mac_address[6]; /* 0x880A4c BL mac address */ +} __packed; + +#endif /* BOOT_LOADER_EXPORT_H_ */ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index c79cfe02e..20d07ef67 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -336,12 +336,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, else wil_dbg_misc(wil, "Scan has no IE's\n"); - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, - request->ie); - if (rc) { - wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc); + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); + if (rc) goto out; - } rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); @@ -462,10 +459,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, * ies in FW. */ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); - if (rc) { - wil_err(wil, "WMI_SET_APPIE_CMD failed\n"); + if (rc) goto out; - } /* WMI_CONNECT_CMD */ memset(&conn, 0, sizeof(conn)); @@ -722,17 +717,98 @@ static int wil_fix_bcon(struct wil6210_priv *wil, { struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - int rc = 0; if (bcon->probe_resp_len <= hlen) return 0; +/* always use IE's from the full probe frame; they have more info, + * notably RSN + */ + bcon->proberesp_ies = f->u.probe_resp.variable; + bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; if (!bcon->assocresp_ies) { - bcon->assocresp_ies = f->u.probe_resp.variable; - bcon->assocresp_ies_len = bcon->probe_resp_len - hlen; - rc = 1; + bcon->assocresp_ies = bcon->proberesp_ies; + bcon->assocresp_ies_len = bcon->proberesp_ies_len; } + return 1; +} + +/* internal functions for device reset and starting AP */ +static int _wil_cfg80211_set_ies(struct wiphy *wiphy, + struct cfg80211_beacon_data *bcon) +{ + int rc; + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, + bcon->proberesp_ies); + if (rc) + return rc; + + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, + bcon->assocresp_ies); +#if 0 /* to use beacon IE's, remove this #if 0 */ + if (rc) + return rc; + + rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail); +#endif + + return rc; +} + +static int _wil_cfg80211_start_ap(struct wiphy *wiphy, + struct net_device *ndev, + const u8 *ssid, size_t ssid_len, u32 privacy, + int bi, u8 chan, + struct cfg80211_beacon_data *bcon, + u8 hidden_ssid) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + 
int rc; + struct wireless_dev *wdev = ndev->ieee80211_ptr; + u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); + + wil_set_recovery_state(wil, fw_recovery_idle); + + mutex_lock(&wil->mutex); + + __wil_down(wil); + rc = __wil_up(wil); + if (rc) + goto out; + + rc = wmi_set_ssid(wil, ssid_len, ssid); + if (rc) + goto out; + + rc = _wil_cfg80211_set_ies(wiphy, bcon); + if (rc) + goto out; + + wil->privacy = privacy; + wil->channel = chan; + wil->hidden_ssid = hidden_ssid; + + netif_carrier_on(ndev); + + rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid); + if (rc) + goto err_pcp_start; + + rc = wil_bcast_init(wil); + if (rc) + goto err_bcast; + + goto out; /* success */ + +err_bcast: + wmi_pcp_stop(wil); +err_pcp_start: + netif_carrier_off(ndev); +out: + mutex_unlock(&wil->mutex); return rc; } @@ -741,63 +817,50 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct cfg80211_beacon_data *bcon) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; - size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - const u8 *pr_ies = NULL; - size_t pr_ies_len = 0; int rc; + u32 privacy = 0; wil_dbg_misc(wil, "%s()\n", __func__); wil_print_bcon_data(bcon); - if (bcon->probe_resp_len > hlen) { - pr_ies = f->u.probe_resp.variable; - pr_ies_len = bcon->probe_resp_len - hlen; - } - if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); } - /* FW do not form regular beacon, so bcon IE's are not set - * For the DMG bcon, when it will be supported, bcon IE's will - * be reused; add something like: - * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, - * bcon->beacon_ies); - */ - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies); - if (rc) { - wil_err(wil, "set_ie(PROBE_RESP) failed\n"); - return rc; - } + if (bcon->proberesp_ies && + cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies, + bcon->proberesp_ies_len)) + privacy = 1; - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, - bcon->assocresp_ies_len, - bcon->assocresp_ies); - if (rc) { - wil_err(wil, "set_ie(ASSOC_RESP) failed\n"); - return rc; + /* in case privacy has changed, need to restart the AP */ + if (wil->privacy != privacy) { + struct wireless_dev *wdev = ndev->ieee80211_ptr; + + wil_dbg_misc(wil, "privacy changed %d=>%d. 
Restarting AP\n", + wil->privacy, privacy); + + rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid, + wdev->ssid_len, privacy, + wdev->beacon_interval, + wil->channel, bcon, + wil->hidden_ssid); + } else { + rc = _wil_cfg80211_set_ies(wiphy, bcon); } - return 0; + return rc; } static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *info) { - int rc = 0; + int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = ndev->ieee80211_ptr; struct ieee80211_channel *channel = info->chandef.chan; struct cfg80211_beacon_data *bcon = &info->beacon; struct cfg80211_crypto_settings *crypto = &info->crypto; - u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); - struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; - size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - const u8 *pr_ies = NULL; - size_t pr_ies_len = 0; u8 hidden_ssid; wil_dbg_misc(wil, "%s()\n", __func__); @@ -807,6 +870,23 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, return -EINVAL; } + switch (info->hidden_ssid) { + case NL80211_HIDDEN_SSID_NOT_IN_USE: + hidden_ssid = WMI_HIDDEN_SSID_DISABLED; + break; + + case NL80211_HIDDEN_SSID_ZERO_LEN: + hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY; + break; + + case NL80211_HIDDEN_SSID_ZERO_CONTENTS: + hidden_ssid = WMI_HIDDEN_SSID_CLEAR; + break; + + default: + wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid); + return -EOPNOTSUPP; + } wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value, channel->center_freq, info->privacy ? "secure" : "open"); wil_dbg_misc(wil, "Privacy: %d auth_type %d\n", @@ -820,80 +900,16 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); - if (bcon->probe_resp_len > hlen) { - pr_ies = f->u.probe_resp.variable; - pr_ies_len = bcon->probe_resp_len - hlen; - } - if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); } - wil_set_recovery_state(wil, fw_recovery_idle); - - mutex_lock(&wil->mutex); - - __wil_down(wil); - rc = __wil_up(wil); - if (rc) - goto out; - - rc = wmi_set_ssid(wil, info->ssid_len, info->ssid); - if (rc) - goto out; - - /* IE's */ - /* bcon 'head IE's are not relevant for 60g band */ - /* - * FW do not form regular beacon, so bcon IE's are not set - * For the DMG bcon, when it will be supported, bcon IE's will - * be reused; add something like: - * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, - * bcon->beacon_ies); - */ - wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies); - wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, - bcon->assocresp_ies); - - wil->privacy = info->privacy; - - switch (info->hidden_ssid) { - case NL80211_HIDDEN_SSID_NOT_IN_USE: - hidden_ssid = WMI_HIDDEN_SSID_DISABLED; - break; - - case NL80211_HIDDEN_SSID_ZERO_LEN: - hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY; - break; - - case NL80211_HIDDEN_SSID_ZERO_CONTENTS: - hidden_ssid = WMI_HIDDEN_SSID_CLEAR; - break; - - default: - rc = -EOPNOTSUPP; - goto out; - } - - netif_carrier_on(ndev); - - rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype, - channel->hw_value, hidden_ssid); - if (rc) - goto err_pcp_start; + rc = _wil_cfg80211_start_ap(wiphy, ndev, + info->ssid, info->ssid_len, info->privacy, + info->beacon_interval, channel->hw_value, + bcon, hidden_ssid); - rc = wil_bcast_init(wil); - if (rc) - goto err_bcast; - - goto out; /* success */ -err_bcast: - wmi_pcp_stop(wil); -err_pcp_start: - 
netif_carrier_off(ndev); -out: - mutex_unlock(&wil->mutex); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 75219a1b8..d1a1e160e 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -62,7 +62,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, seq_printf(s, " swhead = %d\n", vring->swhead); seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail); if (x) { - v = ioread32(x); + v = readl(x); seq_printf(s, "0x%08x = %d\n", v, v); } else { seq_puts(s, "???\n"); @@ -156,6 +156,12 @@ static const struct file_operations fops_vring = { .llseek = seq_lseek, }; +static void wil_seq_hexdump(struct seq_file *s, void *p, int len, + const char *prefix) +{ + seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false); +} + static void wil_print_ring(struct seq_file *s, const char *prefix, void __iomem *off) { @@ -212,8 +218,6 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type), hdr.flags); if (len <= MAX_MBOXITEM_SIZE) { - int n = 0; - char printbuf[16 * 3 + 2]; unsigned char databuf[MAX_MBOXITEM_SIZE]; void __iomem *src = wmi_buffer(wil, d.addr) + sizeof(struct wil6210_mbox_hdr); @@ -223,16 +227,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, * reading header */ wil_memcpy_fromio_32(databuf, src, len); - while (n < len) { - int l = min(len - n, 16); - - hex_dump_to_buffer(databuf + n, l, - 16, 1, printbuf, - sizeof(printbuf), - false); - seq_printf(s, " : %s\n", printbuf); - n += l; - } + wil_seq_hexdump(s, databuf, len, " : "); } } else { seq_puts(s, "\n"); @@ -268,7 +263,7 @@ static const struct file_operations fops_mbox = { static int wil_debugfs_iomem_x32_set(void *data, u64 val) { - iowrite32(val, (void __iomem *)data); + writel(val, (void __iomem *)data); wmb(); /* make sure write propagated to HW */ return 0; @@ -276,7 +271,7 @@ static int wil_debugfs_iomem_x32_set(void *data, u64 val) static int wil_debugfs_iomem_x32_get(void *data, u64 *val) { - *val = ioread32((void __iomem *)data); + *val = readl((void __iomem *)data); return 0; } @@ -306,7 +301,7 @@ static int wil_debugfs_ulong_get(void *data, u64 *val) } DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get, - wil_debugfs_ulong_set, "%llu\n"); + wil_debugfs_ulong_set, "0x%llx\n"); static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent, @@ -477,7 +472,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data) void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr)); if (a) - seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a)); + seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a)); else seq_printf(s, "[0x%08x] = INVALID\n", mem_addr); @@ -867,22 +862,6 @@ static const struct file_operations fops_wmi = { .open = simple_open, }; -static void wil_seq_hexdump(struct seq_file *s, void *p, int len, - const char *prefix) -{ - char printbuf[16 * 3 + 2]; - int i = 0; - - while (i < len) { - int l = min(len - i, 16); - - hex_dump_to_buffer(p + i, l, 16, 1, printbuf, - sizeof(printbuf), false); - seq_printf(s, "%s%s\n", prefix, printbuf); - i += l; - } -} - static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb) { int i = 0; @@ -1344,6 +1323,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) { int i; u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size; + unsigned long long 
drop_dup = r->drop_dup, drop_old = r->drop_old; seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout, r->head_seq_num); @@ -1353,7 +1333,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) else seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_'); } - seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop); + seq_printf(s, + "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n", + r->total, drop_dup + drop_old, drop_dup, drop_old, + r->ssn_last_drop); } static int wil_sta_debugfs_show(struct seq_file *s, void *data) diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index 0ea695ff9..7053b62ca 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -50,19 +50,13 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev, wil_dbg_misc(wil, "%s()\n", __func__); - tx_itr_en = ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL)); + tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL); if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) - tx_itr_val = - ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH)); + tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH); - rx_itr_en = ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL)); + rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL); if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN) - rx_itr_val = - ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH)); + rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH); cp->tx_coalesce_usecs = tx_itr_val; cp->rx_coalesce_usecs = rx_itr_val; diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index d9aedad0c..b626a7f69 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -21,16 +21,6 @@ /*(DEBLOBBED)*/ -/* target operations */ -/* register read */ -#define R(a) ioread32(wil->csr + HOSTADDR(a)) -/* register write. wmb() to make sure it is completed */ -#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) -/* register set = read, OR, write */ -#define S(a, v) W(a, R(a) | v) -/* register clear = read, AND with inverted, write */ -#define C(a, v) W(a, R(a) & ~v) - static void wil_memset_toio_32(volatile void __iomem *dst, u32 val, size_t count) diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index c3f815f55..1a8a11c21 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -221,12 +221,12 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data, FW_ADDR_CHECK(dst, block[i].addr, "address"); - x = ioread32(dst); + x = readl(dst); y = (x & m) | (v & ~m); wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x " "(old 0x%08x val 0x%08x mask 0x%08x)\n", le32_to_cpu(block[i].addr), y, x, v, m); - iowrite32(y, dst); + writel(y, dst); wmb(); /* finish before processing next record */ } @@ -239,18 +239,18 @@ static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr, { unsigned delay = 0; - iowrite32(a, gwa_addr); - iowrite32(gw_cmd, gwa_cmd); + writel(a, gwa_addr); + writel(gw_cmd, gwa_cmd); wmb(); /* finish before activate gw */ - iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */ + writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */ do { udelay(1); /* typical time is few usec */ if (delay++ > 100) { wil_err_fw(wil, "gw timeout\n"); return -EINVAL; } - } while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? 
*/ + } while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */ return 0; } @@ -305,7 +305,7 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data, wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n", i, a, v); - iowrite32(v, gwa_val); + writel(v, gwa_val); rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a); if (rc) return rc; @@ -372,7 +372,7 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data, sizeof(v), false); for (k = 0; k < ARRAY_SIZE(block->value); k++) - iowrite32(v[k], gwa_val[k]); + writel(v[k], gwa_val[k]); rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a); if (rc) return rc; diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 28ffc1846..a371f036d 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -61,13 +61,13 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr) static inline void wil_icr_clear(u32 x, void __iomem *addr) { - iowrite32(x, addr); + writel(x, addr); } #endif /* defined(CONFIG_WIL6210_ISR_COR) */ static inline u32 wil_ioread32_and_clear(void __iomem *addr) { - u32 x = ioread32(addr); + u32 x = readl(addr); wil_icr_clear(x, addr); @@ -76,54 +76,47 @@ static inline u32 wil_ioread32_and_clear(void __iomem *addr) static void wil6210_mask_irq_tx(struct wil6210_priv *wil) { - iowrite32(WIL6210_IRQ_DISABLE, wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, IMS)); + wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS), + WIL6210_IRQ_DISABLE); } static void wil6210_mask_irq_rx(struct wil6210_priv *wil) { - iowrite32(WIL6210_IRQ_DISABLE, wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, IMS)); + wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS), + WIL6210_IRQ_DISABLE); } static void wil6210_mask_irq_misc(struct wil6210_priv *wil) { - iowrite32(WIL6210_IRQ_DISABLE, wil->csr + - HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, IMS)); + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS), + WIL6210_IRQ_DISABLE); } static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); - iowrite32(WIL6210_IRQ_DISABLE, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); + wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE); clear_bit(wil_status_irqen, wil->status); } void wil6210_unmask_irq_tx(struct wil6210_priv *wil) { - iowrite32(WIL6210_IMC_TX, wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, IMC)); + wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC), + WIL6210_IMC_TX); } void wil6210_unmask_irq_rx(struct wil6210_priv *wil) { - iowrite32(WIL6210_IMC_RX, wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, IMC)); + wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC), + WIL6210_IMC_RX); } static void wil6210_unmask_irq_misc(struct wil6210_priv *wil) { - iowrite32(WIL6210_IMC_MISC, wil->csr + - HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, IMC)); + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC), + WIL6210_IMC_MISC); } static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) @@ -132,8 +125,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) set_bit(wil_status_irqen, wil->status); - iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); + wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK); } void 
wil_mask_irq(struct wil6210_priv *wil) @@ -150,12 +142,12 @@ void wil_unmask_irq(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); - iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, ICC)); - iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, ICC)); - iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, ICC)); + wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC), + WIL_ICR_ICC_VALUE); + wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC), + WIL_ICR_ICC_VALUE); + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC), + WIL_ICR_ICC_VALUE); wil6210_unmask_irq_pseudo(wil); wil6210_unmask_irq_tx(wil); @@ -163,9 +155,6 @@ void wil_unmask_irq(struct wil6210_priv *wil) wil6210_unmask_irq_misc(wil); } -/* target write operation */ -#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) - void wil_configure_interrupt_moderation(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); @@ -177,44 +166,42 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil) return; /* Disable and clear tx counter before (re)configuration */ - W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR); - W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration); + wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR); + wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration); wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n", wil->tx_max_burst_duration); /* Configure TX max burst duration timer to use usec units */ - W(RGF_DMA_ITR_TX_CNT_CTL, - BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL); + wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, + BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL); /* Disable and clear tx idle counter before (re)configuration */ - W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR); - W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout); + wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR); + wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout); wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n", wil->tx_interframe_timeout); /* Configure TX max burst duration timer to use usec units */ - W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN | - BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL); + wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN | + BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL); /* Disable and clear rx counter before (re)configuration */ - W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR); - W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration); + wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR); + wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration); wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n", wil->rx_max_burst_duration); /* Configure TX max burst duration timer to use usec units */ - W(RGF_DMA_ITR_RX_CNT_CTL, - BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL); + wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, + BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL); /* Disable and clear rx idle counter before (re)configuration */ - W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR); - W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout); + wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR); + wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout); wil_info(wil, "set 
ITR_RX_IDL_CNT_TRSH = %d usec\n", wil->rx_interframe_timeout); /* Configure TX max burst duration timer to use usec units */ - W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN | - BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL); + wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN | + BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL); } -#undef W - static irqreturn_t wil6210_irq_rx(int irq, void *cookie) { struct wil6210_priv *wil = cookie; @@ -452,27 +439,24 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause) u32 icr_rx = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + offsetof(struct RGF_ICR, ICR)); - u32 imv_rx = ioread32(wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, IMV)); + u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR + + offsetof(struct RGF_ICR, IMV)); u32 icm_tx = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICM)); u32 icr_tx = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICR)); - u32 imv_tx = ioread32(wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, IMV)); + u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR + + offsetof(struct RGF_ICR, IMV)); u32 icm_misc = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICM)); u32 icr_misc = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICR)); - u32 imv_misc = ioread32(wil->csr + - HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, IMV)); + u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR + + offsetof(struct RGF_ICR, IMV)); wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n" "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n" "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n" @@ -492,7 +476,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) { irqreturn_t rc = IRQ_HANDLED; struct wil6210_priv *wil = cookie; - u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE)); + u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE); /** * pseudo_cause is Clear-On-Read, no need to ACK @@ -541,48 +525,12 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) return rc; } -static int wil6210_request_3msi(struct wil6210_priv *wil, int irq) -{ - int rc; - /* - * IRQ's are in the following order: - * - Tx - * - Rx - * - Misc - */ - - rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED, - WIL_NAME"_tx", wil); - if (rc) - return rc; - - rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED, - WIL_NAME"_rx", wil); - if (rc) - goto free0; - - rc = request_threaded_irq(irq + 2, wil6210_irq_misc, - wil6210_irq_misc_thread, - IRQF_SHARED, WIL_NAME"_misc", wil); - if (rc) - goto free1; - - return 0; - /* error branch */ -free1: - free_irq(irq + 1, wil); -free0: - free_irq(irq, wil); - - return rc; -} - /* can't use wil_ioread32_and_clear because ICC value is not set yet */ static inline void wil_clear32(void __iomem *addr) { - u32 x = ioread32(addr); + u32 x = readl(addr); - iowrite32(x, addr); + writel(x, addr); } void wil6210_clear_irq(struct wil6210_priv *wil) @@ -596,19 +544,16 @@ void wil6210_clear_irq(struct wil6210_priv *wil) wmb(); /* make sure write completed */ } -int wil6210_init_irq(struct wil6210_priv *wil, int irq) +int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) { int rc; - wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi); + wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? 
"MSI" : "INTx"); - if (wil->n_msi == 3) - rc = wil6210_request_3msi(wil, irq); - else - rc = request_threaded_irq(irq, wil6210_hardirq, - wil6210_thread_irq, - wil->n_msi ? 0 : IRQF_SHARED, - WIL_NAME, wil); + rc = request_threaded_irq(irq, wil6210_hardirq, + wil6210_thread_irq, + use_msi ? 0 : IRQF_SHARED, + WIL_NAME, wil); return rc; } @@ -618,8 +563,4 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq) wil_mask_irq(wil); free_irq(irq, wil); - if (wil->n_msi == 3) { - free_irq(irq + 1, wil); - free_irq(irq + 2, wil); - } } diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c index e9c067381..f7f948621 100644 --- a/drivers/net/wireless/ath/wil6210/ioctl.c +++ b/drivers/net/wireless/ath/wil6210/ioctl.c @@ -76,11 +76,11 @@ static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data) /* operation */ switch (io.op & wil_mmio_op_mask) { case wil_mmio_read: - io.val = ioread32(a); + io.val = readl(a); need_copy = true; break; case wil_mmio_write: - iowrite32(io.val, a); + writel(io.val, a); wmb(); /* make sure write propagated to HW */ break; default: diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 6ca6193ab..2fb04c51d 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -21,6 +21,7 @@ #include "wil6210.h" #include "txrx.h" #include "wmi.h" +#include "boot_loader.h" #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10 @@ -270,8 +271,7 @@ static void wil_scan_timer_fn(ulong x) clear_bit(wil_status_fwready, wil->status); wil_err(wil, "Scan timeout detected, start fw error recovery\n"); - wil->recovery_state = fw_recovery_pending; - schedule_work(&wil->fw_error_worker); + wil_fw_error_recovery(wil); } static int wil_wait_for_recovery(struct wil6210_priv *wil) @@ -528,26 +528,16 @@ void wil_priv_deinit(struct wil6210_priv *wil) destroy_workqueue(wil->wmi_wq); } -/* target operations */ -/* register read */ -#define R(a) ioread32(wil->csr + HOSTADDR(a)) -/* register write. 
wmb() to make sure it is completed */ -#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) -/* register set = read, OR, write */ -#define S(a, v) W(a, R(a) | v) -/* register clear = read, AND with inverted, write */ -#define C(a, v) W(a, R(a) & ~v) - static inline void wil_halt_cpu(struct wil6210_priv *wil) { - W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); - W(RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST); + wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); + wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST); } static inline void wil_release_cpu(struct wil6210_priv *wil) { /* Start CPU */ - W(RGF_USER_USER_CPU_0, 1); + wil_w(wil, RGF_USER_USER_CPU_0, 1); } static int wil_target_reset(struct wil6210_priv *wil) @@ -558,56 +548,60 @@ static int wil_target_reset(struct wil6210_priv *wil) wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); /* Clear MAC link up */ - S(RGF_HP_CTRL, BIT(15)); - S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD); - S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); + wil_s(wil, RGF_HP_CTRL, BIT(15)); + wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD); + wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); wil_halt_cpu(wil); /* clear all boot loader "ready" bits */ - W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0); + wil_w(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0); /* Clear Fw Download notification */ - C(RGF_USER_USAGE_6, BIT(0)); + wil_c(wil, RGF_USER_USAGE_6, BIT(0)); - S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); + wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); /* XTAL stabilization should take about 3ms */ usleep_range(5000, 7000); - x = R(RGF_CAF_PLL_LOCK_STATUS); + x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS); if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) { wil_err(wil, "Xtal stabilization timeout\n" "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x); return -ETIME; } /* switch 10k to XTAL*/ - C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF); + wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF); /* 40 MHz */ - C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); + wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); + wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); + wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); + wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); + wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* 
reset A2 PCIE AHB */ + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); + /* reset A2 PCIE AHB */ + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); /* wait until device ready. typical time is 20..80 msec */ do { msleep(RST_DELAY); - x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready)); + x = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, + boot_loader_ready)); if (x1 != x) { wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x); x1 = x; @@ -617,13 +611,13 @@ static int wil_target_reset(struct wil6210_priv *wil) x); return -ETIME; } - } while (x != BIT_BL_READY); + } while (x != BL_READY); - C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); + wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); /* enable fix for HW bug related to the SA/DA swap in AP Rx */ - S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | - BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); + wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | + BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); return 0; @@ -641,29 +635,93 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) static int wil_get_bl_info(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); - struct RGF_BL bl; - - wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl)); - le32_to_cpus(&bl.ready); - le32_to_cpus(&bl.version); - le32_to_cpus(&bl.rf_type); - le32_to_cpus(&bl.baseband_type); + union { + struct bl_dedicated_registers_v0 bl0; + struct bl_dedicated_registers_v1 bl1; + } bl; + u32 bl_ver; + u8 *mac; + u16 rf_status; + + wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), + sizeof(bl)); + bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version); + mac = bl.bl0.mac_address; + + if (bl_ver == 0) { + le32_to_cpus(&bl.bl0.rf_type); + le32_to_cpus(&bl.bl0.baseband_type); + rf_status = 0; /* actually, unknown */ + wil_info(wil, + "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n", + bl_ver, mac, + bl.bl0.rf_type, bl.bl0.baseband_type); + wil_info(wil, "Boot Loader build unknown for struct v0\n"); + } else { + le16_to_cpus(&bl.bl1.rf_type); + rf_status = le16_to_cpu(bl.bl1.rf_status); + le32_to_cpus(&bl.bl1.baseband_type); + le16_to_cpus(&bl.bl1.bl_version_subminor); + le16_to_cpus(&bl.bl1.bl_version_build); + wil_info(wil, + "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n", + bl_ver, mac, + bl.bl1.rf_type, rf_status, + bl.bl1.baseband_type); + wil_info(wil, "Boot Loader build %d.%d.%d.%d\n", + bl.bl1.bl_version_major, bl.bl1.bl_version_minor, + bl.bl1.bl_version_subminor, bl.bl1.bl_version_build); + } - if (!is_valid_ether_addr(bl.mac_address)) { - wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address); + if (!is_valid_ether_addr(mac)) { + wil_err(wil, "BL: Invalid MAC %pM\n", mac); return -EINVAL; } - ether_addr_copy(ndev->perm_addr, bl.mac_address); + ether_addr_copy(ndev->perm_addr, mac); if (!is_valid_ether_addr(ndev->dev_addr)) - ether_addr_copy(ndev->dev_addr, bl.mac_address); - wil_info(wil, - "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n", - bl.version, bl.mac_address, bl.rf_type, bl.baseband_type); + ether_addr_copy(ndev->dev_addr, mac); + + if (rf_status) {/* bad RF cable? 
*/ + wil_err(wil, "RF communication error 0x%04x", + rf_status); + return -EAGAIN; + } return 0; } +static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err) +{ + u32 bl_assert_code, bl_assert_blink, bl_magic_number; + u32 bl_ver = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, + boot_loader_struct_version)); + + if (bl_ver < 2) + return; + + bl_assert_code = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_assert_code)); + bl_assert_blink = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_assert_blink)); + bl_magic_number = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_magic_number)); + + if (is_err) { + wil_err(wil, + "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n", + bl_assert_code, bl_assert_blink, bl_magic_number); + } else { + wil_dbg_misc(wil, + "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n", + bl_assert_code, bl_assert_blink, bl_magic_number); + } +} + static int wil_wait_for_fw_ready(struct wil6210_priv *wil) { ulong to = msecs_to_jiffies(1000); @@ -690,9 +748,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil_dbg_misc(wil, "%s()\n", __func__); - if (wil->hw_version == HW_VER_UNKNOWN) - return -ENODEV; - WARN_ON(!mutex_is_locked(&wil->mutex)); WARN_ON(test_bit(wil_status_napi_en, wil->status)); @@ -707,6 +762,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return 0; } + if (wil->hw_version == HW_VER_UNKNOWN) + return -ENODEV; + cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); wil_bcast_fini(wil); @@ -729,12 +787,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) flush_workqueue(wil->wq_service); flush_workqueue(wil->wmi_wq); + wil_bl_crash_info(wil, false); rc = wil_target_reset(wil); wil_rx_fini(wil); - if (rc) + if (rc) { + wil_bl_crash_info(wil, true); return rc; + } rc = wil_get_bl_info(wil); + if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */ + rc = 0; if (rc) return rc; @@ -752,7 +815,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; /* Mark FW as loaded from host */ - S(RGF_USER_USAGE_6, 1); + wil_s(wil, RGF_USER_USAGE_6, 1); /* clear any interrupts which on-card-firmware * may have set @@ -760,8 +823,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil6210_clear_irq(wil); /* CAF_ICR - clear and mask */ /* it is W1C, clear by writing back same value */ - S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); - W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); wil_release_cpu(wil); } @@ -785,11 +848,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; } -#undef R -#undef W -#undef S -#undef C - void wil_fw_error_recovery(struct wil6210_priv *wil) { wil_dbg_misc(wil, "starting fw error recovery\n"); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 8ef18ace1..e3b3c8fb4 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -173,7 +173,10 @@ void *wil_if_alloc(struct device *dev) wil_set_ethtoolops(ndev); ndev->ieee80211_ptr = wdev; ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GRO; + NETIF_F_SG | NETIF_F_GRO | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_RXHASH; + ndev->features |= ndev->hw_features; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); 
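The wil_bl_crash_info() hunk above shows the new access pattern for the versioned boot-loader block: read the version word first, then fetch only the fields that version guarantees, using wil_r() plus offsetof() rather than copying the whole struct. A minimal standalone sketch of that pattern follows; demo_bl_regs and demo_read_assert_code are hypothetical names invented for illustration, not part of this patch (the driver's real layouts are bl_dedicated_registers_v0/v1):

#include <linux/io.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical versioned register block. The version word sits at a
 * fixed offset so it can be read before the full layout is known.
 */
struct demo_bl_regs {
	__le32 struct_version;	/* offset 0 in every layout version */
	__le32 assert_code;	/* assumed present from v1 onward */
};

static u32 demo_read_assert_code(void __iomem *bl)
{
	u32 ver = readl(bl + offsetof(struct demo_bl_regs,
				      struct_version));

	if (ver < 1)		/* field absent in older layouts */
		return 0;

	return readl(bl + offsetof(struct demo_bl_regs, assert_code));
}

Reading field-by-field keeps older boot loaders working: only offsets that the reported struct version guarantees are ever dereferenced.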
wdev->netdev = ndev; diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index aa3ecc607..feff1ef10 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -21,16 +21,14 @@ #include "wil6210.h" -static int use_msi = 1; -module_param(use_msi, int, S_IRUGO); -MODULE_PARM_DESC(use_msi, - " Use MSI interrupt: " - "0 - don't, 1 - (default) - single, or 3"); +static bool use_msi = true; +module_param(use_msi, bool, S_IRUGO); +MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); static void wil_set_capabilities(struct wil6210_priv *wil) { - u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID)); + u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); bitmap_zero(wil->hw_capabilities, hw_capability_last); @@ -50,24 +48,12 @@ void wil_set_capabilities(struct wil6210_priv *wil) void wil_disable_irq(struct wil6210_priv *wil) { - int irq = wil->pdev->irq; - - disable_irq(irq); - if (wil->n_msi == 3) { - disable_irq(irq + 1); - disable_irq(irq + 2); - } + disable_irq(wil->pdev->irq); } void wil_enable_irq(struct wil6210_priv *wil) { - int irq = wil->pdev->irq; - - enable_irq(irq); - if (wil->n_msi == 3) { - enable_irq(irq + 1); - enable_irq(irq + 2); - } + enable_irq(wil->pdev->irq); } /* Bus ops */ @@ -80,6 +66,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) * and only MSI should be used */ int msi_only = pdev->msi_enabled; + bool _use_msi = use_msi; wil_dbg_misc(wil, "%s()\n", __func__); @@ -87,41 +74,20 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) pci_set_master(pdev); - /* - * how many MSI interrupts to request? - */ - switch (use_msi) { - case 3: - case 1: - wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi); - break; - case 0: - wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n"); - break; - default: - wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi); - use_msi = 1; - } - - if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) { - wil_err(wil, "3 MSI mode failed, try 1 MSI\n"); - use_msi = 1; - } + wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx"); - if (use_msi == 1 && pci_enable_msi(pdev)) { + if (use_msi && pci_enable_msi(pdev)) { wil_err(wil, "pci_enable_msi failed, use INTx\n"); - use_msi = 0; + _use_msi = false; } - wil->n_msi = use_msi; - - if ((wil->n_msi == 0) && msi_only) { + if (!_use_msi && msi_only) { wil_err(wil, "Interrupt pin not routed, unable to use INTx\n"); rc = -ENODEV; goto stop_master; } - rc = wil6210_init_irq(wil, pdev->irq); + rc = wil6210_init_irq(wil, pdev->irq, _use_msi); if (rc) goto stop_master; @@ -293,11 +259,80 @@ static const struct pci_device_id wil6210_pcie_ids[] = { }; MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids); +#ifdef CONFIG_PM + +static int wil6210_suspend(struct device *dev, bool is_runtime) +{ + int rc = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct wil6210_priv *wil = pci_get_drvdata(pdev); + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + rc = wil_can_suspend(wil, is_runtime); + if (rc) + goto out; + + rc = wil_suspend(wil, is_runtime); + if (rc) + goto out; + + /* TODO: how do I bring card in low power state? 
*/ + + /* disable bus mastering */ + pci_clear_master(pdev); + /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */ + +out: + return rc; +} + +static int wil6210_resume(struct device *dev, bool is_runtime) +{ + int rc = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct wil6210_priv *wil = pci_get_drvdata(pdev); + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + /* allow master */ + pci_set_master(pdev); + + rc = wil_resume(wil, is_runtime); + if (rc) + pci_clear_master(pdev); + + return rc; +} + +#ifdef CONFIG_PM_SLEEP +static int wil6210_pm_suspend(struct device *dev) +{ + return wil6210_suspend(dev, false); +} + +static int wil6210_pm_resume(struct device *dev) +{ + return wil6210_resume(dev, false); +} +#endif /* CONFIG_PM_SLEEP */ + +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops wil6210_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume) +}; + static struct pci_driver wil6210_driver = { .probe = wil_pcie_probe, .remove = wil_pcie_remove, .id_table = wil6210_pcie_ids, .name = WIL_NAME, + .driver = { + .pm = &wil6210_pm_ops, + }, }; static int __init wil6210_driver_init(void) diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c new file mode 100644 index 000000000..0b7ecbcac --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wil6210.h" + +int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) +{ + int rc = 0; + struct wireless_dev *wdev = wil->wdev; + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + switch (wdev->iftype) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + break; + /* AP-like interface - can't suspend */ + default: + wil_dbg_pm(wil, "AP-like interface\n"); + rc = -EBUSY; + break; + } + + wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__, + is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc); + + return rc; +} + +int wil_suspend(struct wil6210_priv *wil, bool is_runtime) +{ + int rc = 0; + struct net_device *ndev = wil_to_ndev(wil); + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + /* if netif up, hardware is alive, shut it down */ + if (ndev->flags & IFF_UP) { + rc = wil_down(wil); + if (rc) { + wil_err(wil, "wil_down : %d\n", rc); + goto out; + } + } + + if (wil->platform_ops.suspend) + rc = wil->platform_ops.suspend(wil->platform_handle); + +out: + wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + is_runtime ? 
"runtime" : "system", rc); + return rc; +} + +int wil_resume(struct wil6210_priv *wil, bool is_runtime) +{ + int rc = 0; + struct net_device *ndev = wil_to_ndev(wil); + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + if (wil->platform_ops.resume) { + rc = wil->platform_ops.resume(wil->platform_handle); + if (rc) { + wil_err(wil, "platform_ops.resume : %d\n", rc); + goto out; + } + } + + /* if netif up, bring hardware up + * During open(), IFF_UP set after actual device method + * invocation. This prevent recursive call to wil_up() + */ + if (ndev->flags & IFF_UP) + rc = wil_up(wil); + +out: + wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + is_runtime ? "runtime" : "system", rc); + return rc; +} diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index ca10dcf09..9238c1ac2 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -121,6 +121,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) goto out; } + r->total++; hseq = r->head_seq_num; /** Due to the race between WMI events, where BACK establishment @@ -153,6 +154,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) /* frame with out of date sequence number */ if (seq_less(seq, r->head_seq_num)) { r->ssn_last_drop = seq; + r->drop_old++; + wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n", + seq, r->head_seq_num); dev_kfree_skb(skb); goto out; } @@ -173,6 +177,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) /* check if we already stored this frame */ if (r->reorder_buf[index]) { + r->drop_dup++; + wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq); dev_kfree_skb(skb); goto out; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index aa20af86e..6229110d5 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -509,7 +509,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) break; } } - iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail)); + wil_w(wil, v->hwtail, v->swtail); return rc; } @@ -541,6 +541,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) [GRO_DROP] = "GRO_DROP", }; + if (ndev->features & NETIF_F_RXHASH) + /* fake L4 to ensure it won't be re-calculated later + * set hash to any non-zero value to activate rps + * mechanism, core will be chosen according + * to user-level rps configuration. + */ + skb_set_hash(skb, 1, PKT_HASH_TYPE_L4); + skb_orphan(skb); if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) { @@ -1058,14 +1066,52 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, static inline void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags) { - d->mac.d[2] |= ((nr_frags + 1) << - MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); + d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); } -static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil, - struct vring_tx_desc *d, - struct sk_buff *skb) +/** + * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding + * @skb is used to obtain the protocol and headers length. + * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data, + * 2 - middle, 3 - last descriptor. 
+ */ + +static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d, + struct sk_buff *skb, + int tso_desc_type, bool is_ipv4, + int tcp_hdr_len, int skb_net_hdr_len) { + d->dma.b11 = ETH_HLEN; /* MAC header length */ + d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS; + + d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS); + /* L4 header len: TCP header length */ + d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); + + /* Setup TSO: bit and desc type */ + d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) | + (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS); + d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS); + + d->dma.ip_length = skb_net_hdr_len; + /* Enable TCP/UDP checksum */ + d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS); + /* Calculate pseudo-header */ + d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS); +} + +/** + * Sets the descriptor @d up for csum. The corresponding + * @skb is used to obtain the protocol and headers length. + * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6. + * Note, if d==NULL, the function only returns the protocol result. + * + * It is very similar to previous wil_tx_desc_offload_setup_tso. This + * is "if unrolling" to optimize the critical path. + */ + +static int wil_tx_desc_offload_setup(struct vring_tx_desc *d, + struct sk_buff *skb){ int protocol; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1110,6 +1156,305 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil, return 0; } +static inline void wil_tx_last_desc(struct vring_tx_desc *d) +{ + d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) | + BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) | + BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); +} + +static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d) +{ + d->dma.d0 |= wil_tso_type_lst << + DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS; +} + +static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, + struct sk_buff *skb) +{ + struct device *dev = wil_to_dev(wil); + + /* point to descriptors in shared memory */ + volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc, + *_first_desc = NULL; + + /* pointers to shadow descriptors */ + struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem, + *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem, + *first_desc = &first_desc_mem; + + /* pointer to shadow descriptors' context */ + struct wil_ctx *hdr_ctx, *first_ctx = NULL; + + int descs_used = 0; /* total number of used descriptors */ + int sg_desc_cnt = 0; /* number of descriptors for current mss*/ + + u32 swhead = vring->swhead; + int used, avail = wil_vring_avail_tx(vring); + int nr_frags = skb_shinfo(skb)->nr_frags; + int min_desc_required = nr_frags + 1; + int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */ + int f, len, hdrlen, headlen; + int vring_index = vring - wil->vring_tx; + struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; + uint i = swhead; + dma_addr_t pa; + const skb_frag_t *frag = NULL; + int rem_data = mss; + int lenmss; + int hdr_compensation_need = true; + int desc_tso_type = wil_tso_type_first; + bool is_ipv4; + int tcp_hdr_len; + int skb_net_hdr_len; + int gso_type; + + wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", + __func__, skb->len, vring_index); + + if (unlikely(!txdata->enabled)) + return -EINVAL; + + /* A typical page 4K is 3-4 payloads, we assume each fragment + * is a full payload, that's how min_desc_required has been + * calculated. 
In real we might need more or less descriptors, + * this is the initial check only. + */ + if (unlikely(avail < min_desc_required)) { + wil_err_ratelimited(wil, + "TSO: Tx ring[%2d] full. No space for %d fragments\n", + vring_index, min_desc_required); + return -ENOMEM; + } + + /* Header Length = MAC header len + IP header len + TCP header len*/ + hdrlen = ETH_HLEN + + (int)skb_network_header_len(skb) + + tcp_hdrlen(skb); + + gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4); + switch (gso_type) { + case SKB_GSO_TCPV4: + /* TCP v4, zero out the IP length and IPv4 checksum fields + * as required by the offloading doc + */ + ip_hdr(skb)->tot_len = 0; + ip_hdr(skb)->check = 0; + is_ipv4 = true; + break; + case SKB_GSO_TCPV6: + /* TCP v6, zero out the payload length */ + ipv6_hdr(skb)->payload_len = 0; + is_ipv4 = false; + break; + default: + /* other than TCPv4 or TCPv6 types are not supported for TSO. + * It is also illegal for both to be set simultaneously + */ + return -EINVAL; + } + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return -EINVAL; + + /* tcp header length and skb network header length are fixed for all + * packet's descriptors - read then once here + */ + tcp_hdr_len = tcp_hdrlen(skb); + skb_net_hdr_len = skb_network_header_len(skb); + + _hdr_desc = &vring->va[i].tx; + + pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, pa))) { + wil_err(wil, "TSO: Skb head DMA map error\n"); + goto err_exit; + } + + wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index); + wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4, + tcp_hdr_len, skb_net_hdr_len); + wil_tx_last_desc(hdr_desc); + + vring->ctx[i].mapped_as = wil_mapped_as_single; + hdr_ctx = &vring->ctx[i]; + + descs_used++; + headlen = skb_headlen(skb) - hdrlen; + + for (f = headlen ? 
-1 : 0; f < nr_frags; f++) { + if (headlen) { + len = headlen; + wil_dbg_txrx(wil, "TSO: process skb head, len %u\n", + len); + } else { + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len); + } + + while (len) { + wil_dbg_txrx(wil, + "TSO: len %d, rem_data %d, descs_used %d\n", + len, rem_data, descs_used); + + if (descs_used == avail) { + wil_err(wil, "TSO: ring overflow\n"); + goto dma_error; + } + + lenmss = min_t(int, rem_data, len); + i = (swhead + descs_used) % vring->size; + wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i); + + if (!headlen) { + pa = skb_frag_dma_map(dev, frag, + frag->size - len, lenmss, + DMA_TO_DEVICE); + vring->ctx[i].mapped_as = wil_mapped_as_page; + } else { + pa = dma_map_single(dev, + skb->data + + skb_headlen(skb) - headlen, + lenmss, + DMA_TO_DEVICE); + vring->ctx[i].mapped_as = wil_mapped_as_single; + headlen -= lenmss; + } + + if (unlikely(dma_mapping_error(dev, pa))) + goto dma_error; + + _desc = &vring->va[i].tx; + + if (!_first_desc) { + _first_desc = _desc; + first_ctx = &vring->ctx[i]; + d = first_desc; + } else { + d = &desc_mem; + } + + wil_tx_desc_map(d, pa, lenmss, vring_index); + wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type, + is_ipv4, tcp_hdr_len, + skb_net_hdr_len); + + /* use tso_type_first only once */ + desc_tso_type = wil_tso_type_mid; + + descs_used++; /* desc used so far */ + sg_desc_cnt++; /* desc used for this segment */ + len -= lenmss; + rem_data -= lenmss; + + wil_dbg_txrx(wil, + "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n", + len, rem_data, descs_used, sg_desc_cnt); + + /* Close the segment if reached mss size or last frag*/ + if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) { + if (hdr_compensation_need) { + /* first segment include hdr desc for + * release + */ + hdr_ctx->nr_frags = sg_desc_cnt; + wil_tx_desc_set_nr_frags(first_desc, + sg_desc_cnt + + 1); + hdr_compensation_need = false; + } else { + wil_tx_desc_set_nr_frags(first_desc, + sg_desc_cnt); + } + first_ctx->nr_frags = sg_desc_cnt - 1; + + wil_tx_last_desc(d); + + /* first descriptor may also be the last + * for this mss - make sure not to copy + * it twice + */ + if (first_desc != d) + *_first_desc = *first_desc; + + /*last descriptor will be copied at the end + * of this TS processing + */ + if (f < nr_frags - 1 || len > 0) + *_desc = *d; + + rem_data = mss; + _first_desc = NULL; + sg_desc_cnt = 0; + } else if (first_desc != d) /* update mid descriptor */ + *_desc = *d; + } + } + + /* first descriptor may also be the last. 
+ * in this case d pointer is invalid + */ + if (_first_desc == _desc) + d = first_desc; + + /* Last data descriptor */ + wil_set_tx_desc_last_tso(d); + *_desc = *d; + + /* Fill the total number of descriptors in first desc (hdr)*/ + wil_tx_desc_set_nr_frags(hdr_desc, descs_used); + *_hdr_desc = *hdr_desc; + + /* hold reference to skb + * to prevent skb release before accounting + * in case of immediate "tx done" + */ + vring->ctx[i].skb = skb_get(skb); + + /* performance monitoring */ + used = wil_vring_used_tx(vring); + if (wil_val_in_range(vring_idle_trsh, + used, used + descs_used)) { + txdata->idle += get_cycles() - txdata->last_idle; + wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", + vring_index, used, used + descs_used); + } + + /* advance swhead */ + wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); + wil_vring_advance_head(vring, descs_used); + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + + wil_w(wil, vring->hwtail, vring->swhead); + return 0; + +dma_error: + wil_err(wil, "TSO: DMA map page error\n"); + while (descs_used > 0) { + struct wil_ctx *ctx; + + i = (swhead + descs_used) % vring->size; + d = (struct vring_tx_desc *)&vring->va[i].tx; + _desc = &vring->va[i].tx; + *d = *_desc; + _desc->dma.status = TX_DMA_STATUS_DU; + ctx = &vring->ctx[i]; + wil_txdesc_unmap(dev, d, ctx); + if (ctx->skb) + dev_kfree_skb_any(ctx->skb); + memset(ctx, 0, sizeof(*ctx)); + descs_used--; + } + +err_exit: + return -EINVAL; +} + static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, struct sk_buff *skb) { @@ -1128,7 +1473,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, bool mcast = (vring_index == wil->bcast_vring); uint len = skb_headlen(skb); - wil_dbg_txrx(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", + __func__, skb->len, vring_index); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1159,14 +1505,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS); } /* Process TCP/UDP checksum offloading */ - if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { + if (unlikely(wil_tx_desc_offload_setup(d, skb))) { wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", vring_index); goto dma_error; } vring->ctx[i].nr_frags = nr_frags; - wil_tx_desc_set_nr_frags(d, nr_frags); + wil_tx_desc_set_nr_frags(d, nr_frags + 1); /* middle segments */ for (; f < nr_frags; f++) { @@ -1190,7 +1536,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, * if it succeeded for 1-st descriptor, * it will succeed here too */ - wil_tx_desc_offload_cksum_set(wil, d, skb); + wil_tx_desc_offload_setup(d, skb); } /* for the last seg only */ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); @@ -1221,7 +1567,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, vring->swhead); trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); - iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + + wil_w(wil, vring->hwtail, vring->swhead); return 0; dma_error: @@ -1254,8 +1606,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, int rc; spin_lock(&txdata->lock); - rc = __wil_tx_vring(wil, vring, skb); + + rc = 
(skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring) + (wil, vring, skb); + spin_unlock(&txdata->lock); + return rc; } @@ -1382,7 +1738,8 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) struct wil_ctx *ctx = &vring->ctx[vring->swtail]; /** * For the fragmented skb, HW will set DU bit only for the - * last fragment. look for it + * last fragment. look for it. + * In TSO the first DU will include hdr desc */ int lf = (vring->swtail + ctx->nr_frags) % vring->size; /* TODO: check we are not past head */ diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 0c4638487..82a8f9a03 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -291,6 +291,14 @@ struct vring_tx_dma { __le16 length; } __packed; +/* TSO type used in dma descriptor d0 bits 11-12 */ +enum { + wil_tso_type_hdr = 0, + wil_tso_type_first = 1, + wil_tso_type_mid = 2, + wil_tso_type_lst = 3, +}; + /* Rx descriptor - MAC part * [dword 0] * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 208848d26..794e5d636 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -127,16 +127,6 @@ struct RGF_ICR { u32 IMC; /* Mask Clear, write 1 to clear */ } __packed; -struct RGF_BL { - u32 ready; /* 0x880A3C bit [0] */ -#define BIT_BL_READY BIT(0) - u32 version; /* 0x880A40 version of the BL struct */ - u32 rf_type; /* 0x880A44 ID of the connected RF */ - u32 baseband_type; /* 0x880A48 ID of the baseband */ - u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */ - u8 pad[2]; -} __packed; - /* registers - FW addresses */ #define RGF_USER_USAGE_1 (0x880004) #define RGF_USER_USAGE_6 (0x880018) @@ -262,9 +252,8 @@ enum { }; /* popular locations */ -#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD) -#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \ - offsetof(struct RGF_ICR, ICS)) +#define RGF_MBOX RGF_USER_USER_SCRATCH_PAD +#define HOST_MBOX HOSTADDR(RGF_MBOX) #define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2 /* ISR register bits */ @@ -434,12 +423,12 @@ struct pci_dev; * @ssn: Starting Sequence Number expected to be aggregated. * @buf_size: buffer size for incoming A-MPDUs * @timeout: reset timer value (in TUs). + * @ssn_last_drop: SSN of the last dropped frame + * @total: total number of processed incoming frames + * @drop_dup: duplicate frames dropped for this reorder buffer + * @drop_old: old frames dropped for this reorder buffer * @dialog_token: dialog token for aggregation session - * @rcu_head: RCU head used for freeing this struct - * - * This structure's lifetime is managed by RCU, assignments to - * the array holding it must hold the aggregation mutex. - * + * @first_time: true when this buffer used 1-st time */ struct wil_tid_ampdu_rx { struct sk_buff **reorder_buf; @@ -453,6 +442,9 @@ struct wil_tid_ampdu_rx { u16 buf_size; u16 timeout; u16 ssn_last_drop; + unsigned long long total; /* frames processed */ + unsigned long long drop_dup; + unsigned long long drop_old; u8 dialog_token; bool first_time; /* is it 1-st time this buffer used? */ }; @@ -543,7 +535,6 @@ struct pmc_ctx { struct wil6210_priv { struct pci_dev *pdev; - int n_msi; struct wireless_dev *wdev; void __iomem *csr; DECLARE_BITMAP(status, wil_status_last); @@ -559,6 +550,8 @@ struct wil6210_priv { /* profile */ u32 monitor_flags; u32 privacy; /* secure connection? 
*/ + u8 hidden_ssid; /* relevant in AP mode */ + u16 channel; /* relevant in AP mode */ int sinfo_gen; u32 ap_isolate; /* no intra-BSS communication */ /* interrupt moderation */ @@ -654,6 +647,33 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...); #define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg) #define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg) #define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg) +#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg) + +/* target operations */ +/* register read */ +static inline u32 wil_r(struct wil6210_priv *wil, u32 reg) +{ + return readl(wil->csr + HOSTADDR(reg)); +} + +/* register write. wmb() to make sure it is completed */ +static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val) +{ + writel(val, wil->csr + HOSTADDR(reg)); + wmb(); /* wait for write to propagate to the HW */ +} + +/* register set = read, OR, write */ +static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val) +{ + wil_w(wil, reg, wil_r(wil, reg) | val); +} + +/* register clear = read, AND with inverted, write */ +static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val) +{ + wil_w(wil, reg, wil_r(wil, reg) & ~val); +} #if defined(CONFIG_DYNAMIC_DEBUG) #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \ @@ -744,7 +764,7 @@ void wil_back_tx_worker(struct work_struct *work); void wil_back_tx_flush(struct wil6210_priv *wil); void wil6210_clear_irq(struct wil6210_priv *wil); -int wil6210_init_irq(struct wil6210_priv *wil, int irq); +int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi); void wil6210_fini_irq(struct wil6210_priv *wil, int irq); void wil_mask_irq(struct wil6210_priv *wil); void wil_unmask_irq(struct wil6210_priv *wil); @@ -796,4 +816,8 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type); int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd); int wil_request_firmware(struct wil6210_priv *wil, const char *name); +int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); +int wil_suspend(struct wil6210_priv *wil, bool is_runtime); +int wil_resume(struct wil6210_priv *wil, bool is_runtime); + #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c index de15f1422..2e831bf20 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.c +++ b/drivers/net/wireless/ath/wil6210/wil_platform.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "linux/device.h" +#include <linux/device.h> #include "wil_platform.h" int __init wil_platform_modinit(void) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index c759759af..2f35d4c51 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -228,8 +228,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); /* wait till FW finish with previous command */ for (retry = 5; retry > 0; retry--) { - r->tail = ioread32(wil->csr + HOST_MBOX + - offsetof(struct wil6210_mbox_ctl, tx.tail)); + r->tail = wil_r(wil, RGF_MBOX + + offsetof(struct wil6210_mbox_ctl, tx.tail)); if (next_head != r->tail) break; msleep(20); @@ -254,16 +254,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) wil_memcpy_toio_32(dst, &cmd, sizeof(cmd)); wil_memcpy_toio_32(dst + sizeof(cmd), buf, len); /* mark entry as full */ - iowrite32(1, wil->csr + HOSTADDR(r->head) + - offsetof(struct wil6210_mbox_ring_desc, sync)); + wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1); /* advance next ptr */ - iowrite32(r->head = next_head, wil->csr + HOST_MBOX + - offsetof(struct wil6210_mbox_ctl, tx.head)); + wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head), + r->head = next_head); trace_wil6210_wmi_cmd(&cmd.wmi, buf, len); /* interrupt to FW */ - iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); + wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS), + SW_INT_MBOX); return 0; } @@ -312,22 +312,44 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) struct wiphy *wiphy = wil_to_wiphy(wil); struct ieee80211_mgmt *rx_mgmt_frame = (struct ieee80211_mgmt *)data->payload; - int ch_no = data->info.channel+1; - u32 freq = ieee80211_channel_to_frequency(ch_no, - IEEE80211_BAND_60GHZ); - struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); - s32 signal = data->info.sqi; - __le16 fc = rx_mgmt_frame->frame_control; - u32 d_len = le32_to_cpu(data->info.len); - u16 d_status = le16_to_cpu(data->info.status); - - wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n", + int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload); + int ch_no; + u32 freq; + struct ieee80211_channel *channel; + s32 signal; + __le16 fc; + u32 d_len; + u16 d_status; + + if (flen < 0) { + wil_err(wil, "MGMT Rx: short event, len %d\n", len); + return; + } + + d_len = le32_to_cpu(data->info.len); + if (d_len != flen) { + wil_err(wil, + "MGMT Rx: length mismatch, d_len %d should be %d\n", + d_len, flen); + return; + } + + ch_no = data->info.channel + 1; + freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ); + channel = ieee80211_get_channel(wiphy, freq); + signal = data->info.sqi; + d_status = le16_to_cpu(data->info.status); + fc = rx_mgmt_frame->frame_control; + + wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n", data->info.channel, data->info.mcs, data->info.snr, data->info.sqi); wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, le16_to_cpu(fc)); wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", data->info.qid, data->info.mid, data->info.cid); + wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame, + d_len, true); if (!channel) { wil_err(wil, "Frame on unsupported channel\n"); @@ -363,6 +385,17 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) } } +static void 
wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) +{ + struct wmi_tx_mgmt_packet_event *data = d; + struct ieee80211_mgmt *mgmt_frame = + (struct ieee80211_mgmt *)data->payload; + int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload); + + wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame, + flen, true); +} + static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, void *d, int len) { @@ -659,6 +692,7 @@ static const struct { {WMI_READY_EVENTID, wmi_evt_ready}, {WMI_FW_READY_EVENTID, wmi_evt_fw_ready}, {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt}, + {WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt}, {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete}, {WMI_CONNECT_EVENTID, wmi_evt_connect}, {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, @@ -695,8 +729,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) u16 len; bool q; - r->head = ioread32(wil->csr + HOST_MBOX + - offsetof(struct wil6210_mbox_ctl, rx.head)); + r->head = wil_r(wil, RGF_MBOX + + offsetof(struct wil6210_mbox_ctl, rx.head)); if (r->tail == r->head) break; @@ -734,8 +768,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) cmd = (void *)&evt->event.wmi; wil_memcpy_fromio_32(cmd, src, len); /* mark entry as empty */ - iowrite32(0, wil->csr + HOSTADDR(r->tail) + - offsetof(struct wil6210_mbox_ring_desc, sync)); + wil_w(wil, r->tail + + offsetof(struct wil6210_mbox_ring_desc, sync), 0); /* indicate */ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { @@ -754,8 +788,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) /* advance tail */ r->tail = r->base + ((r->tail - r->base + sizeof(struct wil6210_mbox_ring_desc)) % r->size); - iowrite32(r->tail, wil->csr + HOST_MBOX + - offsetof(struct wil6210_mbox_ctl, rx.tail)); + wil_w(wil, RGF_MBOX + + offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail); /* add to the pending list */ spin_lock_irqsave(&wil->wmi_ev_lock, flags); @@ -772,7 +806,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, u16 reply_id, void *reply, u8 reply_size, int to_msec) { int rc; - int remain; + unsigned long remain; mutex_lock(&wil->wmi_mutex); @@ -988,12 +1022,21 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) { + static const char *const names[] = { + [WMI_FRAME_BEACON] = "BEACON", + [WMI_FRAME_PROBE_REQ] = "PROBE_REQ", + [WMI_FRAME_PROBE_RESP] = "WMI_FRAME_PROBE_RESP", + [WMI_FRAME_ASSOC_REQ] = "WMI_FRAME_ASSOC_REQ", + [WMI_FRAME_ASSOC_RESP] = "WMI_FRAME_ASSOC_RESP", + }; int rc; u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); - if (!cmd) - return -ENOMEM; + if (!cmd) { + rc = -ENOMEM; + goto out; + } if (!ie) ie_len = 0; @@ -1003,6 +1046,12 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) memcpy(cmd->ie_info, ie, ie_len); rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len); kfree(cmd); +out: + if (rc) { + const char *name = type < ARRAY_SIZE(names) ? 
+ names[type] : "??"; + wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc); + } return rc; } @@ -1129,15 +1178,42 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason) { + int rc; + u16 reason_code; struct wmi_disconnect_sta_cmd cmd = { .disconnect_reason = cpu_to_le16(reason), }; + struct { + struct wil6210_mbox_hdr_wmi wmi; + struct wmi_disconnect_event evt; + } __packed reply; ether_addr_copy(cmd.dst_mac, mac); wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); - return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd)); + rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd), + WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000); + /* failure to disconnect in reasonable time treated as FW error */ + if (rc) { + wil_fw_error_recovery(wil); + return rc; + } + + /* call event handler manually after processing wmi_call, + * to avoid deadlock - disconnect event handler acquires wil->mutex + * while it is already held here + */ + reason_code = le16_to_cpu(reply.evt.protocol_reason_status); + + wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", + reply.evt.bssid, reason_code, + reply.evt.disconnect_reason); + + wil->sinfo_gen++; + wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); + + return 0; } int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout) @@ -1279,7 +1355,7 @@ static void wmi_event_handle(struct wil6210_priv *wil, /* search for handler */ if (!wmi_evt_call_handler(wil, id, evt_data, len - sizeof(*wmi))) { - wil_err(wil, "Unhandled event 0x%04x\n", id); + wil_info(wil, "Unhandled event 0x%04x\n", id); } } else { wil_err(wil, "Unknown event type\n"); diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c index 916123a3d..a335f94c7 100644 --- a/drivers/net/wireless/b43/lo.c +++ b/drivers/net/wireless/b43/lo.c @@ -929,8 +929,8 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev, b43_lo_write(dev, &cal->ctl); } -/* Periodic LO maintanance work */ -void b43_lo_g_maintanance_work(struct b43_wldev *dev) +/* Periodic LO maintenance work */ +void b43_lo_g_maintenance_work(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h index 3b27e20ef..7b4df3883 100644 --- a/drivers/net/wireless/b43/lo.h +++ b/drivers/net/wireless/b43/lo.h @@ -80,7 +80,7 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev, void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all); -void b43_lo_g_maintanance_work(struct b43_wldev *dev); +void b43_lo_g_maintenance_work(struct b43_wldev *dev); void b43_lo_g_cleanup(struct b43_wldev *dev); void b43_lo_g_init(struct b43_wldev *dev); diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 7303006fd..816d5b249 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -114,6 +114,7 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over #ifdef CONFIG_B43_BCMA static const struct bcma_device_id b43_bcma_tbl[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x15, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS), diff --git a/drivers/net/wireless/b43/phy_g.c 
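The wil6210 hunks above replace open-coded ioread32()/iowrite32() arithmetic on wil->csr with the driver's wil_r()/wil_w() accessors, so every register access funnels through one choke point. A minimal user-space sketch of the same wrapper pattern, assuming a fake register array in place of a mapped BAR (all names here are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_csr[64];     /* stand-in for an ioremap()ed window */

    /* One choke point for reads: a natural place for tracing or checks. */
    static uint32_t dev_r(size_t reg)
    {
            return fake_csr[reg / 4]; /* kernel: ioread32(csr + reg) */
    }

    static void dev_w(size_t reg, uint32_t val)
    {
            fake_csr[reg / 4] = val;  /* kernel: iowrite32(val, csr + reg) */
    }

    int main(void)
    {
            dev_w(0x10, 0x1234abcd);
            printf("reg 0x10 = 0x%08x\n", dev_r(0x10));
            return 0;
    }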
b/drivers/net/wireless/b43/phy_g.c index 727ce6edb..462310e6e 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -3004,7 +3004,7 @@ static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev) phy->rev == 1) { //TODO: implement rev1 workaround } - b43_lo_g_maintanance_work(dev); + b43_lo_g_maintenance_work(dev); b43_mac_enable(dev); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index d86d1f1f1..a293275c1 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -469,6 +469,36 @@ brcmf_find_wpsie(const u8 *parse, u32 len) return NULL; } +static int brcmf_vif_change_validate(struct brcmf_cfg80211_info *cfg, + struct brcmf_cfg80211_vif *vif, + enum nl80211_iftype new_type) +{ + int iftype_num[NUM_NL80211_IFTYPES]; + struct brcmf_cfg80211_vif *pos; + + memset(&iftype_num[0], 0, sizeof(iftype_num)); + list_for_each_entry(pos, &cfg->vif_list, list) + if (pos == vif) + iftype_num[new_type]++; + else + iftype_num[pos->wdev.iftype]++; + + return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num); +} + +static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg, + enum nl80211_iftype new_type) +{ + int iftype_num[NUM_NL80211_IFTYPES]; + struct brcmf_cfg80211_vif *pos; + + memset(&iftype_num[0], 0, sizeof(iftype_num)); + list_for_each_entry(pos, &cfg->vif_list, list) + iftype_num[pos->wdev.iftype]++; + + iftype_num[new_type]++; + return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num); +} static void convert_key_from_CPU(struct brcmf_wsec_key *key, struct brcmf_wsec_key_le *key_le) @@ -663,8 +693,14 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, struct vif_params *params) { struct wireless_dev *wdev; + int err; brcmf_dbg(TRACE, "enter: %s type %d\n", name, type); + err = brcmf_vif_add_validate(wiphy_to_cfg(wiphy), type); + if (err) { + brcmf_err("iface validation failed: err=%d\n", err); + return ERR_PTR(err); + } switch (type) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: @@ -823,8 +859,12 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, s32 ap = 0; s32 err = 0; - brcmf_dbg(TRACE, "Enter, ndev=%p, type=%d\n", ndev, type); - + brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type); + err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type); + if (err) { + brcmf_err("iface validation failed: err=%d\n", err); + return err; + } switch (type) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_WDS: @@ -5695,63 +5735,132 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = { } }; +/** + * brcmf_setup_ifmodes() - determine interface modes and combinations. + * + * @wiphy: wiphy object. + * @ifp: interface object needed for feat module api. + * + * The interface modes and combinations are determined dynamically here + * based on firmware functionality. 
+ * + * no p2p and no mbss: + * + * #STA <= 1, #AP <= 1, channels = 1, 2 total + * + * no p2p and mbss: + * + * #STA <= 1, #AP <= 1, channels = 1, 2 total + * #AP <= 4, matching BI, channels = 1, 4 total + * + * p2p, no mchan, and mbss: + * + * #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total + * #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total + * #AP <= 4, matching BI, channels = 1, 4 total + * + * p2p, mchan, and mbss: + * + * #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total + * #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total + * #AP <= 4, matching BI, channels = 1, 4 total + */ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) { struct ieee80211_iface_combination *combo = NULL; - struct ieee80211_iface_limit *limits = NULL; - int i = 0, max_iface_cnt; + struct ieee80211_iface_limit *c0_limits = NULL; + struct ieee80211_iface_limit *p2p_limits = NULL; + struct ieee80211_iface_limit *mbss_limits = NULL; + bool mbss, p2p; + int i, c, n_combos; - combo = kzalloc(sizeof(*combo), GFP_KERNEL); + mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS); + p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P); + + n_combos = 1 + !!p2p + !!mbss; + combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL); if (!combo) goto err; - limits = kzalloc(sizeof(*limits) * 4, GFP_KERNEL); - if (!limits) + c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL); + if (!c0_limits) goto err; + if (p2p) { + p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL); + if (!p2p_limits) + goto err; + } + + if (mbss) { + mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL); + if (!mbss_limits) + goto err; + } + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); - if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN)) - combo->num_different_channels = 2; - else - combo->num_different_channels = 1; - - if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) { - limits[i].max = 1; - limits[i++].types = BIT(NL80211_IFTYPE_STATION); - limits[i].max = 4; - limits[i++].types = BIT(NL80211_IFTYPE_AP); - max_iface_cnt = 5; - } else { - limits[i].max = 2; - limits[i++].types = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP); - max_iface_cnt = 2; - } - - if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P)) { + c = 0; + i = 0; + combo[c].num_different_channels = 1; + c0_limits[i].max = 1; + c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION); + if (p2p) { + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN)) + combo[c].num_different_channels = 2; wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE); - limits[i].max = 1; - limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO); - limits[i].max = 1; - limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE); - max_iface_cnt += 2; - } - combo->max_interfaces = max_iface_cnt; - combo->limits = limits; - combo->n_limits = i; - + c0_limits[i].max = 1; + c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE); + c0_limits[i].max = 1; + c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + } else { + c0_limits[i].max = 1; + c0_limits[i++].types = BIT(NL80211_IFTYPE_AP); + } + combo[c].max_interfaces = i; + combo[c].n_limits = i; + combo[c].limits = c0_limits; + + if (p2p) { + c++; + i = 0; + combo[c].num_different_channels = 1; + p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION); + 
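The combination tables in the comment above reduce to a simple rule: count the live interfaces of each type and compare each count against a per-type ceiling, which is what cfg80211_check_combinations() does with the iftype_num[] arrays built earlier in brcmf_vif_add_validate() and brcmf_vif_change_validate(). A self-contained sketch of that counting check, with made-up types and ceilings:

    #include <stdio.h>

    enum iftype { IF_STATION, IF_AP, IF_P2P_CLIENT, NUM_IFTYPES };

    /* Hypothetical per-type maxima, standing in for ieee80211_iface_limit. */
    static const int max_per_type[NUM_IFTYPES] = { 1, 4, 1 };

    static int check_combination(const int num[NUM_IFTYPES])
    {
            for (int t = 0; t < NUM_IFTYPES; t++)
                    if (num[t] > max_per_type[t])
                            return -1; /* kernel code would return an errno such as -EBUSY */
            return 0;
    }

    int main(void)
    {
            int num[NUM_IFTYPES] = { 1, 4, 0 };

            num[IF_AP]++; /* one AP over the limit */
            printf("%d\n", check_combination(num));
            return 0;
    }

The hunk continues below with the remaining P2P and MBSS limits.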
p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_AP); + p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT); + p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE); + combo[c].max_interfaces = i; + combo[c].n_limits = i; + combo[c].limits = p2p_limits; + } + + if (mbss) { + c++; + combo[c].beacon_int_infra_match = true; + combo[c].num_different_channels = 1; + mbss_limits[0].max = 4; + mbss_limits[0].types = BIT(NL80211_IFTYPE_AP); + combo[c].max_interfaces = 4; + combo[c].n_limits = 1; + combo[c].limits = mbss_limits; + } + wiphy->n_iface_combinations = n_combos; wiphy->iface_combinations = combo; - wiphy->n_iface_combinations = 1; return 0; err: - kfree(limits); + kfree(c0_limits); + kfree(p2p_limits); + kfree(mbss_limits); kfree(combo); return -ENOMEM; } @@ -5785,7 +5894,10 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy) static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; + const struct ieee80211_iface_combination *combo; struct ieee80211_supported_band *band; + u16 max_interfaces = 0; __le32 bandlist[3]; u32 n_bands; int err, i; @@ -5798,6 +5910,24 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) if (err) return err; + for (i = 0, combo = wiphy->iface_combinations; + i < wiphy->n_iface_combinations; i++, combo++) { + max_interfaces = max(max_interfaces, combo->max_interfaces); + } + + for (i = 0; i < max_interfaces && i < ARRAY_SIZE(drvr->addresses); + i++) { + u8 *addr = drvr->addresses[i].addr; + + memcpy(addr, drvr->mac, ETH_ALEN); + if (i) { + addr[0] |= BIT(1); + addr[ETH_ALEN - 1] ^= i; + } + } + wiphy->addresses = drvr->addresses; + wiphy->n_addresses = i; + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->cipher_suites = __wl_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); @@ -6059,11 +6189,15 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, static void brcmf_free_wiphy(struct wiphy *wiphy) { + int i; + if (!wiphy) return; - if (wiphy->iface_combinations) - kfree(wiphy->iface_combinations->limits); + if (wiphy->iface_combinations) { + for (i = 0; i < wiphy->n_iface_combinations; i++) + kfree(wiphy->iface_combinations[i].limits); + } kfree(wiphy->iface_combinations); if (wiphy->bands[IEEE80211_BAND_2GHZ]) { kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h index fd74a9c6e..746304121 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h @@ -21,6 +21,7 @@ #ifndef BRCMFMAC_CORE_H #define BRCMFMAC_CORE_H +#include <net/cfg80211.h> #include "fweh.h" #define TOE_TX_CSUM_OL 0x00000001 @@ -118,6 +119,8 @@ struct brcmf_pub { /* Multicast data packets sent to dongle */ unsigned long tx_multicast; + struct mac_address addresses[BRCMF_MAX_IFS]; + struct brcmf_if *iflist[BRCMF_MAX_IFS]; struct mutex proto_block; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c index 99ff4b7df..1f6d73ed8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c @@ -19,6 +19,7 @@ #include <linux/device.h> #include <linux/firmware.h> #include <linux/module.h> +#include <linux/bcm47xx_nvram.h> #include "debug.h" #include "firmware.h" @@ -426,18 +427,32 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) struct brcmf_fw *fwctx = ctx; u32 nvram_length = 0; void *nvram =
NULL; + u8 *data = NULL; + size_t data_len; + bool raw_nvram; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); - if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) - goto fail; + if (fw && fw->data) { + data = (u8 *)fw->data; + data_len = fw->size; + raw_nvram = false; + } else { + data = bcm47xx_nvram_get_contents(&data_len); + if (!data && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) + goto fail; + raw_nvram = true; + } - if (fw) { - nvram = brcmf_fw_nvram_strip(fw->data, fw->size, &nvram_length, + if (data) + nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length, fwctx->domain_nr, fwctx->bus_nr); + + if (raw_nvram) + bcm47xx_nvram_release_contents(data); + if (fw) release_firmware(fw); - if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) - goto fail; - } + if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) + goto fail; fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); kfree(fwctx); @@ -473,15 +488,9 @@ static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) if (!ret) return; - /* when nvram is optional call .done() callback here */ - if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) { - fwctx->done(fwctx->dev, fw, NULL, 0); - kfree(fwctx); - return; - } + brcmf_fw_request_nvram_done(NULL, fwctx); + return; - /* failed nvram request */ - release_firmware(fw); fail: brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); device_release_driver(fwctx->dev); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c index 59440631f..8d1ab4ab5 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c @@ -194,11 +194,15 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid, spin_lock_irqsave(&flow->block_lock, flags); ring = flow->rings[flowid]; + if (ring->blocked == blocked) { + spin_unlock_irqrestore(&flow->block_lock, flags); + return; + } ifidx = brcmf_flowring_ifidx_get(flow, flowid); currently_blocked = false; for (i = 0; i < flow->nrofrings; i++) { - if (flow->rings[i]) { + if ((flow->rings[i]) && (i != flowid)) { ring = flow->rings[i]; if ((ring->status == RING_OPEN) && (brcmf_flowring_ifidx_get(flow, i) == ifidx)) { @@ -209,8 +213,8 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid, } } } - ring->blocked = blocked; - if (currently_blocked == blocked) { + flow->rings[flowid]->blocked = blocked; + if (currently_blocked) { spin_unlock_irqrestore(&flow->block_lock, flags); return; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h index cbf033f59..1326898d6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h @@ -85,7 +85,6 @@ struct brcmf_event; BRCMF_ENUM_DEF(IF, 54) \ BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \ BRCMF_ENUM_DEF(RSSI, 56) \ - BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \ BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \ BRCMF_ENUM_DEF(ACTION_FRAME, 59) \ BRCMF_ENUM_DEF(ACTION_FRAME_COMPLETE, 60) \ @@ -103,8 +102,7 @@ struct brcmf_event; BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \ BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \ BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \ - BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \ - BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128) + BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) #define BRCMF_ENUM_DEF(id, val) \ BRCMF_E_##id = (val), @@ -112,7 +110,11 @@ struct brcmf_event; /* firmware event codes sent by the dongle */ enum brcmf_fweh_event_code { 
BRCMF_FWEH_EVENT_ENUM_DEFLIST - BRCMF_E_LAST + /* this determines event mask length which must match + * minimum length check in device firmware so it is + * hard-coded here. + */ + BRCMF_E_LAST = 139 }; #undef BRCMF_ENUM_DEF diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 898c3801e..7b2136c9b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -1360,6 +1360,60 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid) } } +#ifdef DEBUG +static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); + struct brcmf_pub *drvr = bus_if->drvr; + struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; + struct brcmf_commonring *commonring; + u16 i; + struct brcmf_flowring_ring *ring; + struct brcmf_flowring_hash *hash; + + commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; + seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth); + commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT]; + seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth); + commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE]; + seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth); + commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE]; + seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth); + commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]; + seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth); + + seq_printf(seq, "\nh2d_flowrings: depth %u\n", + BRCMF_H2D_TXFLOWRING_MAX_ITEM); + seq_puts(seq, "Active flowrings:\n"); + hash = msgbuf->flow->hash; + for (i = 0; i < msgbuf->flow->nrofrings; i++) { + if (!msgbuf->flow->rings[i]) + continue; + ring = msgbuf->flow->rings[i]; + if (ring->status != RING_OPEN) + continue; + commonring = msgbuf->flowrings[i]; + hash = &msgbuf->flow->hash[ring->hash_id]; + seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n" + " ifidx %u, fifo %u, da %pM\n", + i, commonring->r_ptr, commonring->w_ptr, + skb_queue_len(&ring->skblist), ring->blocked, + hash->ifidx, hash->fifo, hash->mac); + } + + return 0; +} +#else +static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) +{ + return 0; +} +#endif int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) { @@ -1460,6 +1514,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) spin_lock_init(&msgbuf->flowring_work_lock); INIT_LIST_HEAD(&msgbuf->work_queue); + brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read); + return 0; fail: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c index 342129138..b45cc12d0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c @@ -2537,15 +2537,6 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus) } } -static void atomic_orr(int val, atomic_t *v) -{ - int old_val; - - old_val = atomic_read(v); - while (atomic_cmpxchg(v, old_val, val | old_val) != old_val) - old_val = atomic_read(v); -} - static int 
brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) { struct brcmf_core *buscore; @@ -2568,7 +2559,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) if (val) { brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret); bus->sdcnt.f1regdata++; - atomic_orr(val, &bus->intstatus); + atomic_or(val, &bus->intstatus); } return ret; @@ -2685,7 +2676,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) /* Keep still-pending events for next scheduling */ if (intstatus) - atomic_orr(intstatus, &bus->intstatus); + atomic_or(intstatus, &bus->intstatus); brcmf_sdio_clrintr(bus); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index a2323e4da..af8a16e94 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -1471,9 +1471,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl, wl->timers = t; #ifdef DEBUG - t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC); - if (t->name) - strcpy(t->name, name); + t->name = kstrdup(name, GFP_ATOMIC); #endif return t; diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index 7603546d2..29185aecc 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -467,7 +467,6 @@ static struct spi_driver spi_driver = { .remove = cw1200_spi_disconnect, .driver = { .name = "cw1200_wlan_spi", - .bus = &spi_bus_type, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &cw1200_pm_ops, diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 01de1a3bf..80d4228ba 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -865,7 +865,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local, switch(type) { case HOSTAP_INTERFACE_AP: - dev->tx_queue_len = 0; /* use main radio device queue */ + dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */ dev->netdev_ops = &hostap_mgmt_netdev_ops; dev->type = ARPHRD_IEEE80211; dev->header_ops = &hostap_80211_ops; @@ -874,7 +874,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local, dev->netdev_ops = &hostap_master_ops; break; default: - dev->tx_queue_len = 0; /* use main radio device queue */ + dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */ dev->netdev_ops = &hostap_netdev_ops; } diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 764a7e2b8..85182461a 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -1410,7 +1410,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv) static int ipw2100_hw_phy_off(struct ipw2100_priv *priv) { -#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000) +#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50)) struct host_command cmd = { .host_command = CARD_DISABLE_PHY_OFF, diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h index c6d78790c..193947865 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/ipw2x00/ipw2100.h @@ -746,7 +746,7 @@ struct ipw2100_priv { #define IPW_REG_GPIO IPW_REG_DOMAIN_0_OFFSET + 0x0030 #define IPW_REG_FW_TYPE IPW_REG_DOMAIN_1_OFFSET + 0x0188 #define IPW_REG_FW_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x018C -#define IPW_REG_FW_COMPATABILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190 +#define IPW_REG_FW_COMPATIBILITY_VERSION 
IPW_REG_DOMAIN_1_OFFSET + 0x0190 #define IPW_REG_INDIRECT_ADDR_MASK 0x00FFFFFC diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c index 9fa15b410..e8b5545ca 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/iwlegacy/3945-mac.c @@ -3259,7 +3259,7 @@ il3945_show_measurement(struct device *d, struct device_attribute *attr, while (size && PAGE_SIZE - len) { hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, - PAGE_SIZE - len, 1); + PAGE_SIZE - len, true); len = strlen(buf); if (PAGE_SIZE - len) buf[len++] = '\n'; diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c index 344010153..908b9f4fe 100644 --- a/drivers/net/wireless/iwlegacy/debug.c +++ b/drivers/net/wireless/iwlegacy/debug.c @@ -515,12 +515,8 @@ il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count, scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n", eeprom_ver); for (ofs = 0; ofs < eeprom_len; ofs += 16) { - pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos, - buf_size - pos, 0); - pos += strlen(buf + pos); - if (buf_size - pos > 0) - buf[pos++] = '\n'; + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n", + ofs, ptr + ofs); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h index c160dad03..991def878 100644 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -122,9 +122,8 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) void iwl_down(struct iwl_priv *priv); void iwl_cancel_deferred_work(struct iwl_priv *priv); void iwlagn_prepare_restart(struct iwl_priv *priv); -int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); bool iwl_check_for_ct_kill(struct iwl_priv *priv); @@ -216,11 +215,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); -int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb); +void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); static inline u32 iwl_tx_status_to_mac80211(u32 status) { @@ -277,9 +274,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, /* bt coex */ void iwlagn_send_advance_bt_config(struct iwl_priv *priv); -int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv); void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv); void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv); @@ -332,8 +326,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_link_quality_cmd *lq, u8 flags, bool init); -int iwl_add_sta_callback(struct iwl_priv *priv, 
struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_sta *sta); @@ -480,7 +473,7 @@ do { \ } while (0) #endif /* CONFIG_IWLWIFI_DEBUG */ -extern const char *const iwl_dvm_cmd_strings[REPLY_MAX]; +extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1]; static inline const char *iwl_dvm_get_cmd_string(u8 cmd) { diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c index 0ffb6ff1a..b15e44f8d 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -310,12 +310,8 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, pos += scnprintf(buf + pos, buf_size - pos, "NVM version: 0x%x\n", nvm_ver); for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { - pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, - buf_size - pos, 0); - pos += strlen(buf + pos); - if (buf_size - pos > 0) - buf[pos++] = '\n'; + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n", + ofs, ptr + ofs); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index 3811878ab..0ba3e56d6 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -669,6 +669,8 @@ struct iwl_priv { /* ieee device used by generic ieee processing code */ struct ieee80211_hw *hw; + struct napi_struct *napi; + struct list_head calib_results; struct workqueue_struct *workqueue; @@ -678,9 +680,8 @@ struct iwl_priv { enum ieee80211_band band; u8 valid_contexts; - int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb); struct iwl_notif_wait_data notif_wait; diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 1d2223df5..e18629a16 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -659,9 +659,8 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv, return need_update; } -int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data; @@ -669,7 +668,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) { /* bt coex disabled */ - return 0; + return; } IWL_DEBUG_COEX(priv, "BT Coex notification:\n"); @@ -714,7 +713,6 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, /* FIXME: based on notification, adjust the prio_boost */ priv->bt_ci_compliance = coex->bt_ci_compliance; - return 0; } void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv) @@ -1022,7 +1020,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64( + aes_sc[i].pn = cpu_to_le64( (u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c 
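The dvm hunks that follow convert every rx handler from int (which only ever returned 0) to void and dispatch them through the priv->rx_handlers[] table indexed by command id. A compact user-space sketch of that dispatch-table shape (the command id, struct and handler below are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define REPLY_MAX 0x100

    struct pkt { uint8_t cmd; const char *data; };

    /* One void handler per command id, as in the converted code. */
    static void (*rx_handlers[REPLY_MAX])(const struct pkt *pkt);

    static void handle_alive(const struct pkt *pkt)
    {
            printf("alive: %s\n", pkt->data);
    }

    static void dispatch(const struct pkt *pkt)
    {
            if (rx_handlers[pkt->cmd])
                    rx_handlers[pkt->cmd](pkt);
            else
                    printf("no handler needed for 0x%02x\n", pkt->cmd);
    }

    int main(void)
    {
            struct pkt p = { .cmd = 0x01, .data = "ok" };

            rx_handlers[0x01] = handle_alive;
            dispatch(&p);
            return 0;
    }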
index 7acaa266b..453f7c315 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -250,12 +250,24 @@ static int __iwl_up(struct iwl_priv *priv) } } + ret = iwl_trans_start_hw(priv->trans); + if (ret) { + IWL_ERR(priv, "Failed to start HW: %d\n", ret); + goto error; + } + ret = iwl_run_init_ucode(priv); if (ret) { IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); goto error; } + ret = iwl_trans_start_hw(priv->trans); + if (ret) { + IWL_ERR(priv, "Failed to start HW: %d\n", ret); + goto error; + } + ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); @@ -432,7 +444,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) u32 error_id; } err_info; struct iwl_notification_wait status_wait; - static const u8 status_cmd[] = { + static const u16 status_cmd[] = { REPLY_WOWLAN_GET_STATUS, }; struct iwlagn_wowlan_status status_data = {}; diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 234e30f49..e7616f0ee 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -2029,17 +2029,6 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) return false; } -static void iwl_napi_add(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight); -} - static const struct iwl_op_mode_ops iwl_dvm_ops = { .start = iwl_op_mode_dvm_start, .stop = iwl_op_mode_dvm_stop, @@ -2052,7 +2041,6 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = { .cmd_queue_full = iwl_cmd_queue_full, .nic_config = iwl_nic_config, .wimax_active = iwl_wimax_active, - .napi_add = iwl_napi_add, }; /***************************************************************************** diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 3bd7c86e9..cef921c1a 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -1416,11 +1416,11 @@ static int rs_switch_to_siso(struct iwl_priv *priv, /* * Try to switch to new modulation mode from legacy */ -static int rs_move_legacy_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - int index) +static void rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + int index) { struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct iwl_scale_tbl_info *search_tbl = @@ -1575,7 +1575,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv, } search_tbl->lq_type = LQ_NONE; - return 0; + return; out: lq_sta->search_better_tbl = 1; @@ -1584,17 +1584,15 @@ out: tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; - return 0; - } /* * Try to switch to new modulation mode from SISO */ -static int rs_move_siso_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) +static void rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) { u8 is_green = lq_sta->is_green; struct iwl_scale_tbl_info *tbl = 
&(lq_sta->lq_info[lq_sta->active_tbl]); @@ -1747,7 +1745,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, break; } search_tbl->lq_type = LQ_NONE; - return 0; + return; out: lq_sta->search_better_tbl = 1; @@ -1756,17 +1754,15 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, tbl->action = IWL_SISO_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; - - return 0; } /* * Try to switch to new modulation mode from MIMO2 */ -static int rs_move_mimo2_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) +static void rs_move_mimo2_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) { s8 is_green = lq_sta->is_green; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); @@ -1917,7 +1913,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv, break; } search_tbl->lq_type = LQ_NONE; - return 0; + return; out: lq_sta->search_better_tbl = 1; tbl->action++; @@ -1926,17 +1922,15 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv, if (update_search_tbl_counter) search_tbl->action = tbl->action; - return 0; - } /* * Try to switch to new modulation mode from MIMO3 */ -static int rs_move_mimo3_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) +static void rs_move_mimo3_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) { s8 is_green = lq_sta->is_green; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); @@ -2093,7 +2087,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv, break; } search_tbl->lq_type = LQ_NONE; - return 0; + return; out: lq_sta->search_better_tbl = 1; tbl->action++; @@ -2101,9 +2095,6 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv, tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; - - return 0; - } /* diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c index debec963c..4a45b0b59 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/iwlwifi/dvm/rx.c @@ -39,7 +39,7 @@ #define IWL_CMD_ENTRY(x) [x] = #x -const char *const iwl_dvm_cmd_strings[REPLY_MAX] = { +const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = { IWL_CMD_ENTRY(REPLY_ALIVE), IWL_CMD_ENTRY(REPLY_ERROR), IWL_CMD_ENTRY(REPLY_ECHO), @@ -123,9 +123,8 @@ const char *const iwl_dvm_cmd_strings[REPLY_MAX] = { * ******************************************************************************/ -static int iwlagn_rx_reply_error(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_error_resp *err_resp = (void *)pkt->data; @@ -136,11 +135,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv, err_resp->cmd_id, le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_info)); - return 0; } -static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_csa_notification *csa = (void *)pkt->data; @@ -152,7 +149,7 @@ static 
int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, struct iwl_rxon_cmd *rxon = (void *)&ctx->active; if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - return 0; + return; if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) { rxon->channel = csa->channel; @@ -165,13 +162,11 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, le16_to_cpu(csa->channel)); iwl_chswitch_done(priv, false); } - return 0; } -static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_spectrum_notification *report = (void *)pkt->data; @@ -179,17 +174,15 @@ static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv, if (!report->state) { IWL_DEBUG_11H(priv, "Spectrum Measure Notification: Start\n"); - return 0; + return; } memcpy(&priv->measure_report, report, sizeof(*report)); priv->measurement_status |= MEASUREMENT_READY; - return 0; } -static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { #ifdef CONFIG_IWLWIFI_DEBUG struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -197,24 +190,20 @@ static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv, IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", sleep->pm_sleep_mode, sleep->pm_wakeup_src); #endif - return 0; } -static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 __maybe_unused len = iwl_rx_packet_len(pkt); IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len); iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len); - return 0; } -static int iwlagn_rx_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwlagn_beacon_notif *beacon = (void *)pkt->data; @@ -232,8 +221,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv, #endif priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); - - return 0; } /** @@ -448,9 +435,8 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv, } #endif -static int iwlagn_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { unsigned long stamp = jiffies; const int reg_recalib_period = 60; @@ -505,7 +491,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv, len, sizeof(struct iwl_bt_notif_statistics), sizeof(struct iwl_notif_statistics)); spin_unlock(&priv->statistics.lock); - return 0; + return; } change = common->temperature != priv->statistics.common.temperature || @@ -550,13 +536,10 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv, priv->lib->temperature(priv); spin_unlock(&priv->statistics.lock); - - return 0; } -static int iwlagn_rx_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) 
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_notif_statistics *stats = (void *)pkt->data; @@ -572,15 +555,14 @@ static int iwlagn_rx_reply_statistics(struct iwl_priv *priv, #endif IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); } - iwlagn_rx_statistics(priv, rxb, cmd); - return 0; + + iwlagn_rx_statistics(priv, rxb); } /* Handle notification from uCode that card's power state is changing * due to software, hardware, or critical temperature RFKILL */ -static int iwlagn_rx_card_state_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; @@ -627,12 +609,10 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv, test_bit(STATUS_RF_KILL_HW, &priv->status))) wiphy_rfkill_set_hw_state(priv->hw->wiphy, test_bit(STATUS_RF_KILL_HW, &priv->status)); - return 0; } -static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -649,14 +629,12 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv, if (!test_bit(STATUS_SCANNING, &priv->status)) iwl_init_sensitivity(priv); } - return 0; } /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ -static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -664,7 +642,6 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, priv->ampdu_ref++; memcpy(&priv->last_phy_res, pkt->data, sizeof(struct iwl_rx_phy_res)); - return 0; } /* @@ -786,7 +763,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - ieee80211_rx(priv->hw, skb); + ieee80211_rx_napi(priv->hw, skb, priv->napi); } static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) @@ -890,9 +867,8 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv, } /* Called for REPLY_RX_MPDU_CMD */ -static int iwlagn_rx_reply_rx(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct ieee80211_hdr *header; struct ieee80211_rx_status rx_status = {}; @@ -906,7 +882,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, if (!priv->last_phy_res_valid) { IWL_ERR(priv, "MPDU frame without cached PHY data\n"); - return 0; + return; } phy_res = &priv->last_phy_res; amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data; @@ -919,14 +895,14 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, if ((unlikely(phy_res->cfg_phy_cnt > 20))) { IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n", phy_res->cfg_phy_cnt); - return 0; + return; } if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status)); - return 0; + 
return; } /* This will be used in several places later */ @@ -998,12 +974,10 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, rxb, &rx_status); - return 0; } -static int iwlagn_rx_noa_notification(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_noa_notification(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_wipan_noa_data *new_data, *old_data; struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -1041,8 +1015,6 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv, if (old_data) kfree_rcu(old_data, rcu_head); - - return 0; } /** @@ -1053,8 +1025,7 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv, */ void iwl_setup_rx_handlers(struct iwl_priv *priv) { - int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); + void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); handlers = priv->rx_handlers; @@ -1102,12 +1073,11 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) iwlagn_bt_rx_handler_setup(priv); } -int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - int err = 0; /* * Do the notification wait before RX handlers so @@ -1121,12 +1091,11 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, * rx_handlers table. See iwl_setup_rx_handlers() */ if (priv->rx_handlers[pkt->hdr.cmd]) { priv->rx_handlers_stats[pkt->hdr.cmd]++; - err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd); + priv->rx_handlers[pkt->hdr.cmd](priv, rxb); } else { /* No handling needed */ IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n", iwl_dvm_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); } - return err; } diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index ed50de636..85ceceb34 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 
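The two nvm debugfs hunks earlier in this patch (iwlegacy debug.c and dvm debugfs.c) collapse a manual hex_dump_to_buffer() loop into a single scnprintf() using the kernel's %16ph format extension, which renders up to 16 bytes as space-separated hex. A rough user-space approximation of what %16ph prints:

    #include <stdint.h>
    #include <stdio.h>

    /* Print up to 16 bytes as "aa bb cc ...", like the kernel's %16ph. */
    static void print_hex16(const uint8_t *p, size_t n)
    {
            size_t end = n < 16 ? n : 16;

            for (size_t i = 0; i < end; i++)
                    printf("%02x%s", p[i], i + 1 < end ? " " : "");
            putchar('\n');
    }

    int main(void)
    {
            uint8_t eeprom[16] = { 0xde, 0xad, 0xbe, 0xef };

            printf("0x%.4x ", 0); /* leading offset, as in the hunks */
            print_hex16(eeprom, sizeof(eeprom));
            return 0;
    }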
+ * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -123,7 +124,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, __le32 old_filter = send->filter_flags; u8 old_dev_type = send->dev_type; int ret; - static const u8 deactivate_cmd[] = { + static const u16 deactivate_cmd[] = { REPLY_WIPAN_DEACTIVATION_COMPLETE }; diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c index 43bef901e..648159495 100644 --- a/drivers/net/wireless/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/iwlwifi/dvm/scan.c @@ -247,9 +247,8 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) } /* Service response to REPLY_SCAN_CMD (0x80) */ -static int iwl_rx_reply_scan(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { #ifdef CONFIG_IWLWIFI_DEBUG struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -257,13 +256,11 @@ static int iwl_rx_reply_scan(struct iwl_priv *priv, IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status); #endif - return 0; } /* Service SCAN_START_NOTIFICATION (0x82) */ -static int iwl_rx_scan_start_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_scanstart_notification *notif = (void *)pkt->data; @@ -277,14 +274,11 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv, le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); - - return 0; } /* Service SCAN_RESULTS_NOTIFICATION (0x83) */ -static int iwl_rx_scan_results_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { #ifdef CONFIG_IWLWIFI_DEBUG struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -303,13 +297,11 @@ static int iwl_rx_scan_results_notif(struct iwl_priv *priv, le32_to_cpu(notif->statistics[0]), le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); #endif - return 0; } /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ -static int iwl_rx_scan_complete_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data; @@ -356,7 +348,6 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv, queue_work(priv->workqueue, &priv->bt_traffic_change_work); } - return 0; } void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index 6ec86adbe..0fa67d3b7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -60,41 +60,28 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) return 0; } -static int iwl_process_add_sta_resp(struct iwl_priv *priv, - struct iwl_addsta_cmd *addsta, - struct iwl_rx_packet *pkt) +static void iwl_process_add_sta_resp(struct iwl_priv *priv, + struct iwl_rx_packet *pkt) { struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data; - u8 sta_id = 
addsta->sta.sta_id; - int ret = -EIO; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", - pkt->hdr.flags); - return ret; - } - - IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", - sta_id); + IWL_DEBUG_INFO(priv, "Processing response for adding station\n"); spin_lock_bh(&priv->sta_lock); switch (add_sta_resp->status) { case ADD_STA_SUCCESS_MSK: IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); - ret = iwl_sta_ucode_activate(priv, sta_id); break; case ADD_STA_NO_ROOM_IN_TABLE: - IWL_ERR(priv, "Adding station %d failed, no room in table.\n", - sta_id); + IWL_ERR(priv, "Adding station failed, no room in table.\n"); break; case ADD_STA_NO_BLOCK_ACK_RESOURCE: - IWL_ERR(priv, "Adding station %d failed, no block ack " - "resource.\n", sta_id); + IWL_ERR(priv, + "Adding station failed, no block ack resource.\n"); break; case ADD_STA_MODIFY_NON_EXIST_STA: - IWL_ERR(priv, "Attempting to modify non-existing station %d\n", - sta_id); + IWL_ERR(priv, "Attempting to modify non-existing station\n"); break; default: IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", @@ -102,37 +89,14 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv, break; } - IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", - priv->stations[sta_id].sta.mode == - STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", - sta_id, priv->stations[sta_id].sta.sta.addr); - - /* - * XXX: The MAC address in the command buffer is often changed from - * the original sent to the device. That is, the MAC address - * written to the command buffer often is not the same MAC address - * read from the command buffer when the command returns. This - * issue has not yet been resolved and this debugging is left to - * observe the problem. - */ - IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", - priv->stations[sta_id].sta.mode == - STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", - addsta->sta.addr); spin_unlock_bh(&priv->sta_lock); - - return ret; } -int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - if (!cmd) - return 0; - - return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt); + iwl_process_add_sta_resp(priv, pkt); } int iwl_send_add_sta(struct iwl_priv *priv, @@ -146,6 +110,8 @@ int iwl_send_add_sta(struct iwl_priv *priv, .len = { sizeof(*sta), }, }; u8 sta_id __maybe_unused = sta->sta.sta_id; + struct iwl_rx_packet *pkt; + struct iwl_add_sta_resp *add_sta_resp; IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, flags & CMD_ASYNC ? 
"a" : ""); @@ -159,16 +125,22 @@ int iwl_send_add_sta(struct iwl_priv *priv, if (ret || (flags & CMD_ASYNC)) return ret; - /*else the command was successfully sent in SYNC mode, need to free - * the reply page */ - iwl_free_resp(&cmd); + pkt = cmd.resp_pkt; + add_sta_resp = (void *)pkt->data; - if (cmd.handler_status) - IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__, - cmd.handler_status); + /* debug messages are printed in the handler */ + if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) { + spin_lock_bh(&priv->sta_lock); + ret = iwl_sta_ucode_activate(priv, sta_id); + spin_unlock_bh(&priv->sta_lock); + } else { + ret = -EIO; + } - return cmd.handler_status; + iwl_free_resp(&cmd); + + return ret; } bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, @@ -452,6 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, struct iwl_rx_packet *pkt; int ret; struct iwl_rem_sta_cmd rm_sta_cmd; + struct iwl_rem_sta_resp *rem_sta_resp; struct iwl_host_cmd cmd = { .id = REPLY_REMOVE_STA, @@ -471,29 +444,23 @@ static int iwl_send_remove_station(struct iwl_priv *priv, return ret; pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", - pkt->hdr.flags); - ret = -EIO; - } + rem_sta_resp = (void *)pkt->data; - if (!ret) { - struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data; - switch (rem_sta_resp->status) { - case REM_STA_SUCCESS_MSK: - if (!temporary) { - spin_lock_bh(&priv->sta_lock); - iwl_sta_ucode_deactivate(priv, sta_id); - spin_unlock_bh(&priv->sta_lock); - } - IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); - break; - default: - ret = -EIO; - IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); - break; + switch (rem_sta_resp->status) { + case REM_STA_SUCCESS_MSK: + if (!temporary) { + spin_lock_bh(&priv->sta_lock); + iwl_sta_ucode_deactivate(priv, sta_id); + spin_unlock_bh(&priv->sta_lock); } + IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); + break; + default: + ret = -EIO; + IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); + break; } + iwl_free_resp(&cmd); return ret; diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 275df12a6..bddd19769 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1128,8 +1128,7 @@ static void iwl_check_abort_status(struct iwl_priv *priv, } } -int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); @@ -1273,8 +1272,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, skb = __skb_dequeue(&skbs); ieee80211_tx_status(priv->hw, skb); } - - return 0; } /** @@ -1283,9 +1280,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, * Handles block-acknowledge notification from device, which reports success * of frames sent via aggregation. 
*/ -int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; @@ -1306,7 +1302,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, if (scd_flow >= priv->cfg->base_params->num_of_queues) { IWL_ERR(priv, "BUG_ON scd_flow is bigger than number of queues\n"); - return 0; + return; } sta_id = ba_resp->sta_id; @@ -1319,7 +1315,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, if (unlikely(ba_resp->bitmap)) IWL_ERR(priv, "Received BA when not expected\n"); spin_unlock_bh(&priv->sta_lock); - return 0; + return; } if (unlikely(scd_flow != agg->txq_id)) { @@ -1333,7 +1329,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n", scd_flow, sta_id, tid, agg->txq_id); spin_unlock_bh(&priv->sta_lock); - return 0; + return; } __skb_queue_head_init(&reclaimed_skbs); @@ -1413,6 +1409,4 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, skb = __skb_dequeue(&reclaimed_skbs); ieee80211_tx_status(priv->hw, skb); } - - return 0; } diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 5244e43bf..931a8e426 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -3,6 +3,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -327,7 +328,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, const struct fw_img *fw; int ret; enum iwl_ucode_type old_type; - static const u8 alive_cmd[] = { REPLY_ALIVE }; + static const u16 alive_cmd[] = { REPLY_ALIVE }; fw = iwl_get_ucode_image(priv, ucode_type); if (WARN_ON(!fw)) @@ -406,7 +407,7 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait, int iwl_run_init_ucode(struct iwl_priv *priv) { struct iwl_notification_wait calib_wait; - static const u8 calib_complete[] = { + static const u16 calib_complete[] = { CALIBRATION_RES_NOTIFICATION, CALIBRATION_COMPLETE_NOTIFICATION }; diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 54cc2c135..4d5d8c0d7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,14 +69,14 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 15 +#define IWL7260_UCODE_API_MAX 17 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 12 #define IWL3165_UCODE_API_OK 13 /* Lowest firmware API version supported */ -#define IWL7260_UCODE_API_MIN 10 +#define IWL7260_UCODE_API_MIN 12 #define IWL3165_UCODE_API_MIN 13 /* NVM versions */ diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 5882da5fb..251e97d34 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -69,13 +69,13 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 15 +#define IWL8000_UCODE_API_MAX 17 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 12 /* Lowest firmware API version 
supported */ -#define IWL8000_UCODE_API_MIN 10 +#define IWL8000_UCODE_API_MIN 12 /* NVM versions */ #define IWL8000_NVM_VERSION 0x0a1d @@ -97,8 +97,9 @@ #define DEFAULT_NVM_FILE_FAMILY_8000B "/*(DEBLOBBED)*/" #define DEFAULT_NVM_FILE_FAMILY_8000C "/*(DEBLOBBED)*/" -/* Max SDIO RX aggregation size of the ADDBA request/response */ -#define MAX_RX_AGG_SIZE_8260_SDIO 28 +/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */ +#define MAX_RX_AGG_SIZE_8260_SDIO 21 +#define MAX_TX_AGG_SIZE_8260_SDIO 40 /* Max A-MPDU exponent for HT and VHT */ #define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K @@ -154,6 +155,7 @@ static const struct iwl_tt_params iwl8000_tt_params = { .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ .d0i3 = true, \ + .features = NETIF_F_RXCSUM, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL8260_DCCM_OFFSET, \ .dccm_len = IWL8260_DCCM_LEN, \ @@ -203,6 +205,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = { .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, .disable_dummy_notification = true, .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, @@ -216,6 +219,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = { .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, .bt_shared_single_ant = true, .disable_dummy_notification = true, .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 08c14afeb..939fa229c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff { * mode set * @d0i3: device uses d0i3 instead of d3 * @nvm_hw_section_num: the ID of the HW NVM section + * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response @@ -348,6 +349,7 @@ struct iwl_cfg { bool no_power_up_nic_in_init; const char *default_nvm_file_B_step; const char *default_nvm_file_C_step; + netdev_features_t features; unsigned int max_rx_agg_size; bool disable_dummy_notification; unsigned int max_tx_agg_size; diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index faa17f2e3..543abeaff 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -200,6 +200,7 @@ #define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ #define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ #define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ +#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */ #define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ #define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ #define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */ @@ -210,6 +211,7 @@ CSR_INT_BIT_HW_ERR | \ CSR_INT_BIT_FH_TX | \ CSR_INT_BIT_SW_ERR | \ + CSR_INT_BIT_PAGING | \ CSR_INT_BIT_RF_KILL | \ CSR_INT_BIT_SW_RX | \ CSR_INT_BIT_WAKEUP | \ @@ -422,6 +424,7 @@ enum { /* DRAM INT TABLE */ #define 
CSR_DRAM_INT_TBL_ENABLE (1 << 31) +#define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28) #define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) /* diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h index 04e664934..71a78cede 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h @@ -35,8 +35,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data, TP_PROTO(const struct device *dev, struct sk_buff *skb, - void *data, size_t data_len), - TP_ARGS(dev, skb, data, data_len), + u8 hdr_len, size_t data_len), + TP_ARGS(dev, skb, hdr_len, data_len), TP_STRUCT__entry( DEV_ENTRY @@ -45,7 +45,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data, TP_fast_assign( DEV_ASSIGN; if (iwl_trace_data(skb)) - memcpy(__get_dynamic_array(data), data, data_len); + skb_copy_bits(skb, hdr_len, + __get_dynamic_array(data), data_len); ), TP_printk("[%s] TX frame data", __get_str(dev)) ); diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h index 948ce0802..eb4b99a1c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h @@ -36,7 +36,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd, TP_PROTO(const struct device *dev, struct iwl_host_cmd *cmd, u16 total_size, - struct iwl_cmd_header *hdr), + struct iwl_cmd_header_wide *hdr), TP_ARGS(dev, cmd, total_size, hdr), TP_STRUCT__entry( DEV_ENTRY @@ -44,11 +44,14 @@ TRACE_EVENT(iwlwifi_dev_hcmd, __field(u32, flags) ), TP_fast_assign( - int i, offset = sizeof(*hdr); + int i, offset = sizeof(struct iwl_cmd_header); + + if (hdr->group_id) + offset = sizeof(struct iwl_cmd_header_wide); DEV_ASSIGN; __entry->flags = cmd->flags; - memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); + memcpy(__get_dynamic_array(hcmd), hdr, offset); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { if (!cmd->len[i]) @@ -58,8 +61,9 @@ TRACE_EVENT(iwlwifi_dev_hcmd, offset += cmd->len[i]; } ), - TP_printk("[%s] hcmd %#.2x (%ssync)", - __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0], + TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)", + __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1], + ((u8 *)__get_dynamic_array(hcmd))[0], __entry->flags & CMD_ASYNC ? "a" : "") ); diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 8b56901d8..56ed4526a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -372,6 +372,30 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) return 0; } +static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, + const u32 len) +{ + struct iwl_fw_gscan_capabilities *fw_capa = (void *)data; + struct iwl_gscan_capabilities *capa = &fw->gscan_capa; + + if (len < sizeof(*fw_capa)) + return -EINVAL; + + capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size); + capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets); + capa->max_ap_cache_per_scan = + le32_to_cpu(fw_capa->max_ap_cache_per_scan); + capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size); + capa->max_scan_reporting_threshold = + le32_to_cpu(fw_capa->max_scan_reporting_threshold); + capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps); + capa->max_significant_change_aps = + le32_to_cpu(fw_capa->max_significant_change_aps); + capa->max_bssid_history_entries = + le32_to_cpu(fw_capa->max_bssid_history_entries); + return 0; +} + /* * Gets uCode section from tlv. 
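/*
 * Each TLV in the firmware file is a (type, length, value) triple; the
 * parser below walks the stream, dispatches on the type, and validates
 * each fixed-size TLV's length against its payload struct (note the
 * sizeof() checks), so a truncated gscan-capabilities TLV fails early.
 */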
*/ @@ -573,13 +597,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, size_t len = ucode_raw->size; const u8 *data; u32 tlv_len; + u32 usniffer_img; enum iwl_ucode_tlv_type tlv_type; const u8 *tlv_data; char buildstr[25]; - u32 build; + u32 build, paging_mem_size; int num_of_cpus; bool usniffer_images = false; bool usniffer_req = false; + bool gscan_capa = false; if (len < sizeof(*ucode)) { IWL_ERR(drv, "uCode has invalid length: %zd\n", len); @@ -955,12 +981,46 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, IWL_UCODE_REGULAR_USNIFFER, tlv_len); break; + case IWL_UCODE_TLV_PAGING: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + paging_mem_size = le32_to_cpup((__le32 *)tlv_data); + + IWL_DEBUG_FW(drv, + "Paging: paging enabled (size = %u bytes)\n", + paging_mem_size); + + if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) { + IWL_ERR(drv, + "Paging: driver supports up to %lu bytes for paging image\n", + MAX_PAGING_IMAGE_SIZE); + return -EINVAL; + } + + if (paging_mem_size & (FW_PAGING_SIZE - 1)) { + IWL_ERR(drv, + "Paging: image isn't multiple %lu\n", + FW_PAGING_SIZE); + return -EINVAL; + } + + drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size = + paging_mem_size; + usniffer_img = IWL_UCODE_REGULAR_USNIFFER; + drv->fw.img[usniffer_img].paging_mem_size = + paging_mem_size; + break; case IWL_UCODE_TLV_SDIO_ADMA_ADDR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; drv->fw.sdio_adma_addr = le32_to_cpup((__le32 *)tlv_data); break; + case IWL_UCODE_TLV_FW_GSCAN_CAPA: + if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len)) + goto invalid_tlv_len; + gscan_capa = true; + break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; @@ -979,6 +1039,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -EINVAL; } + /* + * If ucode advertises that it supports GSCAN but GSCAN + * capabilities TLV is not present, warn and continue without GSCAN. 
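/*
 * The IWL_UCODE_TLV_PAGING checks above enforce the paging geometry
 * defined in iwl-fw.h: the image must fit in MAX_PAGING_IMAGE_SIZE
 * (32 blocks * 32 KB = 1024 KB) and be a whole multiple of the 4 KB
 * FW_PAGING_SIZE, tested with the power-of-two trick
 * size & (FW_PAGING_SIZE - 1). A self-contained sketch of that check
 * (the sample size value is illustrative only):
 */
#include <stdint.h>
#include <stdio.h>

#define FW_PAGING_SIZE		(1u << 12)		 /* 4 KB pages */
#define PAGING_BLOCK_SIZE	(8 * FW_PAGING_SIZE)	 /* 32 KB blocks */
#define MAX_PAGING_IMAGE_SIZE	(32 * PAGING_BLOCK_SIZE) /* 1024 KB max */

int main(void)
{
	uint32_t paging_mem_size = 0x50000;	/* 320 KB, sample value */

	if (paging_mem_size > MAX_PAGING_IMAGE_SIZE)
		printf("image too big for paging\n");
	else if (paging_mem_size & (FW_PAGING_SIZE - 1))
		printf("image isn't a multiple of 4 KB\n");
	else
		printf("ok: %u pages in %u blocks\n",
		       paging_mem_size / FW_PAGING_SIZE,
		       paging_mem_size / PAGING_BLOCK_SIZE);
	return 0;
}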
+ */ + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && + WARN(!gscan_capa, + "GSCAN is supported but capabilities TLV is unavailable\n")) + __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, + capa->_capa); + return 0; invalid_tlv_len: diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 21302b6f2..acc3d186c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -713,12 +713,12 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_channel *chan = &data->channels[0]; int n = 0, idx = 0; - while (chan->band != band && idx < n_channels) + while (idx < n_channels && chan->band != band) chan = &data->channels[++idx]; sband->channels = &data->channels[idx]; - while (chan->band == band && idx < n_channels) { + while (idx < n_channels && chan->band == band) { chan = &data->channels[++idx]; n++; } diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index d45dc021c..d56064861 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 -/* - * RX related structures and functions - */ -#define RX_FREE_BUFFERS 64 -#define RX_LOW_WATERMARK 8 - /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h index e57dbd0ef..af5b32014 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h @@ -84,6 +84,8 @@ * @IWL_FW_ERROR_DUMP_MEM: chunk of memory * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump. * Structured as &struct iwl_fw_error_dump_trigger_desc. 
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as + * &struct iwl_fw_error_dump_rb */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ @@ -97,6 +99,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_FH_REGS = 8, IWL_FW_ERROR_DUMP_MEM = 9, IWL_FW_ERROR_DUMP_ERROR_INFO = 10, + IWL_FW_ERROR_DUMP_RB = 11, IWL_FW_ERROR_DUMP_MAX, }; @@ -222,6 +225,20 @@ struct iwl_fw_error_dump_mem { u8 data[]; }; +/** + * struct iwl_fw_error_dump_rb - content of a Receive Buffer + * @index: the index of the Receive Buffer in the Rx queue + * @rxq: the RB's Rx queue + * @reserved: + * @data: the content of the Receive Buffer + */ +struct iwl_fw_error_dump_rb { + __le32 index; + __le32 rxq; + __le32 reserved; + u8 data[]; +}; + /** * iwl_fw_error_next_data - advance fw error dump data pointer * @data: previous data block diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index a9b5ae4eb..84653e3d0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -132,12 +132,14 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_API_CHANGES_SET = 29, IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, + IWL_UCODE_TLV_PAGING = 32, IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35, IWL_UCODE_TLV_FW_VERSION = 36, IWL_UCODE_TLV_FW_DBG_DEST = 38, IWL_UCODE_TLV_FW_DBG_CONF = 39, IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, + IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, }; struct iwl_ucode_tlv { @@ -247,9 +249,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power. - * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, - * regardless of the band or the number of the probes. FW will calculate - * the actual dwell time. + * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler * through the dedicated host command. * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. @@ -259,6 +259,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority * instead of 3. 
+ * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size + * (command version 3) that supports per-chain limits */ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3, @@ -266,7 +268,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10, IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11, - IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13, + IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16, IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17, @@ -274,6 +276,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19, IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, + IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27, }; typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; @@ -284,6 +287,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer + * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM) * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current * tx power value into TPC Report action frame and Link Measurement Report @@ -298,6 +302,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command + * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different @@ -305,12 +310,14 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR * is supported. 
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC + * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan */ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, + IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, @@ -320,10 +327,12 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, + IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, }; /* The default calibrate table size if not specified by firmware file */ @@ -341,8 +350,9 @@ enum iwl_ucode_tlv_capa { * For 16.0 uCode and above, there is no differentiation between sections, * just an offset to the HW address. */ -#define IWL_UCODE_SECTION_MAX 12 +#define IWL_UCODE_SECTION_MAX 16 #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC +#define PAGING_SEPARATOR_SECTION 0xAAAABBBB /* uCode version contains 4 values: Major/Minor/API/Serial */ #define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) @@ -412,6 +422,12 @@ enum iwl_fw_dbg_reg_operator { PRPH_ASSIGN, PRPH_SETBIT, PRPH_CLEARBIT, + + INDIRECT_ASSIGN, + INDIRECT_SETBIT, + INDIRECT_CLEARBIT, + + PRPH_BLOCKBIT, }; /** @@ -485,10 +501,13 @@ struct iwl_fw_dbg_conf_hcmd { * * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data + * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to + * collect only monitor data */ enum iwl_fw_dbg_trigger_mode { IWL_FW_DBG_TRIGGER_START = BIT(0), IWL_FW_DBG_TRIGGER_STOP = BIT(1), + IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2), }; /** @@ -718,4 +737,28 @@ struct iwl_fw_dbg_conf_tlv { struct iwl_fw_dbg_conf_hcmd hcmd; } __packed; +/** + * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW + * @max_scan_cache_size: total space allocated for scan results (in bytes). + * @max_scan_buckets: maximum number of channel buckets. + * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. + * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. + * @max_scan_reporting_threshold: max possible report threshold. in percentage. + * @max_hotlist_aps: maximum number of entries for hotlist APs. + * @max_significant_change_aps: maximum number of entries for significant + * change APs. + * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can + * hold. 
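/*
 * All of the gscan capability fields above are little-endian on the wire
 * (__le32 in a __packed struct); iwl_store_gscan_capa() converts each one
 * with le32_to_cpu() into the driver-side copy. A minimal, self-contained
 * sketch of that conversion for a single field (sample bytes only,
 * le32_to_cpu open-coded for portability):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* wire image of one __le32 field, e.g. max_scan_cache_size = 32768 */
	const uint8_t tlv_data[4] = { 0x00, 0x80, 0x00, 0x00 };
	uint32_t max_scan_cache_size;

	/* open-coded le32_to_cpu(): byte 0 is least significant */
	max_scan_cache_size = tlv_data[0] | (tlv_data[1] << 8) |
			      ((uint32_t)tlv_data[2] << 16) |
			      ((uint32_t)tlv_data[3] << 24);
	printf("max_scan_cache_size = %u\n", max_scan_cache_size);
	return 0;
}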
+ */ +struct iwl_fw_gscan_capabilities { + __le32 max_scan_cache_size; + __le32 max_scan_buckets; + __le32 max_ap_cache_per_scan; + __le32 max_rssi_sample_size; + __le32 max_scan_reporting_threshold; + __le32 max_hotlist_aps; + __le32 max_significant_change_aps; + __le32 max_bssid_history_entries; +} __packed; + #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 3e3c9d8b3..45e732150 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -133,6 +133,7 @@ struct fw_desc { struct fw_img { struct fw_desc sec[IWL_UCODE_SECTION_MAX]; bool is_dual_cpus; + u32 paging_mem_size; }; struct iwl_sf_region { @@ -140,6 +141,48 @@ struct iwl_sf_region { u32 size; }; +/* + * Block paging calculations + */ +#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */ +#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */ +#define PAGE_PER_GROUP_2_EXP_SIZE 3 +/* 8 pages per group */ +#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE) +/* don't change, support only 32KB size */ +#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE) +/* 32K == 2^15 */ +#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE) + +/* + * Image paging calculations + */ +#define BLOCK_PER_IMAGE_2_EXP_SIZE 5 +/* 2^5 == 32 blocks per image */ +#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE) +/* maximum image size 1024KB */ +#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE) + +/* Virtual address signature */ +#define PAGING_ADDR_SIG 0xAA000000 + +#define PAGING_CMD_IS_SECURED BIT(9) +#define PAGING_CMD_IS_ENABLED BIT(8) +#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 +#define PAGING_TLV_SECURE_MASK 1 + +/** + * struct iwl_fw_paging + * @fw_paging_phys: page phy pointer + * @fw_paging_block: pointer to the allocated block + * @fw_paging_size: page size + */ +struct iwl_fw_paging { + dma_addr_t fw_paging_phys; + struct page *fw_paging_block; + u32 fw_paging_size; +}; + /** * struct iwl_fw_cscheme_list - a cipher scheme list * @size: a number of entries @@ -150,6 +193,30 @@ struct iwl_fw_cscheme_list { struct iwl_fw_cipher_scheme cs[]; } __packed; +/** + * struct iwl_gscan_capabilities - gscan capabilities supported by FW + * @max_scan_cache_size: total space allocated for scan results (in bytes). + * @max_scan_buckets: maximum number of channel buckets. + * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. + * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. + * @max_scan_reporting_threshold: max possible report threshold. in percentage. + * @max_hotlist_aps: maximum number of entries for hotlist APs. + * @max_significant_change_aps: maximum number of entries for significant + * change APs. + * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can + * hold. 
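/*
 * struct iwl_gscan_capabilities below is the CPU-byte-order mirror of the
 * __packed wire struct iwl_fw_gscan_capabilities above: it is filled once
 * by iwl_store_gscan_capa() at TLV-parse time and then kept in struct
 * iwl_fw as the gscan_capa member.
 */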
+ */ +struct iwl_gscan_capabilities { + u32 max_scan_cache_size; + u32 max_scan_buckets; + u32 max_ap_cache_per_scan; + u32 max_rssi_sample_size; + u32 max_scan_reporting_threshold; + u32 max_hotlist_aps; + u32 max_significant_change_aps; + u32 max_bssid_history_entries; +}; + /** * struct iwl_fw - variables associated with the firmware * @@ -208,6 +275,7 @@ struct iwl_fw { struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; + struct iwl_gscan_capabilities gscan_capa; }; static inline const char *get_fw_dbg_mode_string(int mode) diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c index b5bc959b1..6caf2affb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c +++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -98,7 +99,8 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, continue; for (i = 0; i < w->n_cmds; i++) { - if (w->cmds[i] == pkt->hdr.cmd) { + if (w->cmds[i] == + WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { found = true; break; } @@ -136,7 +138,7 @@ IWL_EXPORT_SYMBOL(iwl_abort_notification_waits); void iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, - const u8 *cmds, int n_cmds, + const u16 *cmds, int n_cmds, bool (*fn)(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data), void *fn_data) @@ -147,7 +149,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, wait_entry->fn = fn; wait_entry->fn_data = fn_data; wait_entry->n_cmds = n_cmds; - memcpy(wait_entry->cmds, cmds, n_cmds); + memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16)); wait_entry->triggered = false; wait_entry->aborted = false; diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h index 95af97a6c..dbe823452 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h +++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -105,7 +106,7 @@ struct iwl_notification_wait { struct iwl_rx_packet *pkt, void *data); void *fn_data; - u8 cmds[MAX_NOTIF_CMDS]; + u16 cmds[MAX_NOTIF_CMDS]; u8 n_cmds; bool triggered, aborted; }; @@ -121,7 +122,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data); void __acquires(wait_entry) iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, struct iwl_notification_wait *wait_entry, - const u8 *cmds, int n_cmds, + const u16 *cmds, int n_cmds, bool (*fn)(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data), void *fn_data); diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index ce1cdd760..b47fe9d6b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -116,10 +116,6 @@ struct iwl_cfg; * May sleep * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the * HCMD this Rx responds to. Can't sleep. - * @napi_add: NAPI initialization. The transport is fully responsible for NAPI, - * but the higher layers need to know about it (in particular mac80211 to - * to able to call the right NAPI RX functions); this function is needed - * to eventually call netif_napi_add() with higher layer involvement. * @queue_full: notifies that a HW queue is full. * Must be atomic and called with BH disabled. * @queue_not_full: notifies that a HW queue is not full any more. @@ -148,13 +144,8 @@ struct iwl_op_mode_ops { const struct iwl_fw *fw, struct dentry *dbgfs_dir); void (*stop)(struct iwl_op_mode *op_mode); - int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); - void (*napi_add)(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight); + void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); void (*queue_full)(struct iwl_op_mode *op_mode, int queue); void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); @@ -188,11 +179,11 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode) op_mode->ops->stop(op_mode); } -static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) { - return op_mode->ops->rx(op_mode, rxb, cmd); + return op_mode->ops->rx(op_mode, napi, rxb); } static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, @@ -260,15 +251,4 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode) return op_mode->ops->exit_d0i3(op_mode); } -static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight) -{ - if (!op_mode->ops->napi_add) - return; - op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight); -} - #endif /* __iwl_op_mode_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 5af1c776d..3ab777f79 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ 
b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -253,6 +253,7 @@ #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) #define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0) +#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18) /* Context Data */ #define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) @@ -291,6 +292,9 @@ /*********************** END TX SCHEDULER *************************************/ +/* tcp checksum offload */ +#define RX_EN_CSUM (0x00a00d88) + /* Oscillator clock */ #define OSC_CLK (0xa04068) #define OSC_CLK_FORCE_CONTROL (0x8) @@ -379,6 +383,8 @@ enum aux_misc_master1_en { #define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 #define RSA_ENABLE 0xA24B08 #define PREG_AUX_BUS_WPROT_0 0xA04CC0 +#define SB_CPU_1_STATUS 0xA01E30 +#define SB_CPU_2_STATUS 0xA01E34 /* FW chicken bits */ #define LMPM_CHICK 0xA01FF8 @@ -386,4 +392,10 @@ enum { LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0), }; +/* FW chicken bits */ +#define LMPM_PAGE_PASS_NOTIF 0xA03824 +enum { + LMPM_PAGE_PASS_NOTIF_POS = BIT(20), +}; + #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 87a230a7f..c829c505e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -122,6 +122,40 @@ #define INDEX_TO_SEQ(i) ((i) & 0xff) #define SEQ_RX_FRAME cpu_to_le16(0x8000) +/* + * those functions retrieve specific information from + * the id field in the iwl_host_cmd struct which contains + * the command id, the group id and the version of the command + * and vice versa +*/ +static inline u8 iwl_cmd_opcode(u32 cmdid) +{ + return cmdid & 0xFF; +} + +static inline u8 iwl_cmd_groupid(u32 cmdid) +{ + return ((cmdid & 0xFF00) >> 8); +} + +static inline u8 iwl_cmd_version(u32 cmdid) +{ + return ((cmdid & 0xFF0000) >> 16); +} + +static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) +{ + return opcode + (groupid << 8) + (version << 16); +} + +/* make u16 wide id out of u8 group and opcode */ +#define WIDE_ID(grp, opcode) ((grp << 8) | opcode) + +/* due to the conversion, this group is special; new groups + * should be defined in the appropriate fw-api header files + */ +#define IWL_ALWAYS_LONG_GROUP 1 + /** * struct iwl_cmd_header * @@ -130,7 +164,7 @@ */ struct iwl_cmd_header { u8 cmd; /* Command ID: REPLY_RXON, etc. */ - u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ + u8 group_id; /* * The driver sets up the sequence number to values of its choosing. * uCode does not use this value, but passes it back to the driver @@ -154,9 +188,22 @@ struct iwl_cmd_header { __le16 sequence; } __packed; -/* iwl_cmd_header flags value */ -#define IWL_CMD_FAILED_MSK 0x40 - +/** + * struct iwl_cmd_header_wide + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. + * this is the wide version that contains more information about the command + * like length, version and command type + */ +struct iwl_cmd_header_wide { + u8 cmd; + u8 group_id; + __le16 sequence; + __le16 length; + u8 reserved; + u8 version; +} __packed; #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ #define FH_RSCSR_FRAME_INVALID 0x55550000 @@ -201,6 +248,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle. * @CMD_WAKE_UP_TRANS: The command response should wake up the trans * (i.e. mark it as non-idle). 
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to + * check that we leave enough room for the TBs bitmap which needs 20 bits. */ enum CMD_MODE { CMD_ASYNC = BIT(0), @@ -210,6 +259,8 @@ enum CMD_MODE { CMD_SEND_IN_IDLE = BIT(4), CMD_MAKE_TRANS_IDLE = BIT(5), CMD_WAKE_UP_TRANS = BIT(6), + + CMD_TB_BITMAP_POS = 11, }; #define DEF_CMD_PAYLOAD_SIZE 320 @@ -222,8 +273,18 @@ enum CMD_MODE { * aren't fully copied and use other TFD space. */ struct iwl_device_cmd { - struct iwl_cmd_header hdr; /* uCode API */ - u8 payload[DEF_CMD_PAYLOAD_SIZE]; + union { + struct { + struct iwl_cmd_header hdr; /* uCode API */ + u8 payload[DEF_CMD_PAYLOAD_SIZE]; + }; + struct { + struct iwl_cmd_header_wide hdr_wide; + u8 payload_wide[DEF_CMD_PAYLOAD_SIZE - + sizeof(struct iwl_cmd_header_wide) + + sizeof(struct iwl_cmd_header)]; + }; + }; } __packed; #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) @@ -261,24 +322,22 @@ enum iwl_hcmd_dataflag { * @resp_pkt: response packet, if %CMD_WANT_SKB was set * @_rx_page_order: (internally used to free response packet) * @_rx_page_addr: (internally used to free response packet) - * @handler_status: return value of the handler of the command - * (put in setup_rx_handlers) - valid for SYNC mode only * @flags: can be CMD_* * @len: array of the lengths of the chunks in data * @dataflags: IWL_HCMD_DFL_* - * @id: id of the host command + * @id: command id of the host command, for wide commands encoding the + * version and group as well */ struct iwl_host_cmd { const void *data[IWL_MAX_CMD_TBS_PER_TFD]; struct iwl_rx_packet *resp_pkt; unsigned long _rx_page_addr; u32 _rx_page_order; - int handler_status; u32 flags; + u32 id; u16 len[IWL_MAX_CMD_TBS_PER_TFD]; u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; - u8 id; }; static inline void iwl_free_resp(struct iwl_host_cmd *cmd) @@ -379,6 +438,7 @@ enum iwl_trans_status { * @bc_table_dword: set to true if the BC table expects the byte count to be * in DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue + * @wide_cmd_header: firmware supports wide host command header * @command_names: array of command names, must be 256 entries * (one for each command); for debugging only * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until @@ -396,6 +456,7 @@ struct iwl_trans_config { bool rx_buf_size_8k; bool bc_table_dword; bool scd_set_active; + bool wide_cmd_header; const char *const *command_names; u32 sdio_adma_addr; @@ -544,10 +605,12 @@ struct iwl_trans_ops { u32 value); void (*ref)(struct iwl_trans *trans); void (*unref)(struct iwl_trans *trans); - void (*suspend)(struct iwl_trans *trans); + int (*suspend)(struct iwl_trans *trans); void (*resume)(struct iwl_trans *trans); - struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans); + struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv + *trigger); }; /** @@ -584,6 +647,8 @@ enum iwl_d0i3_mode { * @cfg - pointer to the configuration * @status: a bit-mask of transport status flags * @dev - pointer to struct device * that represents the device + * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. + * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. * @hw_id: a u32 with the ID of the device / sub-device. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. 
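/*
 * The wide command id helpers added to iwl-trans.h above pack opcode,
 * group id and version into a single u32 and back. A minimal,
 * self-contained sketch of that round trip (the opcode 0x1c and the
 * group/version values are illustrative only):
 */
#include <stdint.h>
#include <stdio.h>

#define WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))

static uint32_t cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
{
	/* mirrors iwl_cmd_id(): opcode bits 0-7, group 8-15, version 16-23 */
	return opcode + (groupid << 8) + (version << 16);
}

int main(void)
{
	uint32_t id = cmd_id(0x1c, 1 /* IWL_ALWAYS_LONG_GROUP */, 0);

	printf("opcode=0x%02x group=%u version=%u wide_id=0x%04x\n",
	       id & 0xff,		/* iwl_cmd_opcode() */
	       (id >> 8) & 0xff,	/* iwl_cmd_groupid() */
	       (id >> 16) & 0xff,	/* iwl_cmd_version() */
	       WIDE_ID((id >> 8) & 0xff, id & 0xff));
	return 0;
}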
@@ -603,6 +668,12 @@ enum iwl_d0i3_mode { * @dbg_conf_tlv: array of pointers to configuration TLVs for debug * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv + * @paging_req_addr: The location where the FW will upload / download the pages + * from. The address is set by the opmode + * @paging_db: Pointer to the opmode paging database, the pointer is set by + * the opmode. + * @paging_download_buf: Buffer used for copying all of the pages before + * downloading them to the FW. The buffer is allocated in the opmode */ struct iwl_trans { const struct iwl_trans_ops *ops; @@ -612,6 +683,7 @@ struct iwl_trans { unsigned long status; struct device *dev; + u32 max_skb_frags; u32 hw_rev; u32 hw_id; char hw_id_str[52]; @@ -639,6 +711,14 @@ struct iwl_trans { struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; u8 dbg_dest_reg_num; + /* + * Paging parameters - All of the parameters should be set by the + * opmode when paging is enabled + */ + u32 paging_req_addr; + struct iwl_fw_paging *paging_db; + void *paging_download_buf; + enum iwl_d0i3_mode d0i3_mode; bool wowlan_d0i3; @@ -730,7 +810,8 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans) static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) { might_sleep(); - trans->ops->d3_suspend(trans, test); + if (trans->ops->d3_suspend) + trans->ops->d3_suspend(trans, test); } static inline int iwl_trans_d3_resume(struct iwl_trans *trans, @@ -738,6 +819,9 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans, bool test) { might_sleep(); + if (!trans->ops->d3_resume) + return 0; + return trans->ops->d3_resume(trans, status, test); } @@ -753,10 +837,12 @@ static inline void iwl_trans_unref(struct iwl_trans *trans) trans->ops->unref(trans); } -static inline void iwl_trans_suspend(struct iwl_trans *trans) +static inline int iwl_trans_suspend(struct iwl_trans *trans) { - if (trans->ops->suspend) - trans->ops->suspend(trans); + if (!trans->ops->suspend) + return 0; + + return trans->ops->suspend(trans); } static inline void iwl_trans_resume(struct iwl_trans *trans) @@ -766,11 +852,12 @@ static inline void iwl_trans_resume(struct iwl_trans *trans) } static inline struct iwl_trans_dump_data * -iwl_trans_dump_data(struct iwl_trans *trans) +iwl_trans_dump_data(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv *trigger) { if (!trans->ops->dump_data) return NULL; - return trans->ops->dump_data(trans); + return trans->ops->dump_data(trans, trigger); } static inline int iwl_trans_send_cmd(struct iwl_trans *trans, diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile index 2d7c3ea3c..8c2c3d13b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/iwlwifi/mvm/Makefile @@ -6,6 +6,7 @@ iwlmvm-y += power.o coex.o coex_legacy.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o +iwlmvm-y += tof.o iwlmvm-$(CONFIG_PM_SLEEP) += d3.o ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index b4737e296..e290ac67d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -725,15 +725,17 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) } } -int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *dev_cmd) 
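/*
 * Here, and in the remaining mvm handlers below, the Rx notification
 * callbacks drop their int return value: a firmware notification has no
 * caller to report back to, so a failed follow-up command (e.g.
 * BT_COEX_UPDATE_CORUN_LUT) is now logged at the point of failure
 * instead of being propagated.
 */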
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd); + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + iwl_mvm_rx_bt_coex_notif_old(mvm, rxb); + return; + } IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); @@ -748,12 +750,6 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); - - /* - * This is an async handler for a notification, returning anything other - * than 0 doesn't make sense even if HCMD failed. - */ - return 0; } void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -947,9 +943,8 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) iwl_mvm_bt_coex_notif_handle(mvm); } -int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *dev_cmd) +void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 ant_isolation = le32_to_cpup((void *)pkt->data); @@ -957,20 +952,23 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, u8 __maybe_unused lower_bound, upper_bound; u8 lut; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd); + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb); + return; + } if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return 0; + return; lockdep_assert_held(&mvm->mutex); /* Ignore updates if we are in force mode */ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return 0; + return; if (ant_isolation == mvm->last_ant_isol) - return 0; + return; for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) if (ant_isolation < antenna_coupling_ranges[lut + 1].range) @@ -989,7 +987,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, mvm->last_ant_isol = ant_isolation; if (mvm->last_corun_lut == lut) - return 0; + return; mvm->last_corun_lut = lut; @@ -1000,6 +998,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20, sizeof(cmd.corun_lut40)); - return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0, - sizeof(cmd), &cmd); + if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0, + sizeof(cmd), &cmd)) + IWL_ERR(mvm, + "failed to send BT_COEX_UPDATE_CORUN_LUT command\n"); } diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index 6ac6de2af..61c07b05f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -1058,9 +1058,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); } -int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *dev_cmd) +void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data; @@ -1083,12 +1082,6 @@ int 
iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old)); iwl_mvm_bt_coex_notif_handle(mvm); - - /* - * This is an async handler for a notification, returning anything other - * than 0 doesn't make sense even if HCMD failed. - */ - return 0; } static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, @@ -1250,14 +1243,12 @@ void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm) iwl_mvm_bt_coex_notif_handle(mvm); } -int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *dev_cmd) +void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 ant_isolation = le32_to_cpup((void *)pkt->data); u8 __maybe_unused lower_bound, upper_bound; - int ret; u8 lut; struct iwl_bt_coex_cmd_old *bt_cmd; @@ -1268,16 +1259,16 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, }; if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return 0; + return; lockdep_assert_held(&mvm->mutex); /* Ignore updates if we are in force mode */ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return 0; + return; if (ant_isolation == mvm->last_ant_isol) - return 0; + return; for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) if (ant_isolation < antenna_coupling_ranges[lut + 1].range) @@ -1296,13 +1287,13 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, mvm->last_ant_isol = ant_isolation; if (mvm->last_corun_lut == lut) - return 0; + return; mvm->last_corun_lut = lut; bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); if (!bt_cmd) - return 0; + return; cmd.data[0] = bt_cmd; bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); @@ -1317,8 +1308,8 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, sizeof(bt_cmd->bt4_corun_lut40)); - ret = iwl_mvm_send_cmd(mvm, &cmd); + if (iwl_mvm_send_cmd(mvm, &cmd)) + IWL_ERR(mvm, "failed to send BT_CONFIG command\n"); kfree(bt_cmd); - return ret; } diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h index beba37548..b8ee3121f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/iwlwifi/mvm/constants.h @@ -102,6 +102,7 @@ #define IWL_MVM_QUOTA_THRESHOLD 4 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #define IWL_MVM_RS_DISABLE_P2P_MIMO 0 +#define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 4165d104e..576187611 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -274,18 +274,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_CCMP: if (sta) { - u8 *pn = seq.ccmp.pn; + u64 pn64; aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + pn64 = atomic64_read(&key->tx_pn); + aes_tx_sc->pn = cpu_to_le64(pn64); } else { aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; } @@ -298,12 +293,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, u8 
*pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); } data->use_rsc_tsc = true; break; @@ -1145,7 +1140,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm) { struct iwl_notification_wait wait_d3; - static const u8 d3_notif[] = { D3_CONFIG_CMD }; + static const u16 d3_notif[] = { D3_CONFIG_CMD }; int ret; iwl_init_notification_wait(&mvm->notif_wait, &wait_d3, @@ -1168,13 +1163,17 @@ remove_notif: int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + ret = iwl_trans_suspend(mvm->trans); + if (ret) + return ret; - iwl_trans_suspend(mvm->trans); mvm->trans->wowlan_d0i3 = wowlan->any; if (mvm->trans->wowlan_d0i3) { /* 'any' trigger means d0i3 usage */ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { - int ret = iwl_mvm_enter_d0i3_sync(mvm); + ret = iwl_mvm_enter_d0i3_sync(mvm); if (ret) return ret; @@ -1183,6 +1182,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) mutex_lock(&mvm->d0i3_suspend_mutex); __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); mutex_unlock(&mvm->d0i3_suspend_mutex); + + iwl_trans_d3_suspend(mvm->trans, false); + return 0; } @@ -1446,15 +1448,15 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: - iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq); iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key); + atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); + ieee80211_set_key_tx_seq(key, &seq); break; } - ieee80211_set_key_tx_seq(key, &seq); /* that's it for this key */ return; @@ -1935,28 +1937,59 @@ out: return 1; } -int iwl_mvm_resume(struct ieee80211_hw *hw) +static int iwl_mvm_resume_d3(struct iwl_mvm *mvm) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + iwl_trans_resume(mvm->trans); + + return __iwl_mvm_resume(mvm, false); +} + +static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) +{ + bool exit_now; + enum iwl_d3_status d3_status; + + iwl_trans_d3_resume(mvm->trans, &d3_status, false); + + /* + * make sure to clear D0I3_DEFER_WAKEUP before + * calling iwl_trans_resume(), which might wait + * for d0i3 exit completion. + */ + mutex_lock(&mvm->d0i3_suspend_mutex); + __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); + exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, + &mvm->d0i3_suspend_flags); + mutex_unlock(&mvm->d0i3_suspend_mutex); + if (exit_now) { + IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n"); + _iwl_mvm_exit_d0i3(mvm); + } iwl_trans_resume(mvm->trans); - if (mvm->hw->wiphy->wowlan_config->any) { - /* 'any' trigger means d0i3 usage */ - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { - int ret = iwl_mvm_exit_d0i3(hw->priv); + if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { + int ret = iwl_mvm_exit_d0i3(mvm->hw->priv); - if (ret) - return ret; - /* - * d0i3 exit will be deferred until reconfig_complete. - * make sure there we are out of d0i3. 
- */ - } - return 0; + if (ret) + return ret; + /* + * d0i3 exit will be deferred until reconfig_complete. + * make sure there we are out of d0i3. + */ } + return 0; +} - return __iwl_mvm_resume(mvm, false); +int iwl_mvm_resume(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* 'any' trigger means d0i3 was used */ + if (hw->wiphy->wowlan_config->any) + return iwl_mvm_resume_d0i3(mvm); + else + return iwl_mvm_resume_d3(mvm); } void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 5c8a65de0..383a31620 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -63,6 +63,7 @@ * *****************************************************************************/ #include "mvm.h" +#include "fw-api-tof.h" #include "debugfs.h" static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, @@ -497,6 +498,731 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } +static inline char *iwl_dbgfs_is_match(char *name, char *buf) +{ + int len = strlen(name); + + return !strncmp(name, buf, len) ? buf + len : NULL; +} + +static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = -EINVAL; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("tof_disabled=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.tof_disabled = value; + goto out; + } + + data = iwl_dbgfs_is_match("one_sided_disabled=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.one_sided_disabled = value; + goto out; + } + + data = iwl_dbgfs_is_match("is_debug_mode=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.is_debug_mode = value; + goto out; + } + + data = iwl_dbgfs_is_match("is_buf=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.is_buf_required = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_tof_cfg=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_config_cmd(mvm); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_enable_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_config_cmd *cmd; + + cmd = &mvm->tof_data.tof_cfg; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n", + cmd->tof_disabled); + pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n", + cmd->one_sided_disabled); + pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n", + cmd->is_debug_mode); + pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n", + cmd->is_buf_required); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_responder_params_write(struct 
ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("burst_period=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (!ret) + mvm->tof_data.responder_cfg.burst_period = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("min_delta_ftm=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.min_delta_ftm = value; + goto out; + } + + data = iwl_dbgfs_is_match("burst_duration=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.burst_duration = value; + goto out; + } + + data = iwl_dbgfs_is_match("num_of_burst_exp=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.num_of_burst_exp = value; + goto out; + } + + data = iwl_dbgfs_is_match("abort_responder=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.abort_responder = value; + goto out; + } + + data = iwl_dbgfs_is_match("get_ch_est=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.get_ch_est = value; + goto out; + } + + data = iwl_dbgfs_is_match("recv_sta_req_params=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.recv_sta_req_params = value; + goto out; + } + + data = iwl_dbgfs_is_match("channel_num=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.channel_num = value; + goto out; + } + + data = iwl_dbgfs_is_match("bandwidth=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.bandwidth = value; + goto out; + } + + data = iwl_dbgfs_is_match("rate=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.rate = value; + goto out; + } + + data = iwl_dbgfs_is_match("bssid=", buf); + if (data) { + u8 *mac = mvm->tof_data.responder_cfg.bssid; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + } + + data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.tsf_timer_offset_msecs = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("toa_offset=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.toa_offset = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("ctrl_ch_position=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.ctrl_ch_position = value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_per_burst=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.ftm_per_burst = value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value; + goto out; + } + + data = iwl_dbgfs_is_match("asap_mode=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.asap_mode = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_responder_cfg=", buf); + 
if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_responder_cmd(mvm, vif); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_responder_config_cmd *cmd; + + cmd = &mvm->tof_data.responder_cfg; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n", + le16_to_cpu(cmd->burst_period)); + pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n", + cmd->burst_duration); + pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n", + cmd->bandwidth); + pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n", + cmd->channel_num); + pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n", + cmd->ctrl_ch_position); + pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n", + cmd->bssid); + pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n", + cmd->min_delta_ftm); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n", + cmd->num_of_burst_exp); + pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate); + pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n", + cmd->abort_responder); + pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n", + cmd->get_ch_est); + pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n", + cmd->recv_sta_req_params); + pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n", + cmd->ftm_per_burst); + pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n", + cmd->ftm_resp_ts_avail); + pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n", + cmd->asap_mode); + pos += scnprintf(buf + pos, bufsz - pos, + "tsf_timer_offset_msecs = %d\n", + le16_to_cpu(cmd->tsf_timer_offset_msecs)); + pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n", + le16_to_cpu(cmd->toa_offset)); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("request_id=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.request_id = value; + goto out; + } + + data = iwl_dbgfs_is_match("initiator=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.initiator = value; + goto out; + } + + data = iwl_dbgfs_is_match("one_sided_los_disable=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.one_sided_los_disable = value; + goto out; + } + + data = iwl_dbgfs_is_match("req_timeout=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.req_timeout = value; + goto out; + } + + data = iwl_dbgfs_is_match("report_policy=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.report_policy = value; + goto out; + } + + 
data = iwl_dbgfs_is_match("macaddr_random=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.macaddr_random = value; + goto out; + } + + data = iwl_dbgfs_is_match("num_of_ap=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.num_of_ap = value; + goto out; + } + + data = iwl_dbgfs_is_match("macaddr_template=", buf); + if (data) { + u8 mac[ETH_ALEN]; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN); + } + + data = iwl_dbgfs_is_match("macaddr_mask=", buf); + if (data) { + u8 mac[ETH_ALEN]; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN); + } + + data = iwl_dbgfs_is_match("ap=", buf); + if (data) { + struct iwl_tof_range_req_ap_entry ap; + int size = sizeof(struct iwl_tof_range_req_ap_entry); + u16 burst_period; + u8 *mac = ap.bssid; + unsigned int i; + + if (sscanf(data, "%u %hhd %hhx %hhx" + "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx" + "%hhx %hhx %hx" + "%hhx %hhx %x" + "%hhx %hhx %hhx %hhx", + &i, &ap.channel_num, &ap.bandwidth, + &ap.ctrl_ch_position, + mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, + &ap.measure_type, &ap.num_of_bursts, + &burst_period, + &ap.samples_per_burst, &ap.retries_per_sample, + &ap.tsf_delta, &ap.location_req, &ap.asap_mode, + &ap.enable_dyn_ack, &ap.rssi) != 20) { + ret = -EINVAL; + goto out; + } + if (i >= IWL_MVM_TOF_MAX_APS) { + IWL_ERR(mvm, "Invalid AP index %d\n", i); + ret = -EINVAL; + goto out; + } + + ap.burst_period = cpu_to_le16(burst_period); + + memcpy(&mvm->tof_data.range_req.ap[i], &ap, size); + goto out; + } + + data = iwl_dbgfs_is_match("send_range_request=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_range_request_cmd(mvm, vif); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_range_req_cmd *cmd; + int i; + + cmd = &mvm->tof_data.range_req; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n", + cmd->request_id); + pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n", + cmd->initiator); + pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n", + cmd->one_sided_los_disable); + pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n", + cmd->req_timeout); + pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n", + cmd->report_policy); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n", + cmd->macaddr_random); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n", + cmd->macaddr_template); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n", + cmd->macaddr_mask); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n", + cmd->num_of_ap); + for (i = 0; i < cmd->num_of_ap; i++) { + struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i]; + + pos += scnprintf(buf + pos, bufsz - pos, + "ap %.2d: channel_num=%hhx bw=%hhx" + " control=%hhx bssid=%pM type=%hhx" + " num_of_bursts=%hhx burst_period=%hx ftm=%hhx" + " retries=%hhx tsf_delta=%x 
location_req=%hhx " + " asap=%hhx enable=%hhx rssi=%hhx\n", + i, ap->channel_num, ap->bandwidth, + ap->ctrl_ch_position, ap->bssid, + ap->measure_type, ap->num_of_bursts, + ap->burst_period, ap->samples_per_burst, + ap->retries_per_sample, ap->tsf_delta, + ap->location_req, ap->asap_mode, + ap->enable_dyn_ack, ap->rssi); + } + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.tsf_timer_offset_msec = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("min_delta_ftm=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.min_delta_ftm = value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.ftm_format_and_bw20M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.ftm_format_and_bw40M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.ftm_format_and_bw80M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("send_range_req_ext=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_range_req_ext_cmd *cmd; + + cmd = &mvm->tof_data.range_req_ext; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, + "tsf_timer_offset_msec = %hx\n", + cmd->tsf_timer_offset_msec); + pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n", + cmd->min_delta_ftm); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw20M = %hhx\n", + cmd->ftm_format_and_bw20M); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw40M = %hhx\n", + cmd->ftm_format_and_bw40M); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw80M = %hhx\n", + cmd->ftm_format_and_bw80M); + + mutex_unlock(&mvm->mutex); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = 0; + int abort_id; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("abort_id=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + 
mvm->tof_data.last_abort_id = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_range_abort=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + abort_id = mvm->tof_data.last_abort_id; + ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[32]; + int pos = 0; + const size_t bufsz = sizeof(buf); + int last_abort_id; + + mutex_lock(&mvm->mutex); + last_abort_id = mvm->tof_data.last_abort_id; + mutex_unlock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n", + last_abort_id); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char *buf; + int pos = 0; + const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256; + struct iwl_tof_range_rsp_ntfy *cmd; + int i, ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + cmd = &mvm->tof_data.range_resp; + + pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n", + cmd->request_id); + pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n", + cmd->request_status); + pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n", + cmd->last_in_batch); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n", + cmd->num_of_aps); + for (i = 0; i < cmd->num_of_aps; i++) { + struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i]; + + pos += scnprintf(buf + pos, bufsz - pos, + "ap %.2d: bssid=%pM status=%hhx bw=%hhx" + " rtt=%x rtt_var=%x rtt_spread=%x" + " rssi=%hhx rssi_spread=%hhx" + " range=%x range_var=%x" + " time_stamp=%x\n", + i, ap->bssid, ap->measure_status, + ap->measure_bw, + ap->rtt, ap->rtt_variance, ap->rtt_spread, + ap->rssi, ap->rssi_spread, ap->range, + ap->range_variance, ap->timestamp); + } + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { @@ -628,6 +1354,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); +MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -671,6 +1403,25 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && + !vif->p2p && (vif->type != 
NL80211_IFTYPE_P2P_DEVICE)) { + if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) + MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, + mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, + S_IRUSR); + } + /* * Create symlink for convenience pointing to interface specific * debugfs entries for the driver. For example, under diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index ffb4b5cef..7d69a556b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -949,9 +949,10 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { - int ret, conf_id; + unsigned int conf_id; + int ret; - ret = kstrtoint(buf, 0, &conf_id); + ret = kstrtouint(buf, 0, &conf_id); if (ret) return ret; @@ -974,7 +975,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (ret) return ret; - iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0); + iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); @@ -1200,12 +1201,7 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf, if (ptr) { for (ofs = 0; ofs < len; ofs += 16) { pos += scnprintf(buf + pos, bufsz - pos, - "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos, - bufsz - pos, false); - pos += strlen(buf + pos); - if (bufsz - pos > 0) - buf[pos++] = '\n'; + "0x%.4x %16ph\n", ofs, ptr + ofs); } } else { pos += scnprintf(buf + pos, bufsz - pos, diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h index d7658d16e..20521bebb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -339,8 +339,13 @@ enum iwl_wowlan_wakeup_reason { IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), - /* BIT(11) reserved */ + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), + IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13), + IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14), + IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15), + IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16), + }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ struct iwl_wowlan_gtk_status { diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index b1baa33cc..7005fa4be 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -310,17 +312,22 @@ struct iwl_reduce_tx_power_cmd { __le16 pwr_restriction; } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ +enum iwl_dev_tx_power_cmd_mode { + IWL_TX_POWER_MODE_SET_MAC = 0, + IWL_TX_POWER_MODE_SET_DEVICE = 1, + IWL_TX_POWER_MODE_SET_CHAINS = 2, +}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */; + /** - * struct iwl_dev_tx_power_cmd - TX power reduction command - * REDUCE_TX_POWER_CMD = 0x9f - * @set_mode: 0 - MAC tx power, 1 - device tx power + * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command + * @set_mode: see &enum iwl_dev_tx_power_cmd_mode * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in 1/8 dBms. * @dev_24: device TX power restriction in 1/8 dBms * @dev_52_low: device TX power restriction upper band - low * @dev_52_high: device TX power restriction upper band - high */ -struct iwl_dev_tx_power_cmd { +struct iwl_dev_tx_power_cmd_v2 { __le32 set_mode; __le32 mac_context_id; __le16 pwr_restriction; @@ -329,6 +336,20 @@ struct iwl_dev_tx_power_cmd { __le16 dev_52_high; } __packed; /* TX_REDUCED_POWER_API_S_VER_2 */ +#define IWL_NUM_CHAIN_LIMITS 2 +#define IWL_NUM_SUB_BANDS 5 + +/** + * struct iwl_dev_tx_power_cmd - TX power reduction command + * @v2: version 2 of the command, embedded here for easier software handling + * @per_chain_restriction: per chain restrictions + */ +struct iwl_dev_tx_power_cmd { + /* v3 is just an extension of v2 - keep this here */ + struct iwl_dev_tx_power_cmd_v2 v2; + __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; +} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ + #define IWL_DEV_MAX_TX_POWER 0x7FFF /** @@ -413,7 +434,7 @@ struct iwl_beacon_filter_cmd { #define IWL_BF_TEMP_FAST_FILTER_MIN 0 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 -#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5 +#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 737774a01..660cc1c93 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -87,41 +87,6 @@ struct iwl_ssid_ie { u8 ssid[IEEE80211_MAX_SSID_LEN]; } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ -/* How many statistics are gathered for each channel */ -#define SCAN_RESULTS_STATISTICS 1 - -/** - * enum iwl_scan_complete_status - status codes for scan complete notifications - * @SCAN_COMP_STATUS_OK: scan completed successfully - * @SCAN_COMP_STATUS_ABORT: scan was aborted by user - * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed - * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready - * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed - * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed - * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command - * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort - * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax - * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful - * (not an error!) 
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver - * asked for - * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events -*/ -enum iwl_scan_complete_status { - SCAN_COMP_STATUS_OK = 0x1, - SCAN_COMP_STATUS_ABORT = 0x2, - SCAN_COMP_STATUS_ERR_SLEEP = 0x3, - SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4, - SCAN_COMP_STATUS_ERR_PROBE = 0x5, - SCAN_COMP_STATUS_ERR_WAKEUP = 0x6, - SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7, - SCAN_COMP_STATUS_ERR_INTERNAL = 0x8, - SCAN_COMP_STATUS_ERR_COEX = 0x9, - SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA, - SCAN_COMP_STATUS_ITERATION_END = 0x0B, - SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C, -}; - /* scan offload */ #define IWL_SCAN_MAX_BLACKLIST_LEN 64 #define IWL_SCAN_SHORT_BLACKLIST_LEN 16 @@ -143,71 +108,6 @@ enum scan_framework_client { SCAN_CLIENT_ASSET_TRACKING = BIT(2), }; -/** - * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6 - * @scan_flags: see enum iwl_scan_flags - * @channel_count: channels in channel list - * @quiet_time: dwell time, in milliseconds, on quiet channel - * @quiet_plcp_th: quiet channel num of packets threshold - * @good_CRC_th: passive to active promotion threshold - * @rx_chain: RXON rx chain. - * @max_out_time: max TUs to be out of associated channel - * @suspend_time: pause scan this TUs when returning to service channel - * @flags: RXON flags - * @filter_flags: RXONfilter - * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. - * @direct_scan: list of SSIDs for directed active scan - * @scan_type: see enum iwl_scan_type. - * @rep_count: repetition count for each scheduled scan iteration. - */ -struct iwl_scan_offload_cmd { - __le16 len; - u8 scan_flags; - u8 channel_count; - __le16 quiet_time; - __le16 quiet_plcp_th; - __le16 good_CRC_th; - __le16 rx_chain; - __le32 max_out_time; - __le32 suspend_time; - /* RX_ON_FLAGS_API_S_VER_1 */ - __le32 flags; - __le32 filter_flags; - struct iwl_tx_cmd tx_cmd[2]; - /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; - __le32 scan_type; - __le32 rep_count; -} __packed; - -enum iwl_scan_offload_channel_flags { - IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE = BIT(0), - IWL_SCAN_OFFLOAD_CHANNEL_NARROW = BIT(22), - IWL_SCAN_OFFLOAD_CHANNEL_FULL = BIT(24), - IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL = BIT(25), -}; - -/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs: - * __le32 type: bitmap; bits 1-20 are for directed scan to i'th ssid and - * see enum iwl_scan_offload_channel_flags. - * __le16 channel_number: channel number 1-13 etc. - * __le16 iter_count: repetition count for the channel. - * __le32 iter_interval: interval between two iterations on one channel. - * u8 active_dwell. - * u8 passive_dwell. 
- */ -#define IWL_SCAN_CHAN_SIZE 14 - -/** - * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S - * @scan_cmd: scan command fixed part - * @data: scan channel configuration and probe request frames - */ -struct iwl_scan_offload_cfg { - struct iwl_scan_offload_cmd scan_cmd; - u8 data[0]; -} __packed; - /** * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S * @ssid: MAC address to filter out @@ -297,35 +197,6 @@ enum iwl_scan_ebs_status { IWL_SCAN_EBS_INACTIVE, }; -/** - * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1 - * @last_schedule_line: last schedule line executed (fast or regular) - * @last_schedule_iteration: last scan iteration executed before scan abort - * @status: enum iwl_scan_offload_compleate_status - * @ebs_status: last EBS status, see IWL_SCAN_EBS_* - */ -struct iwl_scan_offload_complete { - u8 last_schedule_line; - u8 last_schedule_iteration; - u8 status; - u8 ebs_status; -} __packed; - -/** - * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1 - * @ssid_bitmap: SSIDs indexes found in this iteration - * @client_bitmap: clients that are active and wait for this notification - */ -struct iwl_sched_scan_results { - __le16 ssid_bitmap; - u8 client_bitmap; - u8 reserved; -}; - -/* Unified LMAC scan API */ - -#define IWL_MVM_BASIC_PASSIVE_DWELL 110 - /** * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S * @tx_flags: combination of TX_CMD_FLG_* @@ -550,18 +421,6 @@ struct iwl_periodic_scan_complete { /* UMAC Scan API */ -/** - * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands - * @size: size of the command (not including header) - * @reserved0: for future use and alignment - * @ver: API version number - */ -struct iwl_mvm_umac_cmd_hdr { - __le16 size; - u8 reserved0; - u8 ver; -} __packed; - /* The maximum of either of these cannot exceed 8, because we use an * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). 
*/ @@ -621,7 +480,6 @@ enum iwl_channel_flags { /** * struct iwl_scan_config - * @hdr: umac command header * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions @@ -639,7 +497,6 @@ enum iwl_channel_flags { * @channel_array: default supported channels */ struct iwl_scan_config { - struct iwl_mvm_umac_cmd_hdr hdr; __le32 flags; __le32 tx_chains; __le32 rx_chains; @@ -735,7 +592,6 @@ struct iwl_scan_req_umac_tail { /** * struct iwl_scan_req_umac - * @hdr: umac command header * @flags: &enum iwl_umac_scan_flags * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority @@ -754,7 +610,6 @@ struct iwl_scan_req_umac_tail { * &struct iwl_scan_req_umac_tail */ struct iwl_scan_req_umac { - struct iwl_mvm_umac_cmd_hdr hdr; __le32 flags; __le32 uid; __le32 ooc_priority; @@ -776,12 +631,10 @@ struct iwl_scan_req_umac { /** * struct iwl_umac_scan_abort - * @hdr: umac command header * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @flags: reserved */ struct iwl_umac_scan_abort { - struct iwl_mvm_umac_cmd_hdr hdr; __le32 uid; __le32 flags; } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h index 21dd5b771..493a8bdfb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h @@ -366,8 +366,8 @@ struct iwl_mvm_rm_sta_cmd { * ( MGMT_MCAST_KEY = 0x1f ) * @ctrl_flags: %iwl_sta_key_flag * @IGTK: - * @K1: IGTK master key - * @K2: IGTK sub key + * @K1: unused + * @K2: unused * @sta_id: station ID that support IGTK * @key_id: * @receive_seq_cnt: initial RSC/PN needed for replay check diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h new file mode 100644 index 000000000..eed6271d0 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h @@ -0,0 +1,386 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __fw_api_tof_h__ +#define __fw_api_tof_h__ + +#include "fw-api.h" + +/* ToF sub-group command IDs */ +enum iwl_mvm_tof_sub_grp_ids { + TOF_RANGE_REQ_CMD = 0x1, + TOF_CONFIG_CMD = 0x2, + TOF_RANGE_ABORT_CMD = 0x3, + TOF_RANGE_REQ_EXT_CMD = 0x4, + TOF_RESPONDER_CONFIG_CMD = 0x5, + TOF_NW_INITIATED_RES_SEND_CMD = 0x6, + TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7, + TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC, + TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD, + TOF_RANGE_RESPONSE_NOTIF = 0xFE, + TOF_MCSI_DEBUG_NOTIF = 0xFB, +}; + +/** + * struct iwl_tof_config_cmd - ToF configuration + * @tof_disabled: 0 enabled, 1 - disabled + * @one_sided_disabled: 0 enabled, 1 - disabled + * @is_debug_mode: 1 debug mode, 0 - otherwise + * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise + */ +struct iwl_tof_config_cmd { + __le32 sub_grp_cmd_id; + u8 tof_disabled; + u8 one_sided_disabled; + u8 is_debug_mode; + u8 is_buf_required; +} __packed; + +/** + * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) + * @burst_period: future use: (currently hard coded in the LMAC) + * The interval between two sequential bursts. + * @min_delta_ftm: future use: (currently hard coded in the LMAC) + * The minimum delay between two sequential FTM Responses + * in the same burst. + * @burst_duration: future use: (currently hard coded in the LMAC) + * The total time for all FTMs handshake in the same burst. + * Affects the time events duration in the LMAC. + * @num_of_burst_exp: future use: (currently hard coded in the LMAC) + * The number of bursts for the current ToF request. Affects + * the number of events allocations in the current iteration. + * @get_ch_est: for xVT only, NA for driver + * @abort_responder: when set to '1' - Responder will terminate its activity + * (all other fields in the command are ignored) + * @recv_sta_req_params: 1 - Responder will ignore the other Responder's + * params and use the recommended Initiator params. 
+ * 0 - otherwise + * @channel_num: current AP Channel + * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rate: current AP rate + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency. + * 40MHz 0 below center, 1 above center + * 80MHz bits [0..1]: 0 the near 20MHz to the center, + * 1 the far 20MHz to the center + * bit[2] as above 40MHz + * @ftm_per_burst: FTMs per Burst + * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response, + * '1' - we measure over the Initial FTM Response + * @asap_mode: ASAP / Non ASAP mode for the current WLS station + * @sta_id: index of the AP STA when in AP mode + * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF + * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @bssid: Current AP BSSID + */ +struct iwl_tof_responder_config_cmd { + __le32 sub_grp_cmd_id; + __le16 burst_period; + u8 min_delta_ftm; + u8 burst_duration; + u8 num_of_burst_exp; + u8 get_ch_est; + u8 abort_responder; + u8 recv_sta_req_params; + u8 channel_num; + u8 bandwidth; + u8 rate; + u8 ctrl_ch_position; + u8 ftm_per_burst; + u8 ftm_resp_ts_avail; + u8 asap_mode; + u8 sta_id; + __le16 tsf_timer_offset_msecs; + __le16 toa_offset; + u8 bssid[ETH_ALEN]; +} __packed; + +/** + * struct iwl_tof_range_request_ext_cmd - extended range req for WLS + * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF + * @min_delta_ftm: Minimal time between two consecutive measurements, + * in units of 100us. 0 means no preference by station + * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended + * value to be sent to the AP + * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended + * value to be sent to the AP + * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended + * value to be sent to the AP + */ +struct iwl_tof_range_req_ext_cmd { + __le32 sub_grp_cmd_id; + __le16 tsf_timer_offset_msec; + __le16 reserved; + u8 min_delta_ftm; + u8 ftm_format_and_bw20M; + u8 ftm_format_and_bw40M; + u8 ftm_format_and_bw80M; +} __packed; + +#define IWL_MVM_TOF_MAX_APS 21 + +/** + * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * @channel_num: Current AP Channel + * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @tsf_delta_direction: TSF relative to the subject AP + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency. + * 40MHz 0 below center, 1 above center + * 80MHz bits [0..1]: 0 the near 20MHz to the center, + * 1 the far 20MHz to the center + * bit[2] as above 40MHz + * @bssid: AP's bss id + * @measure_type: Measurement type: 0 - two sided, 1 - One sided + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the + * number of measurement iterations (min 2^0 = 1, max 2^14) + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity in units of 100ms. ignored if num_of_bursts = 0 + * @samples_per_burst: 2-sided: the number of FTM pairs in a single Burst (1-31) + * 1-sided: how many rts/cts pairs should be used per burst. + * @retries_per_sample: Max number of retries that the LMAC should send + * in case of no replies by the AP. + * @tsf_delta: TSF Delta in units of microseconds. + * The difference between the AP TSF and the device local clock. 
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR + * Bit[1] Civic should be sent in the FTMR + * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) + * @enable_dyn_ack: Enable Dynamic ACK BW. + * 0 Initiator interacts with regular AP + * 1 Initiator interacts with Responder machine: need to send the + * Initiator Acks with HT 40MHz / 80MHz, since the Responder should + * use it for its ch est measurement (this flag will be set when we + * configure the opposite machine to be Responder). + * @rssi: Last received value + * legal values: -128-0 (0x7f). above 0x0 indicates an invalid value. + */ +struct iwl_tof_range_req_ap_entry { + u8 channel_num; + u8 bandwidth; + u8 tsf_delta_direction; + u8 ctrl_ch_position; + u8 bssid[ETH_ALEN]; + u8 measure_type; + u8 num_of_bursts; + __le16 burst_period; + u8 samples_per_burst; + u8 retries_per_sample; + __le32 tsf_delta; + u8 location_req; + u8 asap_mode; + u8 enable_dyn_ack; + s8 rssi; +} __packed; + +/** + * enum iwl_tof_response_mode + * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as + * possible (not supported for this release) + * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon + * timeout expiration + * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the + * earlier of: measurements completion / timeout + * expiration. + */ +enum iwl_tof_response_mode { + IWL_MVM_TOF_RESPOSE_ASAP = 1, + IWL_MVM_TOF_RESPOSE_TIMEOUT, + IWL_MVM_TOF_RESPOSE_COMPLETE, +}; + +/** + * struct iwl_tof_range_req_cmd - start measurement cmd + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @initiator: 0- NW initiated, 1 - Client Initiated + * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, + * '1' - run ML-Algo for ToF only + * @req_timeout: Requested timeout of the response in units of 100ms. + * This is equivalent to the session time configured to the + * LMAC in Initiator Request + * @report_policy: Supported partially for this release: For current release - + * the range report will be uploaded as a batch when ready or + * when the session is done (successfully / partially). + * one of iwl_tof_response_mode. + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), + * '1' Use MAC Address randomization according to the below + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. 
+ * Bits set to 1 shall be randomized by the UMAC + */ +struct iwl_tof_range_req_cmd { + __le32 sub_grp_cmd_id; + u8 request_id; + u8 initiator; + u8 one_sided_los_disable; + u8 req_timeout; + u8 report_policy; + u8 los_det_disable; + u8 num_of_ap; + u8 macaddr_random; + u8 macaddr_template[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; +} __packed; + +/** + * struct iwl_tof_gen_resp_cmd - generic ToF response + */ +struct iwl_tof_gen_resp_cmd { + __le32 sub_grp_cmd_id; + u8 data[]; +} __packed; + +/** + * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) + * @measure_status: current APs measurement status + * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rtt: The Round Trip Time that took for the last measurement for + * current AP [nSec] + * @rtt_variance: The Variance of the RTT values measured for current AP + * @rtt_spread: The Difference between the maximum and the minimum RTT + * values measured for current AP in the current session [nsec] + * @rssi: RSSI as uploaded in the Channel Estimation notification + * @rssi_spread: The Difference between the maximum and the minimum RSSI values + * measured for current AP in the current session + * @range: Measured range [cm] + * @range_variance: Measured range variance [cm] + * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was + * uploaded by the LMAC + */ +struct iwl_tof_range_rsp_ap_entry_ntfy { + u8 bssid[ETH_ALEN]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + __le16 reserved; + __le32 range; + __le32 range_variance; + __le32 timestamp; +} __packed; + +/** + * struct iwl_tof_range_rsp_ntfy - + * @request_id: A Token ID of the corresponding Range request + * @request_status: status of current measurement session + * @last_in_batch: report policy (when not all responses are uploaded at once) + * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + */ +struct iwl_tof_range_rsp_ntfy { + u8 request_id; + u8 request_status; + u8 last_in_batch; + u8 num_of_aps; + struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; +} __packed; + +#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) +/** + * struct iwl_tof_mcsi_notif - used for debug + * @token: token ID for the current session + * @role: '0' - initiator, '1' - responder + * @initiator_bssid: initiator machine + * @responder_bssid: responder machine + * @mcsi_buffer: debug data + */ +struct iwl_tof_mcsi_notif { + u8 token; + u8 role; + __le16 reserved; + u8 initiator_bssid[ETH_ALEN]; + u8 responder_bssid[ETH_ALEN]; + u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; +} __packed; + +/** + * struct iwl_tof_neighbor_report_notif + * @bssid: BSSID of the AP which sent the report + * @request_token: same token as the corresponding request + * @status: + * @report_ie_len: the length of the response frame starting from the Element ID + * @data: the IEs + */ +struct iwl_tof_neighbor_report { + u8 bssid[ETH_ALEN]; + u8 request_token; + u8 status; + __le16 report_ie_len; + u8 data[]; +} __packed; + +/** + * struct iwl_tof_range_abort_cmd + * @request_id: corresponds to a range request + */ +struct iwl_tof_range_abort_cmd { + __le32 sub_grp_cmd_id; + u8 request_id; + u8 reserved[3]; +} __packed; + +#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h index 81c4ea3c6..853698ab8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h +++ 
b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h @@ -124,6 +124,18 @@ enum iwl_tx_flags { TX_CMD_FLG_HCCA_CHUNK = BIT(31) }; /* TX_FLAGS_BITS_API_S_VER_1 */ +/** + * enum iwl_tx_pm_timeouts - pm timeout values in TX command + * @PM_FRAME_NONE: no need to suspend sleep mode + * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU + * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec + */ +enum iwl_tx_pm_timeouts { + PM_FRAME_NONE = 0, + PM_FRAME_MGMT = 2, + PM_FRAME_ASSOC = 3, +}; + /* * TX command security control */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 16e9ef493..4af7513ad 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -75,6 +75,7 @@ #include "fw-api-coex.h" #include "fw-api-scan.h" #include "fw-api-stats.h" +#include "fw-api-tof.h" /* Tx queue numbers */ enum { @@ -119,6 +120,9 @@ enum { ADD_STA = 0x18, REMOVE_STA = 0x19, + /* paging get item */ + FW_GET_ITEM_CMD = 0x1a, + /* TX */ TX_CMD = 0x1c, TXPATH_FLUSH = 0x1e, @@ -148,6 +152,9 @@ enum { LQ_CMD = 0x4e, + /* paging block to FW cpu2 */ + FW_PAGING_BLOCK_CMD = 0x4f, + /* Scan offload */ SCAN_OFFLOAD_REQUEST_CMD = 0x51, SCAN_OFFLOAD_ABORT_CMD = 0x52, @@ -163,6 +170,10 @@ enum { CALIB_RES_NOTIF_PHY_DB = 0x6b, /* PHY_DB_CMD = 0x6c, */ + /* ToF - 802.11mc FTM */ + TOF_CMD = 0x10, + TOF_NOTIFICATION = 0x11, + /* Power - legacy power table command */ POWER_TABLE_CMD = 0x77, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, @@ -365,6 +376,50 @@ struct iwl_nvm_access_cmd { u8 data[]; } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ +#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ + +/* + * struct iwl_fw_paging_cmd - paging layout + * + * (FW_PAGING_BLOCK_CMD = 0x4f) + * + * Send to FW the paging layout in the driver. + * + * @flags: various flags for the command + * @block_size: the block size in powers of 2 + * @block_num: number of blocks specified in the command. 
+ * @device_phy_addr: virtual addresses from device side +*/ +struct iwl_fw_paging_cmd { + __le32 flags; + __le32 block_size; + __le32 block_num; + __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; +} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ + +/* + * FW item IDs + * + * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload / + * download + */ +enum iwl_fw_item_id { + IWL_FW_ITEM_ID_PAGING = 3, +}; + +/* + * struct iwl_fw_get_item_cmd - get an item from the fw + */ +struct iwl_fw_get_item_cmd { + __le32 item_id; +} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ + +struct iwl_fw_get_item_resp { + __le32 item_id; + __le32 item_byte_cnt; + __le32 item_val; +} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ + /** * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD * @offset: offset in bytes into the section @@ -1080,10 +1135,33 @@ struct iwl_rx_phy_info { __le16 frame_time; } __packed; +/* + * TCP offload Rx assist info + * + * bits 0:3 - reserved + * bits 4:7 - MIC CRC length + * bits 8:12 - MAC header length + * bit 13 - Padding indication + * bit 14 - A-AMSDU indication + * bit 15 - Offload enabled + */ +enum iwl_csum_rx_assist_info { + CSUM_RXA_RESERVED_MASK = 0x000f, + CSUM_RXA_MICSIZE_MASK = 0x00f0, + CSUM_RXA_HEADERLEN_MASK = 0x1f00, + CSUM_RXA_PADD = BIT(13), + CSUM_RXA_AMSDU = BIT(14), + CSUM_RXA_ENA = BIT(15) +}; + +/** + * struct iwl_rx_mpdu_res_start - phy info + * @assist: see CSUM_RX_ASSIST_ above + */ struct iwl_rx_mpdu_res_start { __le16 byte_count; - __le16 reserved; -} __packed; + __le16 assist; +} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */ /** * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags @@ -1136,6 +1214,8 @@ enum iwl_rx_phy_flags { * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame + * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw + * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK: * @RX_MPDU_RES_STATUS_STA_ID_MSK: * @RX_MPDU_RES_STATUS_RRF_KILL: @@ -1165,6 +1245,8 @@ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13), RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14), RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15), + RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), + RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000), RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000), RX_MPDU_RES_STATUS_RRF_KILL = BIT(29), diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index eb10c5ee4..5c7f7cc9f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -106,6 +106,306 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) sizeof(tx_ant_cmd), &tx_ant_cmd); } +static void iwl_free_fw_paging(struct iwl_mvm *mvm) +{ + int i; + + if (!mvm->fw_paging_db[0].fw_paging_block) + return; + + for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { + if (!mvm->fw_paging_db[i].fw_paging_block) { + IWL_DEBUG_FW(mvm, + "Paging: block %d already freed, continue to next page\n", + i); + + continue; + } + + __free_pages(mvm->fw_paging_db[i].fw_paging_block, + get_order(mvm->fw_paging_db[i].fw_paging_size)); + } + kfree(mvm->trans->paging_download_buf); + memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); +} + +static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) +{ + int sec_idx, idx; + u32 offset = 0; + + /* + * find where the paging image starts: 
+ * if CPU2 exists and is in paging format, then the image looks like: + * CPU1 sections (2 or more) + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2 + * CPU2 sections (not paged) + * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2 + * sections from the CPU2 paging section + * CPU2 paging CSS + * CPU2 paging image (including instruction and data) + */ + for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) { + if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { + sec_idx++; + break; + } + } + + if (sec_idx >= IWL_UCODE_SECTION_MAX) { + IWL_ERR(mvm, "driver didn't find paging image\n"); + iwl_free_fw_paging(mvm); + return -EINVAL; + } + + /* copy the CSS block to the dram */ + IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n", + sec_idx); + + memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), + image->sec[sec_idx].data, + mvm->fw_paging_db[0].fw_paging_size); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d CSS bytes to first block\n", + mvm->fw_paging_db[0].fw_paging_size); + + sec_idx++; + + /* + * copy the paging blocks to the dram + * the loop index starts from 1 since the CSS block was already copied + * to dram at index 0. + * the loop stops at num_of_paging_blk since the last block is not full. + */ + for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { + memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + image->sec[sec_idx].data + offset, + mvm->fw_paging_db[idx].fw_paging_size); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d paging bytes to block %d\n", + mvm->fw_paging_db[idx].fw_paging_size, + idx); + + offset += mvm->fw_paging_db[idx].fw_paging_size; + } + + /* copy the last paging block */ + if (mvm->num_of_pages_in_last_blk > 0) { + memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + image->sec[sec_idx].data + offset, + FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d pages in the last block %d\n", + mvm->num_of_pages_in_last_blk, idx); + } + + return 0; +} + +static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, + const struct fw_img *image) +{ + struct page *block; + dma_addr_t phys = 0; + int blk_idx = 0; + int order, num_of_pages; + int dma_enabled; + + if (mvm->fw_paging_db[0].fw_paging_block) + return 0; + + dma_enabled = is_device_dma_capable(mvm->trans->dev); + + /* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */ + BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); + + num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; + mvm->num_of_paging_blk = ((num_of_pages - 1) / + NUM_OF_PAGE_PER_GROUP) + 1; + + mvm->num_of_pages_in_last_blk = + num_of_pages - + NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); + + IWL_DEBUG_FW(mvm, + "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", + mvm->num_of_paging_blk, + mvm->num_of_pages_in_last_blk); + + /* allocate block of 4Kbytes for paging CSS */ + order = get_order(FW_PAGING_SIZE); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + + mvm->fw_paging_db[blk_idx].fw_paging_block = block; + mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE; + + if (dma_enabled) { + phys = dma_map_page(mvm->trans->dev, block, 0, + PAGE_SIZE << order, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mvm->trans->dev, phys)) { + /* + * free the previous pages and the current one since + * we failed to map_page. 
+ */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + IWL_DEBUG_FW(mvm, + "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", + order); + + /* + * allocate blocks in dram. + * since the CSS is allocated in fw_paging_db[0], the loop starts from index 1 + */ + for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { + /* allocate block of PAGING_BLOCK_SIZE (32K) */ + order = get_order(PAGING_BLOCK_SIZE); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + + mvm->fw_paging_db[blk_idx].fw_paging_block = block; + mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE; + + if (dma_enabled) { + phys = dma_map_page(mvm->trans->dev, block, 0, + PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(mvm->trans->dev, phys)) { + /* + * free the previous pages and the current one + * since we failed to map_page. + */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + mvm->fw_paging_db[blk_idx].fw_paging_phys = + PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + IWL_DEBUG_FW(mvm, + "Paging: allocated 32K bytes (order %d) for firmware paging.\n", + order); + } + + return 0; +} + +static int iwl_save_fw_paging(struct iwl_mvm *mvm, + const struct fw_img *fw) +{ + int ret; + + ret = iwl_alloc_fw_paging_mem(mvm, fw); + if (ret) + return ret; + + return iwl_fill_paging_mem(mvm, fw); +} + +/* send paging cmd to FW in case CPU2 has paging image */ +static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) +{ + int blk_idx; + __le32 dev_phy_addr; + struct iwl_fw_paging_cmd fw_paging_cmd = { + .flags = + cpu_to_le32(PAGING_CMD_IS_SECURED | + PAGING_CMD_IS_ENABLED | + (mvm->num_of_pages_in_last_blk << + PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), + .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), + .block_num = cpu_to_le32(mvm->num_of_paging_blk), + }; + + /* loop over all paging blocks + CSS block */ + for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { + dev_phy_addr = + cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >> + PAGE_2_EXP_SIZE); + fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr; + } + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(fw_paging_cmd), &fw_paging_cmd); +} + +/* + * Send paging item cmd to FW in case CPU2 has paging image + */ +static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) +{ + int ret; + struct iwl_fw_get_item_cmd fw_get_item_cmd = { + .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), + }; + + struct iwl_fw_get_item_resp *item_resp; + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &fw_get_item_cmd, }, + }; + + cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, + "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", + ret); + return ret; + } + + item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; + if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { + IWL_ERR(mvm, + "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", + le32_to_cpu(item_resp->item_id)); + ret = -EIO; + goto exit; + } + + 
mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE, + GFP_KERNEL); + if (!mvm->trans->paging_download_buf) { + ret = -ENOMEM; + goto exit; + } + mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); + mvm->trans->paging_db = mvm->fw_paging_db; + IWL_DEBUG_FW(mvm, + "Paging: got paging request address (paging_req_addr 0x%08x)\n", + mvm->trans->paging_req_addr); + +exit: + iwl_free_resp(&cmd); + + return ret; +} + static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { @@ -213,7 +513,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, const struct fw_img *fw; int ret, i; enum iwl_ucode_type old_type = mvm->cur_ucode; - static const u8 alive_cmd[] = { MVM_ALIVE }; + static const u16 alive_cmd[] = { MVM_ALIVE }; struct iwl_sf_region st_fwrd_space; if (ucode_type == IWL_UCODE_REGULAR && @@ -244,6 +544,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, MVM_UCODE_ALIVE_TIMEOUT); if (ret) { + if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + IWL_ERR(mvm, + "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", + iwl_read_prph(mvm->trans, SB_CPU_1_STATUS), + iwl_read_prph(mvm->trans, SB_CPU_2_STATUS)); mvm->cur_ucode = old_type; return ret; } @@ -268,6 +573,40 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); + /* + * configure and operate fw paging mechanism. + * the driver configures the paging flow only once; the CPU2 paging image + * is included in the IWL_UCODE_INIT image. + */ + if (fw->paging_mem_size) { + /* + * When dma is not enabled, the driver needs to copy / write + * the downloaded / uploaded page to / from the smem. + * This gets the location of the place where the pages are + * stored. 
+ */ + if (!is_device_dma_capable(mvm->trans->dev)) { + ret = iwl_trans_get_paging_item(mvm); + if (ret) { + IWL_ERR(mvm, "failed to get FW paging item\n"); + return ret; + } + } + + ret = iwl_save_fw_paging(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to save the FW paging image\n"); + return ret; + } + + ret = iwl_send_paging_cmd(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to send the paging cmd\n"); + iwl_free_fw_paging(mvm); + return ret; + } + } + /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they @@ -314,7 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) { struct iwl_notification_wait calib_wait; - static const u8 init_complete[] = { + static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB }; @@ -364,7 +703,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) * abort after reading the nvm in case RF Kill is on, we will complete * the init seq later when RF kill will switch to off */ - if (iwl_mvm_is_radio_killed(mvm)) { + if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); iwl_remove_notification(&mvm->notif_wait, &calib_wait); @@ -397,7 +736,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, MVM_UCODE_CALIB_TIMEOUT); - if (ret && iwl_mvm_is_radio_killed(mvm)) { + if (ret && iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); ret = 1; } @@ -444,12 +783,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) return; pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n", - pkt->hdr.flags); - goto exit; - } - mem_cfg = (void *)pkt->data; mvm->shared_mem_cfg.shared_mem_addr = @@ -473,14 +806,18 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) le32_to_cpu(mem_cfg->page_buff_size); IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); -exit: iwl_free_resp(&cmd); } int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, struct iwl_mvm_dump_desc *desc, - unsigned int delay) + struct iwl_fw_dbg_trigger_tlv *trigger) { + unsigned int delay = 0; + + if (trigger) + delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); + if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) return -EBUSY; @@ -491,6 +828,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, le32_to_cpu(desc->trig_desc.type)); mvm->fw_dump_desc = desc; + mvm->fw_dump_trig = trigger; queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); @@ -498,7 +836,8 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, } int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, unsigned int delay) + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_mvm_dump_desc *desc; @@ -510,14 +849,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); - return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay); + return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); } int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
{ - unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); u16 occurrences = le16_to_cpu(trigger->occurrences); int ret, len = 0; char buf[64]; @@ -541,8 +879,9 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, len = strlen(buf) + 1; } - ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, - len, delay); + ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len, + trigger); + if (ret) return ret; @@ -676,8 +1015,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) - iwl_mvm_get_shared_mem_conf(mvm); + iwl_mvm_get_shared_mem_conf(mvm); ret = iwl_mvm_sf_update(mvm, NULL, false); if (ret) @@ -760,6 +1098,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } + if (iwl_mvm_is_csum_supported(mvm) && + mvm->cfg->features & NETIF_F_RXCSUM) + iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3); + /* allow FW/transport low power modes if not during restart */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -815,9 +1157,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) return ret; } -int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; @@ -828,13 +1169,10 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, (flags & SW_CARD_DISABLED) ? "Kill" : "On", (flags & CT_KILL_CARD_DISABLED) ? "Reached" : "Not reached"); - - return 0; } -int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; @@ -845,5 +1183,4 @@ int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, le32_to_cpu(mfuart_notif->external_ver), le32_to_cpu(mfuart_notif->status), le32_to_cpu(mfuart_notif->duration)); - return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 1812dd018..3424315dd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -1312,9 +1312,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, } } -int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; @@ -1365,8 +1364,6 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); } } - - return 0; } static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, @@ -1415,9 +1412,8 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); } -int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_missed_beacons_notif *mb = (void *)pkt->data; @@ -1434,5 +1430,4 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, 
iwl_mvm_beacon_loss_iterator, mb); - return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index dfdab38e2..7c2944a72 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -641,6 +641,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + ieee80211_hw_set(hw, TDLS_WIDER_BW); } if (fw_has_capa(&mvm->fw->ucode_capa, @@ -649,6 +650,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; } + hw->netdev_features |= mvm->cfg->features; + if (!iwl_mvm_is_csum_supported(mvm)) + hw->netdev_features &= ~NETIF_F_RXCSUM; + ret = ieee80211_register_hw(mvm->hw); if (ret) iwl_mvm_leds_exit(mvm); @@ -1120,9 +1125,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) u32 file_len, fifo_data_len = 0; u32 smem_len = mvm->cfg->smem_len; u32 sram2_len = mvm->cfg->dccm2_len; + bool monitor_dump_only = false; lockdep_assert_held(&mvm->mutex); + if (mvm->fw_dump_trig && + mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) + monitor_dump_only = true; + fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); if (!fw_error_dump) return; @@ -1174,6 +1184,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) fifo_data_len + sizeof(*dump_info); + /* Make room for the SMEM, if it exists */ + if (smem_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; + + /* Make room for the secondary SRAM, if it exists */ + if (sram2_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; + + /* If we only want a monitor dump, reset the file length */ + if (monitor_dump_only) { + file_len = sizeof(*dump_file) + sizeof(*dump_data) + + sizeof(*dump_info); + } + /* * In 8000 HW family B-step include the ICCM (which resides separately) */ @@ -1186,14 +1210,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + mvm->fw_dump_desc->len; - /* Make room for the SMEM, if it exists */ - if (smem_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; - - /* Make room for the secondary SRAM, if it exists */ - if (sram2_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; - dump_file = vzalloc(file_len); if (!dump_file) { kfree(fw_error_dump); @@ -1239,6 +1255,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_data = iwl_fw_error_next_data(dump_data); } + /* In case we only want monitor dump, skip to dump transport data */ + if (monitor_dump_only) + goto dump_trans_data; + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -1282,7 +1302,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_mem->data, IWL8260_ICCM_LEN); } - fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans); +dump_trans_data: + fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, + mvm->fw_dump_trig); fw_error_dump->op_mode_len = file_len; if (fw_error_dump->trans_ptr) file_len += fw_error_dump->trans_ptr->len; @@ -1291,6 +1313,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0, GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump); + mvm->fw_dump_trig = NULL; clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); } @@ -1433,22 +1456,9 @@ static void 
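For the dump sizing above, a minimal sketch of the rule being enforced, assuming base_len stands for the header-plus-info portion and hdr_len for one per-section header (both names are illustrative):

	/* Illustrative: memory sections are budgeted, then dropped again
	 * when the trigger asks for a monitor-only dump.
	 */
	static u32 dump_len_sketch(bool monitor_dump_only, u32 base_len,
				   u32 hdr_len, u32 smem_len, u32 sram2_len)
	{
		u32 file_len = base_len;

		if (smem_len)
			file_len += hdr_len + smem_len;
		if (sram2_len)
			file_len += hdr_len + sram2_len;

		if (monitor_dump_only)
			file_len = base_len;	/* keep only the headers */

		return file_len;
	}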
iwl_mvm_restart_complete(struct iwl_mvm *mvm) static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) { - bool exit_now; - if (!iwl_mvm_is_d0i3_supported(mvm)) return; - mutex_lock(&mvm->d0i3_suspend_mutex); - __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); - exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, - &mvm->d0i3_suspend_flags); - mutex_unlock(&mvm->d0i3_suspend_mutex); - - if (exit_now) { - IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n"); - _iwl_mvm_exit_d0i3(mvm); - } - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) if (!wait_event_timeout(mvm->d0i3_exit_waitq, !test_bit(IWL_MVM_STATUS_IN_D0I3, @@ -1585,20 +1595,23 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { struct iwl_dev_tx_power_cmd cmd = { - .set_mode = 0, - .mac_context_id = + .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), + .v2.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .pwr_restriction = cpu_to_le16(8 * tx_power), + .v2.pwr_restriction = cpu_to_le16(8 * tx_power), }; + int len = sizeof(cmd); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV)) return iwl_mvm_set_tx_power_old(mvm, vif, tx_power); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, - sizeof(cmd), &cmd); + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN)) + len = sizeof(cmd.v2); + + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, @@ -1664,6 +1677,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_unlock; } + mvmvif->features |= hw->netdev_features; + ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_release; @@ -2373,6 +2388,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); + mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { @@ -2880,10 +2896,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - /* fall-through */ - case WLAN_CIPHER_SUITE_CCMP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; break; + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + break; case WLAN_CIPHER_SUITE_AES_CMAC: WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); break; @@ -3025,7 +3042,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, int res, time_reg = DEVICE_SYSTEM_TIME_REG; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; - static const u8 time_event_response[] = { HOT_SPOT_CMD }; + static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 2d4bad5fe..c754051a4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -80,6 +80,7 @@ #include "sta.h" #include "fw-api.h" #include "constants.h" +#include "tof.h" #define IWL_INVALID_MAC80211_QUEUE 0xff #define IWL_MVM_MAX_ADDRESSES 5 @@ -122,8 +123,7 @@ 
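The REDUCE_TX_POWER_CMD change above versions the payload by length rather than by a new command ID. A hedged sketch of the sizing rule (the helper name is illustrative):

	/* Illustrative: send only the v2 head to firmware that does not
	 * advertise the per-chain TX power API.
	 */
	static int tx_power_cmd_len_sketch(struct iwl_mvm *mvm,
					   struct iwl_dev_tx_power_cmd *cmd)
	{
		if (!fw_has_api(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_API_TX_POWER_CHAIN))
			return sizeof(cmd->v2);
		return sizeof(*cmd);
	}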
extern const struct ieee80211_ops iwl_mvm_hw_ops; * be up'ed after the INIT fw asserted. This is useful to be able to use * proprietary tools over testmode to debug the INIT fw. * @tfd_q_hang_detect: enabled the detection of hung transmit queues - * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power - * Save)-2(default), LP(Low Power)-3 + * @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { bool init_dbg; @@ -357,6 +357,7 @@ struct iwl_mvm_vif_bf_data { * # of received beacons accumulated over FW restart, and the current * average signal of beacons retrieved from the firmware * @csa_failed: CSA failed to schedule time event, report an error later + * @features: hw features active for this vif */ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -437,6 +438,9 @@ struct iwl_mvm_vif { /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; + + /* TCP Checksum Offload */ + netdev_features_t features; }; static inline struct iwl_mvm_vif * @@ -606,6 +610,11 @@ struct iwl_mvm { /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; + /* Paging section */ + struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; + u16 num_of_paging_blk; + u16 num_of_pages_in_last_blk; + /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; @@ -686,6 +695,7 @@ struct iwl_mvm { * can hold 16 keys at most. Reflect this fact. */ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; + u8 fw_key_deleted[STA_KEY_MAX_NUM]; /* references taken by the driver and spinlock protecting them */ spinlock_t refs_lock; @@ -698,6 +708,7 @@ struct iwl_mvm { u8 fw_dbg_conf; struct delayed_work fw_dump_wk; struct iwl_mvm_dump_desc *fw_dump_desc; + struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -822,6 +833,7 @@ struct iwl_mvm { struct iwl_mvm_shared_mem_cfg shared_mem_cfg; u32 ciphers[6]; + struct iwl_mvm_tof_data tof_data; }; /* Extract MVM priv from op_mode and _hw */ @@ -848,6 +860,11 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } +static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) +{ + return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); +} + /* Must be called with rcu_read_lock() held and it can only be * released when mvmsta is not needed anymore. 
*/ @@ -941,6 +958,12 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) IWL_MVM_BT_COEX_RRC; } +static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; struct iwl_rate_info { @@ -974,12 +997,12 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd); -int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id, +int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data); int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status); -int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, +int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, @@ -988,10 +1011,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id); -void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, - struct sk_buff *skb_frag); void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); @@ -1003,6 +1022,17 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync); void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); +static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); +} + static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) { flush_work(&mvm->async_handlers_wk); @@ -1011,9 +1041,8 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) /* Statistics */ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); -int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); @@ -1059,27 +1088,20 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_ */ -int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct 
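iwl_mvm_is_csum_supported() above is the single gate for the new RX checksum offload; the patch checks it in three places that this hedged sketch ties together (the helper is illustrative):

	/* Illustrative summary of the RX checksum plumbing added in this
	 * patch: (1) mac80211 registration masks NETIF_F_RXCSUM when the
	 * capability is absent, (2) iwl_mvm_up() writes RX_EN_CSUM to
	 * enable the hardware, (3) the RX path marks each data frame.
	 */
	static bool rx_csum_valid_sketch(struct iwl_mvm *mvm, u32 rx_status)
	{
		return iwl_mvm_is_csum_supported(mvm) &&
		       (rx_status & RX_MPDU_RES_STATUS_CSUM_DONE) &&
		       (rx_status & RX_MPDU_RES_STATUS_CSUM_OK);
	}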
iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* MVM PHY */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, @@ -1106,12 +1128,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, @@ -1135,29 +1155,24 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); /* Scheduled scan */ -int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); -int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* UMAC scan */ int iwl_mvm_config_scan(struct iwl_mvm *mvm); -int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct 
iwl_rx_cmd_buffer *rxb); /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1196,9 +1211,8 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz); void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); #ifdef CONFIG_IWLWIFI_LEDS int iwl_mvm_leds_init(struct iwl_mvm *mvm); @@ -1254,9 +1268,8 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); /* BT Coex */ int iwl_send_bt_init_conf(struct iwl_mvm *mvm); -int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data); void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); @@ -1274,9 +1287,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); -int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data); u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, @@ -1285,9 +1297,8 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, enum ieee80211_band band); -int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1376,9 +1387,8 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); -int iwl_mvm_temp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_temp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_tt_exit(struct iwl_mvm *mvm); @@ -1390,9 +1400,8 @@ struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id); int iwl_mvm_init_mcc(struct iwl_mvm *mvm); -int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, @@ -1431,8 +1440,7 @@ void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int iwl_mvm_rx_tdls_notif(struct iwl_mvm 
*mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); @@ -1442,10 +1450,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, unsigned int delay); + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, struct iwl_mvm_dump_desc *desc, - unsigned int delay); + struct iwl_fw_dbg_trigger_tlv *trigger); void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trigger, diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index dfdf0f5f5..16f60096e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -139,12 +139,6 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, return ret; pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n", - pkt->hdr.flags); - ret = -EIO; - goto exit; - } /* Extract NVM response */ nvm_resp = (void *)pkt->data; @@ -652,12 +646,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, return ERR_PTR(ret); pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n", - pkt->hdr.flags); - ret = -EIO; - goto exit; - } /* Extract MCC response */ mcc_resp = (void *)pkt->data; @@ -839,9 +827,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) return retval; } -int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mcc_chub_notif *notif = (void *)pkt->data; @@ -852,7 +839,7 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) - return 0; + return; mcc[0] = notif->mcc >> 8; mcc[1] = notif->mcc & 0xff; @@ -864,10 +851,8 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, mcc, src); regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); if (IS_ERR_OR_NULL(regd)) - return 0; + return; regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); kfree(regd); - - return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index e4fa50075..f0cb092f9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -201,14 +201,15 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) } struct iwl_rx_handlers { - u8 cmd_id; + u16 cmd_id; bool async; - int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); + void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; #define RX_HANDLER(_cmd_id, _fn, _async) \ { .cmd_id = _cmd_id , .fn = _fn , .async = _async } +#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \ + { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async } /* * Handlers for fw notifications @@ -221,7 +222,6 @@ struct iwl_rx_handlers { * called from a worker with mvm->mutex held. 
*/ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { - RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false), RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false), RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), @@ -261,12 +261,14 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, true), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), + RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), }; #undef RX_HANDLER +#undef RX_HANDLER_GRP #define CMD(x) [x] = #x -static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { +static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = { CMD(MVM_ALIVE), CMD(REPLY_ERROR), CMD(INIT_COMPLETE_NOTIF), @@ -286,8 +288,10 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(PHY_CONFIGURATION_CMD), CMD(CALIB_RES_NOTIF_PHY_DB), CMD(SET_CALIB_DEFAULT_CMD), + CMD(FW_PAGING_BLOCK_CMD), CMD(ADD_STA_KEY), CMD(ADD_STA), + CMD(FW_GET_ITEM_CMD), CMD(REMOVE_STA), CMD(LQ_CMD), CMD(SCAN_OFFLOAD_CONFIG_CMD), @@ -470,6 +474,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; + trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WIDE_CMD_HDR); if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE) trans_cfg.bc_table_dword = true; @@ -576,12 +582,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, /* rpm starts with a taken ref. only set the appropriate bit here. */ mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1; + iwl_mvm_tof_init(mvm); + return op_mode; out_unregister: ieee80211_unregister_hw(mvm->hw); iwl_mvm_leds_exit(mvm); out_free: + flush_delayed_work(&mvm->fw_dump_wk); iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name) @@ -623,14 +632,15 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); + iwl_mvm_tof_clean(mvm); + ieee80211_free_hw(mvm->hw); } struct iwl_async_handler_entry { struct list_head list; struct iwl_rx_cmd_buffer rxb; - int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); + void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) @@ -667,9 +677,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk) spin_unlock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &local_list, list) { - if (entry->fn(mvm, &entry->rxb, NULL)) - IWL_WARN(mvm, - "returned value from ASYNC handlers are ignored\n"); + entry->fn(mvm, &entry->rxb); iwl_free_rxb(&entry->rxb); list_del(&entry->list); kfree(entry); @@ -698,24 +706,30 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, if (!cmds_trig->cmds[i].cmd_id) break; - if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd) + if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || + cmds_trig->cmds[i].group_id != pkt->hdr.group_id) continue; iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "CMD 0x%02x received", - pkt->hdr.cmd); + "CMD 0x%02x.%02x received", + pkt->hdr.group_id, pkt->hdr.cmd); break; } } -static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd 
*cmd) +static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u8 i; + if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) { + iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); + return; + } + iwl_mvm_rx_check_trigger(mvm, pkt); /* @@ -729,16 +743,18 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; struct iwl_async_handler_entry *entry; - if (rx_h->cmd_id != pkt->hdr.cmd) + if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) continue; - if (!rx_h->async) - return rx_h->fn(mvm, rxb, cmd); + if (!rx_h->async) { + rx_h->fn(mvm, rxb); + return; + } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); /* we can't do much... */ if (!entry) - return 0; + return; entry->rxb._page = rxb_steal_page(rxb); entry->rxb._offset = rxb->_offset; @@ -750,8 +766,6 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, schedule_work(&mvm->async_handlers_wk); break; } - - return 0; } static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) @@ -903,7 +917,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already half suspended. */ if (!mvm->restart_fw && fw_error) { - iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0); + iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, + NULL); } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1100,9 +1115,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); - /* make sure we have no running tx while configuring the qos */ set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - synchronize_net(); /* * iwl_mvm_ref_sync takes a reference before checking the flag. 
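The dispatcher above now matches handlers on a (group, opcode) pair packed into the widened 16-bit cmd_id. A hedged sketch of the packing, following the iwlwifi convention; treat the exact macro body as an assumption:

	/* assumption: group id in the high byte, opcode in the low byte */
	#define WIDE_ID_SKETCH(grp, opcode)	(((grp) << 8) | (opcode))

	static bool handler_matches_sketch(u16 handler_cmd_id,
					   u8 pkt_group_id, u8 pkt_cmd)
	{
		/* handlers registered with a bare opcode live in group 0 */
		return handler_cmd_id ==
		       WIDE_ID_SKETCH(pkt_group_id, pkt_cmd);
	}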
@@ -1130,6 +1143,9 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) mvm->d0i3_offloading = false; } + /* make sure we have no running tx while configuring the seqno */ + synchronize_net(); + iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data); ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags, sizeof(wowlan_config_cmd), @@ -1156,15 +1172,25 @@ static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac, iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags); } -static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +struct iwl_mvm_wakeup_reason_iter_data { + struct iwl_mvm *mvm; + u32 wakeup_reasons; +}; + +static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) { - struct iwl_mvm *mvm = data; + struct iwl_mvm_wakeup_reason_iter_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc && - mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) - iwl_mvm_connection_loss(mvm, vif, "D0i3"); + data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) { + if (data->wakeup_reasons & + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) + iwl_mvm_connection_loss(data->mvm, vif, "D0i3"); + else + ieee80211_beacon_loss(vif); + } } void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) @@ -1232,7 +1258,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) }; struct iwl_wowlan_status *status; int ret; - u32 disconnection_reasons, wakeup_reasons; + u32 handled_reasons, wakeup_reasons; __le16 *qos_seq = NULL; mutex_lock(&mvm->mutex); @@ -1249,13 +1275,18 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons); - disconnection_reasons = - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; - if (wakeup_reasons & disconnection_reasons) + handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; + if (wakeup_reasons & handled_reasons) { + struct iwl_mvm_wakeup_reason_iter_data data = { + .mvm = mvm, + .wakeup_reasons = wakeup_reasons, + }; + ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d0i3_disconnect_iter, mvm); + iwl_mvm_d0i3_wakeup_reason_iter, &data); + } out: iwl_mvm_d0i3_enable_tx(mvm, qos_seq); @@ -1308,17 +1339,6 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) return _iwl_mvm_exit_d0i3(mvm); } -static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight); -} - static const struct iwl_op_mode_ops iwl_mvm_ops = { .start = iwl_op_mode_mvm_start, .stop = iwl_op_mode_mvm_stop, @@ -1332,5 +1352,4 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = { .nic_config = iwl_mvm_nic_config, .enter_d0i3 = iwl_mvm_enter_d0i3, .exit_d0i3 = iwl_mvm_exit_d0i3, - .napi_add = iwl_mvm_napi_add, }; diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index d2c6ba9d3..464587788 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -112,11 +112,12 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, static void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 
- struct iwl_beacon_filter_cmd *cmd) + struct iwl_beacon_filter_cmd *cmd, + bool d0i3) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (vif->bss_conf.cqm_rssi_thold) { + if (vif->bss_conf.cqm_rssi_thold && !d0i3) { cmd->bf_energy_delta = cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); /* fw uses an absolute value for this */ @@ -287,27 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, return true; } -static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi) -{ - int numerator; - int dtim_interval = dtimper * bi; - - if (WARN_ON(!dtim_interval)) - return 0; - - if (dtimper == 1) { - if (bi > 100) - numerator = 408; - else - numerator = 510; - } else if (dtimper < 10) { - numerator = 612; - } else { - return 0; - } - return max(1, (numerator / dtim_interval)); -} - static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; @@ -357,8 +337,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); - if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) || - !mvmvif->pm_enabled) + if (!vif->bss_conf.ps || !mvmvif->pm_enabled || + (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p)) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); @@ -377,11 +357,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, if (!radar_detect && (dtimper < 10) && (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || mvm->cur_ucode == IWL_UCODE_WOWLAN)) { - cmd->skip_dtim_periods = - iwl_mvm_power_get_skip_over_dtim(dtimper, bi); - if (cmd->skip_dtim_periods) - cmd->flags |= - cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + cmd->skip_dtim_periods = 3; } if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { @@ -509,9 +486,8 @@ static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, ETH_ALEN); } -int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data; @@ -520,8 +496,6 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id); - - return 0; } struct iwl_power_vifs { @@ -810,7 +784,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; - iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd); + iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3); if (!d0i3) iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags); diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index daff1d0a8..5ae9c8aa8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -177,7 +177,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) + if (IWL_MVM_RS_DISABLE_P2P_MIMO && + iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) return false; if (mvm->nvm_data->sku_cap_mimo_disabled) @@ -2403,7 +2404,7 @@ 
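The deleted helper above scaled the DTIM skip from the beacon timing; the patch pins skip_dtim_periods at 3 instead. The old behaviour, reconstructed from the removed lines, with worked values in the comment:

	/* Kept here only for reference:
	 *   dtimper=1, bi=100 -> 510/100 -> skip 5 DTIM periods
	 *   dtimper=1, bi=102 -> 408/102 -> skip 4
	 *   dtimper=5, bi=100 -> 612/500 -> skip 1
	 */
	static int old_skip_over_dtim(int dtimper, int bi)
	{
		int numerator, dtim_interval = dtimper * bi;

		if (WARN_ON(!dtim_interval))
			return 0;
		if (dtimper == 1)
			numerator = bi > 100 ? 408 : 510;
		else if (dtimper < 10)
			numerator = 612;
		else
			return 0;
		return max(1, numerator / dtim_interval);
	}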
struct rs_init_rate_info { u8 rate_idx; }; -static const struct rs_init_rate_info rs_init_rates_24ghz[] = { +static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = { { -60, IWL_RATE_54M_INDEX }, { -64, IWL_RATE_48M_INDEX }, { -68, IWL_RATE_36M_INDEX }, @@ -2416,7 +2417,7 @@ static const struct rs_init_rate_info rs_init_rates_24ghz[] = { { S8_MIN, IWL_RATE_1M_INDEX }, }; -static const struct rs_init_rate_info rs_init_rates_5ghz[] = { +static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = { { -60, IWL_RATE_54M_INDEX }, { -64, IWL_RATE_48M_INDEX }, { -72, IWL_RATE_36M_INDEX }, @@ -2427,6 +2428,124 @@ static const struct rs_init_rate_info rs_init_rates_5ghz[] = { { S8_MIN, IWL_RATE_6M_INDEX }, }; +static const struct rs_init_rate_info rs_optimal_rates_ht[] = { + { -60, IWL_RATE_MCS_7_INDEX }, + { -64, IWL_RATE_MCS_6_INDEX }, + { -68, IWL_RATE_MCS_5_INDEX }, + { -72, IWL_RATE_MCS_4_INDEX }, + { -80, IWL_RATE_MCS_3_INDEX }, + { -84, IWL_RATE_MCS_2_INDEX }, + { -85, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX}, +}; + +static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = { + { -60, IWL_RATE_MCS_8_INDEX }, + { -64, IWL_RATE_MCS_7_INDEX }, + { -68, IWL_RATE_MCS_6_INDEX }, + { -72, IWL_RATE_MCS_5_INDEX }, + { -80, IWL_RATE_MCS_4_INDEX }, + { -84, IWL_RATE_MCS_3_INDEX }, + { -85, IWL_RATE_MCS_2_INDEX }, + { -87, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX}, +}; + +static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = { + { -60, IWL_RATE_MCS_9_INDEX }, + { -64, IWL_RATE_MCS_8_INDEX }, + { -68, IWL_RATE_MCS_7_INDEX }, + { -72, IWL_RATE_MCS_6_INDEX }, + { -80, IWL_RATE_MCS_5_INDEX }, + { -84, IWL_RATE_MCS_4_INDEX }, + { -85, IWL_RATE_MCS_3_INDEX }, + { -87, IWL_RATE_MCS_2_INDEX }, + { -88, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX }, +}; + +/* Init the optimal rate based on STA caps + * This combined with rssi is used to report the last tx rate + * to userspace when we haven't transmitted enough frames. + */ +static void rs_init_optimal_rate(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct rs_rate *rate = &lq_sta->optimal_rate; + + if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID) + rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; + else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID) + rate->type = lq_sta->is_vht ? 
LQ_VHT_SISO : LQ_HT_SISO; + else if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate->type = LQ_LEGACY_A; + else + rate->type = LQ_LEGACY_G; + + rate->bw = rs_bw_from_sta_bw(sta); + rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL); + + /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */ + + if (is_mimo(rate)) { + lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate; + } else if (is_siso(rate)) { + lq_sta->optimal_rate_mask = lq_sta->active_siso_rate; + } else { + lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate; + + if (lq_sta->band == IEEE80211_BAND_5GHZ) { + lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); + } else { + lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); + } + } + + if (is_vht(rate)) { + if (rate->bw == RATE_MCS_CHAN_WIDTH_20) { + lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_vht_20mhz); + } else { + lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz); + } + } else if (is_ht(rate)) { + lq_sta->optimal_rates = rs_optimal_rates_ht; + lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht); + } +} + +/* Compute the optimal rate index based on RSSI */ +static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta) +{ + struct rs_rate *rate = &lq_sta->optimal_rate; + int i; + + rate->index = find_first_bit(&lq_sta->optimal_rate_mask, + BITS_PER_LONG); + + for (i = 0; i < lq_sta->optimal_nentries; i++) { + int rate_idx = lq_sta->optimal_rates[i].rate_idx; + + if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) && + (BIT(rate_idx) & lq_sta->optimal_rate_mask)) { + rate->index = rate_idx; + break; + } + } + + rs_dump_rate(mvm, rate, "OPTIMAL RATE"); + return rate; +} + /* Choose an initial legacy rate and antenna to use based on the RSSI * of last Rx */ @@ -2468,12 +2587,12 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, if (band == IEEE80211_BAND_5GHZ) { rate->type = LQ_LEGACY_A; - initial_rates = rs_init_rates_5ghz; - nentries = ARRAY_SIZE(rs_init_rates_5ghz); + initial_rates = rs_optimal_rates_5ghz_legacy; + nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); } else { rate->type = LQ_LEGACY_G; - initial_rates = rs_init_rates_24ghz; - nentries = ARRAY_SIZE(rs_init_rates_24ghz); + initial_rates = rs_optimal_rates_24ghz_legacy; + nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); } if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) { @@ -2496,10 +2615,21 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta, struct ieee80211_rx_status *rx_status) { + int i; + lq_sta->pers.chains = rx_status->chains; lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0]; lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1]; lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2]; + lq_sta->pers.last_rssi = S8_MIN; + + for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { + if (!(lq_sta->pers.chains & BIT(i))) + continue; + + if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi) + lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i]; + } } /** @@ -2538,6 +2668,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, rate = &tbl->rate; rs_get_initial_rate(mvm, lq_sta, band, rate); + rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B); if (rate->ant == ANT_A) @@ 
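rs_get_optimal_rate() above walks the threshold table from strongest to weakest. A hedged usage example against the 5 GHz legacy table added earlier (the real lookup additionally honors optimal_rate_mask, omitted here):

	/* Illustrative lookup against rs_optimal_rates_5ghz_legacy */
	static int optimal_rate_example(s8 last_rssi)
	{
		static const struct { s8 rssi; int idx; } tbl[] = {
			{ -60, IWL_RATE_54M_INDEX },
			{ -64, IWL_RATE_48M_INDEX },
			{ -72, IWL_RATE_36M_INDEX },
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(tbl); i++)
			if (last_rssi >= tbl[i].rssi)
				return tbl[i].idx; /* -62 -> 48M, -70 -> 36M */
		return IWL_RATE_6M_INDEX;	/* weakest legacy fallback */
	}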
-2560,6 +2691,8 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_lq_sta *lq_sta = mvm_sta; + struct rs_rate *optimal_rate; + u32 last_ucode_rate; if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) { /* if vif isn't initialized mvm doesn't know about @@ -2583,8 +2716,18 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags, info->band, &info->control.rates[0]); - info->control.rates[0].count = 1; + + /* Report the optimal rate based on rssi and STA caps if we haven't + * converged yet (too little traffic) or exploring other modulations + */ + if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) { + optimal_rate = rs_get_optimal_rate(mvm, lq_sta); + last_ucode_rate = ucode_rate_from_rs_rate(mvm, + optimal_rate); + iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band, + &txrc->reported_rate); + } } static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, @@ -2605,6 +2748,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, #endif lq_sta->pers.chains = 0; memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); + lq_sta->pers.last_rssi = S8_MIN; return &sta_priv->lq_sta; } diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 2a3da3143..81314ad9e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -316,6 +317,14 @@ struct iwl_lq_sta { u8 max_siso_rate_idx; u8 max_mimo2_rate_idx; + /* Optimal rate based on RSSI and STA caps. * Used only to reflect link speed to userspace. */ + struct rs_rate optimal_rate; + unsigned long optimal_rate_mask; + const struct rs_init_rate_info *optimal_rates; + int optimal_nentries; + u8 missed_rate_counter; struct iwl_lq_cmd lq; @@ -341,6 +350,7 @@ struct iwl_lq_sta { #endif u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; + s8 last_rssi; struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; struct iwl_mvm *drv; } pers; diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 8f1d93b7a..c37c10a42 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -61,6 +61,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#include <linux/skbuff.h> #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" @@ -71,8 +72,7 @@ * Copies the phy information in mvm->last_phy_info, it will be used when the * actual data will come from the fw in the next packet. 
*/ -int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -86,8 +86,6 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, spin_unlock(&mvm->drv_stats_lock); } #endif - - return 0; } /* @@ -96,6 +94,7 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, * Adds the rxb to a new skb and give it to mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + struct napi_struct *napi, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, u32 ampdu_status, u8 crypt_len, @@ -129,7 +128,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, fraglen, rxb->truesize); } - ieee80211_rx(mvm->hw, skb); + ieee80211_rx_napi(mvm->hw, skb, napi); } /* @@ -237,13 +236,26 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, return 0; } +static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, + struct sk_buff *skb, + u32 status) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (mvmvif->features & NETIF_F_RXCSUM && + status & RX_MPDU_RES_STATUS_CSUM_DONE && + status & RX_MPDU_RES_STATUS_CSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + /* * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler * * Handles the actual data of the Rx packet from the fw */ -int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) { struct ieee80211_hdr *hdr; struct ieee80211_rx_status *rx_status; @@ -271,7 +283,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); - return 0; + return; } rx_status = IEEE80211_SKB_RXCB(skb); @@ -284,14 +296,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", rx_pkt_status); kfree_skb(skb); - return 0; + return; } if ((unlikely(phy_info->cfg_phy_cnt > 20))) { IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n", phy_info->cfg_phy_cnt); kfree_skb(skb); - return 0; + return; } /* @@ -366,6 +378,9 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, } } + if (sta && ieee80211_is_data(hdr->frame_control)) + iwl_mvm_rx_csum(sta, skb, rx_pkt_status); + rcu_read_unlock(); /* set the preamble flag if appropriate */ @@ -429,9 +444,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, iwl_mvm_update_frame_stats(mvm, rate_n_flags, rx_status->flag & RX_FLAG_AMPDU_DETAILS); #endif - iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status, + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, crypt_len, rxb); - return 0; } static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, @@ -623,10 +637,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, iwl_rx_packet_payload_len(pkt)); } -int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); - return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c 
b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5514ad6d4..56559d4d3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -72,10 +72,60 @@ #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 -struct iwl_mvm_scan_params { - u32 max_out_time; +enum iwl_mvm_scan_type { + IWL_SCAN_TYPE_UNASSOC, + IWL_SCAN_TYPE_WILD, + IWL_SCAN_TYPE_MILD, + IWL_SCAN_TYPE_FRAGMENTED, +}; + +enum iwl_mvm_traffic_load { + IWL_MVM_TRAFFIC_LOW, + IWL_MVM_TRAFFIC_MEDIUM, + IWL_MVM_TRAFFIC_HIGH, +}; + +struct iwl_mvm_scan_timing_params { + u32 dwell_active; + u32 dwell_passive; + u32 dwell_fragmented; u32 suspend_time; - bool passive_fragmented; + u32 max_out_time; +}; + +static struct iwl_mvm_scan_timing_params scan_timing[] = { + [IWL_SCAN_TYPE_UNASSOC] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 0, + .max_out_time = 0, + }, + [IWL_SCAN_TYPE_WILD] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 30, + .max_out_time = 120, + }, + [IWL_SCAN_TYPE_MILD] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 120, + .max_out_time = 120, + }, + [IWL_SCAN_TYPE_FRAGMENTED] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 95, + .max_out_time = 44, + }, +}; + +struct iwl_mvm_scan_params { + enum iwl_mvm_scan_type type; u32 n_channels; u16 delay; int n_ssids; @@ -90,15 +140,7 @@ struct iwl_mvm_scan_params { int n_match_sets; struct iwl_scan_probe_req preq; struct cfg80211_match_set *match_sets; - struct _dwell { - u16 passive; - u16 active; - u16 fragmented; - } dwell[IEEE80211_NUM_BANDS]; - struct { - u8 iterations; - u8 full_scan_mul; /* not used for UMAC */ - } schedule[2]; + u8 iterations[2]; }; static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) @@ -147,34 +189,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); } -/* - * If req->n_ssids > 0, it means we should do an active scan. - * In case of active scan w/o directed scan, we receive a zero-length SSID - * just to notify that this scan is active and not passive. - * In order to notify the FW of the number of SSIDs we wish to scan (including - * the zero-length one), we need to set the corresponding bits in chan->type, - * one for each SSID, and set the active bit (first). If the first SSID is - * already included in the probe template, so we need to set only - * req->n_ssids - 1 bits in addition to the first bit. - */ -static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, - enum ieee80211_band band, int n_ssids) -{ - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL)) - return 10; - if (band == IEEE80211_BAND_2GHZ) - return 20 + 3 * (n_ssids + 1); - return 10 + 2 * (n_ssids + 1); -} - -static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, - enum ieee80211_band band) -{ - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL)) - return 110; - return band == IEEE80211_BAND_2GHZ ? 
100 + 20 : 100 + 10; -} - static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, struct ieee80211_vif *vif) { @@ -186,90 +200,39 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, *global_cnt += 1; } -static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params) +static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) +{ + return IWL_MVM_TRAFFIC_LOW; +} + +static enum +iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params) { int global_cnt = 0; - enum ieee80211_band band; - u8 frag_passive_dwell = 0; + enum iwl_mvm_traffic_load load; + bool low_latency; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_condition_iterator, &global_cnt); if (!global_cnt) - goto not_bound; - - params->suspend_time = 30; - params->max_out_time = 120; - - if (iwl_mvm_low_latency(mvm)) { - if (fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { - - params->suspend_time = 105; - /* - * If there is more than one active interface make - * passive scan more fragmented. - */ - frag_passive_dwell = 40; - params->max_out_time = frag_passive_dwell; - } else { - params->suspend_time = 120; - params->max_out_time = 120; - } - } + return IWL_SCAN_TYPE_UNASSOC; - if (frag_passive_dwell && - fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { - /* - * P2P device scan should not be fragmented to avoid negative - * impact on P2P device discovery. Configure max_out_time to be - * equal to dwell time on passive channel. Take a longest - * possible value, one that corresponds to 2GHz band - */ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - u32 passive_dwell = - iwl_mvm_get_passive_dwell(mvm, - IEEE80211_BAND_2GHZ); - params->max_out_time = passive_dwell; - } else { - params->passive_fragmented = true; - } - } - - if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && - (params->max_out_time > 200)) - params->max_out_time = 200; - -not_bound: + load = iwl_mvm_get_traffic_load(mvm); + low_latency = iwl_mvm_low_latency(mvm); - for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { - if (params->passive_fragmented) - params->dwell[band].fragmented = frag_passive_dwell; + if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && + vif->type != NL80211_IFTYPE_P2P_DEVICE && + fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) + return IWL_SCAN_TYPE_FRAGMENTED; - params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm, - band); - params->dwell[band].active = - iwl_mvm_get_active_dwell(mvm, band, params->n_ssids); - } + if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) + return IWL_SCAN_TYPE_MILD; - IWL_DEBUG_SCAN(mvm, - "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n", - params->max_out_time, params->suspend_time, - params->passive_fragmented); - IWL_DEBUG_SCAN(mvm, - "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n", - params->dwell[IEEE80211_BAND_2GHZ].passive, - params->dwell[IEEE80211_BAND_2GHZ].active, - params->dwell[IEEE80211_BAND_2GHZ].fragmented); - IWL_DEBUG_SCAN(mvm, - "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n", - params->dwell[IEEE80211_BAND_5GHZ].passive, - params->dwell[IEEE80211_BAND_5GHZ].active, - params->dwell[IEEE80211_BAND_5GHZ].fragmented); + return IWL_SCAN_TYPE_WILD; } static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) @@ 
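iwl_mvm_get_scan_type() above reduces to a short decision ladder; a hedged restatement follows (note the traffic-load stub currently always reports IWL_MVM_TRAFFIC_LOW, so only the low-latency and P2P inputs really vary today):

	/* Illustrative restatement of the scan type selection */
	static enum iwl_mvm_scan_type
	scan_type_sketch(bool bound, enum iwl_mvm_traffic_load load,
			 bool low_latency, bool p2p_device, bool frag_api)
	{
		if (!bound)
			return IWL_SCAN_TYPE_UNASSOC;
		if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
		    !p2p_device && frag_api)
			return IWL_SCAN_TYPE_FRAGMENTED; /* max_out_time 44 */
		if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
			return IWL_SCAN_TYPE_MILD;
		return IWL_SCAN_TYPE_WILD;
	}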
-327,9 +290,8 @@ static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, return buf; } -int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; @@ -341,17 +303,13 @@ int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, iwl_mvm_dump_channel_list(notif->results, notif->scanned_channels, buf, sizeof(buf))); - return 0; } -int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); ieee80211_sched_scan_results(mvm->hw); - - return 0; } static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) @@ -368,9 +326,8 @@ static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) } } -int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; @@ -395,6 +352,11 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", aborted ? "aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); + IWL_DEBUG_SCAN(mvm, + "Last line %d, Last iteration %d, Time after last iteration %d\n", + scan_notif->last_schedule_line, + scan_notif->last_schedule_iteration, + __le32_to_cpu(scan_notif->time_after_last_iter)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { @@ -406,9 +368,14 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR); - IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n", + IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", aborted ? 
"aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); + IWL_DEBUG_SCAN(mvm, + "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n", + scan_notif->last_schedule_line, + scan_notif->last_schedule_iteration, + __le32_to_cpu(scan_notif->time_after_last_iter)); mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; ieee80211_sched_scan_stopped(mvm->hw); @@ -426,8 +393,6 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, mvm->last_ebs_successful = scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; - - return 0; } static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) @@ -751,13 +716,11 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) { - cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active; - cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; - if (params->passive_fragmented) - cmd->fragmented_dwell = - params->dwell[IEEE80211_BAND_2GHZ].fragmented; - cmd->max_out_time = cpu_to_le32(params->max_out_time); - cmd->suspend_time = cpu_to_le32(params->suspend_time); + cmd->active_dwell = scan_timing[params->type].dwell_active; + cmd->passive_dwell = scan_timing[params->type].dwell_passive; + cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; + cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); + cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); } @@ -794,7 +757,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params) { - return params->schedule[0].iterations + params->schedule[1].iterations; + return params->iterations[0] + params->iterations[1]; } static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, @@ -808,7 +771,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; - if (params->passive_fragmented) + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm)) @@ -861,11 +824,11 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ssid_bitmap <<= 1; cmd->schedule[0].delay = cpu_to_le16(params->interval); - cmd->schedule[0].iterations = params->schedule[0].iterations; - cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul; + cmd->schedule[0].iterations = params->iterations[0]; + cmd->schedule[0].full_scan_mul = 1; cmd->schedule[1].delay = cpu_to_le16(params->interval); - cmd->schedule[1].iterations = params->schedule[1].iterations; - cmd->schedule[1].full_scan_mul = params->schedule[1].iterations; + cmd->schedule[1].iterations = params->iterations[1]; + cmd->schedule[1].full_scan_mul = 1; if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) { cmd->channel_opt[0].flags = @@ -937,9 +900,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) int num_channels = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; - int ret, i, j = 0, cmd_size, data_size; + int ret, i, j = 0, cmd_size; struct iwl_host_cmd cmd = { - .id = SCAN_CFG_CMD, + .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), }; if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) @@ -951,8 +914,6 @@ int iwl_mvm_config_scan(struct iwl_mvm 
*mvm) if (!scan_config) return -ENOMEM; - data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr); - scan_config->hdr.size = cpu_to_le16(data_size); scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE | SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | SCAN_CONFIG_FLAG_SET_TX_CHAINS | @@ -1013,13 +974,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { - cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active; - cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; - if (params->passive_fragmented) - cmd->fragmented_dwell = - params->dwell[IEEE80211_BAND_2GHZ].fragmented; - cmd->max_out_time = cpu_to_le32(params->max_out_time); - cmd->suspend_time = cpu_to_le32(params->suspend_time); + cmd->active_dwell = scan_timing[params->type].dwell_active; + cmd->passive_dwell = scan_timing[params->type].dwell_passive; + cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; + cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); + cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); @@ -1059,7 +1018,7 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; - if (params->passive_fragmented) + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm)) @@ -1099,8 +1058,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return uid; memset(cmd, 0, ksize(cmd)); - cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) - - sizeof(struct iwl_mvm_umac_cmd_hdr)); iwl_mvm_scan_umac_dwell(mvm, cmd, params); @@ -1230,17 +1187,15 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params.n_match_sets = 0; params.match_sets = NULL; - params.schedule[0].iterations = 1; - params.schedule[0].full_scan_mul = 0; - params.schedule[1].iterations = 0; - params.schedule[1].full_scan_mul = 0; + params.iterations[0] = 1; + params.iterations[1] = 0; - iwl_mvm_scan_calc_dwell(mvm, vif, ¶ms); + params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - hcmd.id = SCAN_REQ_UMAC; + hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_REGULAR); } else { @@ -1313,10 +1268,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.n_match_sets = req->n_match_sets; params.match_sets = req->match_sets; - params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS; - params.schedule[0].full_scan_mul = 1; - params.schedule[1].iterations = 0xff; - params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; + params.iterations[0] = 0; + params.iterations[1] = 0xff; + + params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); if (req->interval > U16_MAX) { IWL_DEBUG_SCAN(mvm, @@ -1339,8 +1294,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.delay = req->delay; } - iwl_mvm_scan_calc_dwell(mvm, vif, ¶ms); - ret = iwl_mvm_config_sched_scan_profiles(mvm, req); if (ret) return ret; @@ -1348,7 +1301,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - hcmd.id = SCAN_REQ_UMAC; + hcmd.id = 
iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_SCHED); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; @@ -1374,9 +1327,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_complete *notif = (void *)pkt->data; @@ -1384,7 +1336,7 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) - return 0; + return; /* if the scan is already stopping, we don't need to notify mac80211 */ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { @@ -1395,26 +1347,26 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, } mvm->scan_status &= ~mvm->scan_uid_status[uid]; - IWL_DEBUG_SCAN(mvm, "Scan completed, uid %u type %u, status %s, EBS status %s\n", uid, mvm->scan_uid_status[uid], notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? "completed" : "aborted", iwl_mvm_ebs_status_str(notif->ebs_status)); + IWL_DEBUG_SCAN(mvm, + "Last line %d, Last iteration %d, Time from last iteration %d\n", + notif->last_schedule, notif->last_iter, + __le32_to_cpu(notif->time_from_last_iter)); if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && notif->ebs_status != IWL_SCAN_EBS_INACTIVE) mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; - - return 0; } -int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; @@ -1426,15 +1378,11 @@ int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, iwl_mvm_dump_channel_list(notif->results, notif->scanned_channels, buf, sizeof(buf))); - return 0; } static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) { - struct iwl_umac_scan_abort cmd = { - .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) - - sizeof(struct iwl_mvm_umac_cmd_hdr)), - }; + struct iwl_umac_scan_abort cmd = {}; int uid, ret; lockdep_assert_held(&mvm->mutex); @@ -1451,7 +1399,10 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); - ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, + iwl_cmd_id(SCAN_ABORT_UMAC, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(cmd), &cmd); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; @@ -1461,7 +1412,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; - static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, + static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, SCAN_OFFLOAD_COMPLETE, }; int ret; diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 26f076e82..df216cd0c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -1148,18 +1148,31 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif, static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) { - int i; + int i, max = -1, max_offs = -1; lockdep_assert_held(&mvm->mutex); - i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM); + /* Pick the unused key offset with the highest 'deleted' + * counter. Every time a key is deleted, all the counters + * are incremented and the one that was just deleted is + * reset to zero. Thus, the highest counter is the one + * that was deleted longest ago. Pick that one. + */ + for (i = 0; i < STA_KEY_MAX_NUM; i++) { + if (test_bit(i, mvm->fw_key_table)) + continue; + if (mvm->fw_key_deleted[i] > max) { + max = mvm->fw_key_deleted[i]; + max_offs = i; + } + } - if (i == STA_KEY_MAX_NUM) + if (max_offs < 0) return STA_KEY_IDX_INVALID; - __set_bit(i, mvm->fw_key_table); + __set_bit(max_offs, mvm->fw_key_table); - return i; + return max_offs; } static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, @@ -1277,8 +1290,6 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, const u8 *pn; memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen); - ieee80211_aes_cmac_calculate_k1_k2(keyconf, - igtk_cmd.K1, igtk_cmd.K2); ieee80211_get_key_rx_seq(keyconf, 0, &seq); pn = seq.aes_cmac.pn; igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | @@ -1479,7 +1490,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); u8 sta_id; - int ret; + int ret, i; lockdep_assert_held(&mvm->mutex); @@ -1498,6 +1509,13 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, return -ENOENT; } + /* track which key was deleted last */ + for (i = 0; i < STA_KEY_MAX_NUM; i++) { + if (mvm->fw_key_deleted[i] < U8_MAX) + mvm->fw_key_deleted[i]++; + } + mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; + if (sta_id == IWL_MVM_STATION_COUNT) { IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); return 0; @@ -1661,9 +1679,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } -int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; @@ -1671,15 +1688,13 @@ int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, u32 sta_id = le32_to_cpu(notif->sta_id); if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) - return 0; + return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (!IS_ERR_OR_NULL(sta)) ieee80211_sta_eosp(sta); rcu_read_unlock(); - - return 0; } void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index 748f5dc3f..eedb215eb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -378,9 +378,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); -int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c index a87b506c8..fe2fa5650 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tdls.c +++ 
b/drivers/net/wireless/iwlwifi/mvm/tdls.c @@ -169,18 +169,11 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return; pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n", - pkt->hdr.flags); - goto exit; - } - if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp))) - goto exit; + WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)); /* we don't really care about the response at this point */ -exit: iwl_free_resp(&cmd); } @@ -261,8 +254,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; } -int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data; @@ -277,17 +269,17 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, /* can fail sometimes */ if (!le32_to_cpu(notif->status)) { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); - goto out; + return; } if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT)) - goto out; + return; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* the station may not be here, but if it is, it must be a TDLS peer */ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) - goto out; + return; mvmsta = iwl_mvm_sta_from_mac80211(sta); vif = mvmsta->vif; @@ -301,9 +293,6 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, msecs_to_jiffies(delay)); iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE); - -out: - return 0; } static int @@ -471,13 +460,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); info = IEEE80211_SKB_CB(skb); - if (info->control.hw_key) - iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb); + hdr = (void *)skb->data; + if (info->control.hw_key) { + if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) { + rcu_read_unlock(); + ret = -EINVAL; + goto out; + } + iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd); + } iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, mvmsta->sta_id); - hdr = (void *)skb->data; iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, hdr->frame_control); rcu_read_unlock(); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index e472729e5..dbd7d5445 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -410,9 +410,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, /* * The Rx handler for time event notifications */ -int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_time_event_notif *notif = (void *)pkt->data; @@ -433,8 +432,6 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, } unlock: spin_unlock_bh(&mvm->time_event_lock); - - return 0; } static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, @@ -503,7 +500,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, struct iwl_time_event_cmd *te_cmd) { - 
static const u8 time_event_response[] = { TIME_EVENT_CMD }; + static const u16 time_event_response[] = { TIME_EVENT_CMD }; struct iwl_notification_wait wait_time_event; int ret; @@ -566,7 +563,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; + const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; struct iwl_notification_wait wait_te_notif; struct iwl_time_event_cmd time_cmd = {}; @@ -599,8 +596,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); - time_cmd.apply_time = - cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG)); + time_cmd.apply_time = cpu_to_le32(0); time_cmd.max_frags = TE_V2_FRAG_NONE; time_cmd.max_delay = cpu_to_le32(max_delay); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h index de4fbc6d5..cbdf8e52a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h @@ -157,9 +157,8 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, /* * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION. */ -int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /** * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c new file mode 100644 index 000000000..380972f8f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/tof.c @@ -0,0 +1,304 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "mvm.h" +#include "fw-api-tof.h" + +#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 + +void iwl_mvm_tof_init(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return; + + memset(tof_data, 0, sizeof(*tof_data)); + + tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (IWL_MVM_TOF_IS_RESPONDER) { + tof_data->responder_cfg.sub_grp_cmd_id = + cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); + tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT; + } +#endif + + tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD); + tof_data->range_req.req_timeout = 1; + tof_data->range_req.initiator = 1; + tof_data->range_req.report_policy = 3; + + tof_data->range_req_ext.sub_grp_cmd_id = + cpu_to_le32(TOF_RANGE_REQ_EXT_CMD); + + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; +} + +void iwl_mvm_tof_clean(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return; + + memset(tof_data, 0, sizeof(*tof_data)); + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; +} + +static void iwl_tof_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + bool *enabled = _data; + + /* non bss vif exists */ + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) + *enabled = false; +} + +int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm) +{ + struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg; + bool enabled; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_tof_iterator, &enabled); + if (!enabled) { + IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n"); + return -EINVAL; + } + + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(*cmd), cmd); +} + +int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id) +{ + struct iwl_tof_range_abort_cmd 
cmd = { + .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD), + .request_id = id, + }; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (id != mvm->tof_data.active_range_request) { + IWL_ERR(mvm, "Invalid range request id %d (active %d)\n", + id, mvm->tof_data.active_range_request); + return -EINVAL; + } + + /* after abort is sent there's no active request anymore */ + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(cmd), &cmd); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (vif->p2p || vif->type != NL80211_IFTYPE_AP) { + IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); + return -EIO; + } + + cmd->sta_id = mvmvif->bcast_sta.sta_id; + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(*cmd), cmd); +} +#endif + +int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .len = { sizeof(mvm->tof_data.range_req), }, + /* no copy because of the command size */ + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + IWL_ERR(mvm, "Cannot send range request, not STA mode\n"); + return -EIO; + } + + /* nesting of range requests is not supported in FW */ + if (mvm->tof_data.active_range_request != + IWL_MVM_TOF_RANGE_REQ_MAX_ID) { + IWL_ERR(mvm, "Cannot send range req, already active req %d\n", + mvm->tof_data.active_range_request); + return -EIO; + } + + mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id; + + cmd.data[0] = &mvm->tof_data.range_req; + return iwl_mvm_send_cmd(mvm, &cmd); +} + +int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n"); + return -EIO; + } + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(mvm->tof_data.range_req_ext), + &mvm->tof_data.range_req_ext); +} + +static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_range_rsp_ntfy *resp = (void *)data; + + if (resp->request_id != mvm->tof_data.active_range_request) { + IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n", + resp->request_id, mvm->tof_data.active_range_request); + return -EIO; + } + + memcpy(&mvm->tof_data.range_resp, resp, + sizeof(struct iwl_tof_range_rsp_ntfy)); + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + + return 0; +} + +static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data; + + IWL_DEBUG_INFO(mvm, "MCSI 
notification, token %d\n", resp->token); + return 0; +} + +static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_neighbor_report *report = + (struct iwl_tof_neighbor_report *)data; + + IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n", + report->bssid, report->request_token, report->status); + return 0; +} + +void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data; + + lockdep_assert_held(&mvm->mutex); + + switch (le32_to_cpu(resp->sub_grp_cmd_id)) { + case TOF_RANGE_RESPONSE_NOTIF: + iwl_mvm_tof_range_resp(mvm, resp->data); + break; + case TOF_MCSI_DEBUG_NOTIF: + iwl_mvm_tof_mcsi_notif(mvm, resp->data); + break; + case TOF_NEIGHBOR_REPORT_RSP_NOTIF: + iwl_mvm_tof_nb_report_notif(mvm, resp->data); + break; + default: + IWL_ERR(mvm, "Unknown sub-group command 0x%x\n", + resp->sub_grp_cmd_id); + break; + } +} diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h new file mode 100644 index 000000000..50ae8adaa --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/tof.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __tof_h__ +#define __tof_h__ + +#include "fw-api-tof.h" + +struct iwl_mvm_tof_data { + struct iwl_tof_config_cmd tof_cfg; + struct iwl_tof_range_req_cmd range_req; + struct iwl_tof_range_req_ext_cmd range_req_ext; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct iwl_tof_responder_config_cmd responder_cfg; +#endif + struct iwl_tof_range_rsp_ntfy range_resp; + u8 last_abort_id; + u16 active_range_request; +}; + +void iwl_mvm_tof_init(struct iwl_mvm *mvm); +void iwl_mvm_tof_clean(struct iwl_mvm *mvm); +int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm); +int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id); +int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +#endif +#endif /* __tof_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c index 80d07db6e..fe7145c2c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -33,6 +33,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -154,24 +155,20 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, return true; } -int iwl_mvm_temp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); int temp; /* the notification is handled synchronously in ctkill, so skip here */ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) - return 0; + return; temp = iwl_mvm_temp_notif_parse(mvm, pkt); if (temp < 0) - return 0; + return; iwl_mvm_tt_temp_changed(mvm, temp); - - return 0; } static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) @@ -187,7 +184,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) int iwl_mvm_get_temp(struct iwl_mvm *mvm) { struct iwl_notification_wait wait_temp_notif; - static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION }; + static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION }; int ret, temp; lockdep_assert_held(&mvm->mutex); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 89116864d..6df5aada4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -153,18 +153,20 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, if (ieee80211_is_mgmt(fc)) { if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) - tx_cmd->pm_frame_timeout = cpu_to_le16(3); + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); + else if (ieee80211_is_action(fc)) + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); else - tx_cmd->pm_frame_timeout = cpu_to_le16(2); + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); /* The spec allows Action frames in A-MPDU, we don't support * it */ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { - tx_cmd->pm_frame_timeout = cpu_to_le16(2); + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); } else { - tx_cmd->pm_frame_timeout = 0; + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); } if (ieee80211_is_data(fc) && len > mvm->rts_threshold && @@ -268,19 +270,29 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, /* * Sets the fields in the Tx cmd that are crypto related */ -void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, - struct sk_buff *skb_frag) +static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag, + int hdrlen) { struct ieee80211_key_conf *keyconf = info->control.hw_key; + u8 *crypto_hdr = skb_frag->data + hdrlen; + u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: - tx_cmd->sec_ctl = TX_CMD_SEC_CCM; - memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); - if (info->flags & IEEE80211_TX_CTL_AMPDU) - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); + case WLAN_CIPHER_SUITE_CCMP_256: + iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); + pn = atomic64_inc_return(&keyconf->tx_pn); + crypto_hdr[0] = pn; + crypto_hdr[2] = 0; + crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); + crypto_hdr[1] = pn >> 8; + crypto_hdr[4] = pn >> 16; + crypto_hdr[5] = pn >> 24; + crypto_hdr[6] = pn >> 32; + crypto_hdr[7] = pn >> 40; break; case WLAN_CIPHER_SUITE_TKIP: @@ -308,7 +320,7 @@ void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, */ static 
struct iwl_device_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta, u8 sta_id) + int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -325,7 +337,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; if (info->control.hw_key) - iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb); + iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); @@ -346,6 +358,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; u8 sta_id; + int hdrlen = ieee80211_hdrlen(hdr->frame_control); if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) return -1; @@ -366,23 +379,34 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; /* - * If the interface on which frame is sent is the P2P_DEVICE + * If the interface on which the frame is sent is the P2P_DEVICE * or an AP/GO interface use the broadcast station associated - * with it; otherwise use the AUX station. + * with it; otherwise if the interface is a managed interface + * use the AP station associated with it for multicast traffic + * (this is not possible for unicast packets as TDLS discovery + * responses are sent without a station entry); otherwise use the + * AUX station. */ - if (info->control.vif && - (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info->control.vif->type == NL80211_IFTYPE_AP)) { + sta_id = mvm->aux_sta.sta_id; + if (info->control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); - sta_id = mvmvif->bcast_sta.sta_id; - } else { - sta_id = mvm->aux_sta.sta_id; + + if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || + info->control.vif->type == NL80211_IFTYPE_AP) + sta_id = mvmvif->bcast_sta.sta_id; + else if (info->control.vif->type == NL80211_IFTYPE_STATION && + is_multicast_ether_addr(hdr->addr1)) { + u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); + + if (ap_sta_id != IWL_MVM_STATION_COUNT) + sta_id = ap_sta_id; + } } IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id); if (!dev_cmd) return -1; @@ -390,7 +414,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control)); + memcpy(tx_cmd->hdr, hdr, hdrlen); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); @@ -416,9 +440,11 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, u8 tid = IWL_MAX_TID_COUNT; u8 txq_id = info->hw_queue; bool is_data_qos = false, is_ampdu = false; + int hdrlen; mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; + hdrlen = ieee80211_hdrlen(fc); if (WARN_ON_ONCE(!mvmsta)) return -1; @@ -426,7 +452,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) return -1; - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id); if (!dev_cmd) goto drop; 
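[Editor's note] The crypto hunk above moves CCMP packet-number (PN) insertion from the firmware into the driver: the PN is drawn from keyconf->tx_pn and written byte by byte into the 8-byte CCMP header that follows the MAC header (hence the new hdrlen plumbing). A minimal standalone sketch of that byte layout, assuming a plain 64-bit counter holding the 48-bit PN; the helper name and types below are illustrative, not part of the patch:

#include <stdint.h>

/* Pack a 48-bit CCMP PN and a 2-bit key index into the 8-byte CCMP header,
 * mirroring the crypto_hdr[] stores in the hunk above. Byte 2 is reserved;
 * byte 3 carries the ExtIV flag (0x20) plus the key index in its two top
 * bits, per the IEEE 802.11 CCMP header format.
 */
static void ccmp_hdr_pack(uint8_t hdr[8], uint64_t pn, unsigned int keyidx)
{
	hdr[0] = pn;			/* PN0, least significant byte */
	hdr[1] = pn >> 8;		/* PN1 */
	hdr[2] = 0;			/* reserved */
	hdr[3] = 0x20 | (keyidx << 6);	/* ExtIV | key ID */
	hdr[4] = pn >> 16;		/* PN2 */
	hdr[5] = pn >> 24;		/* PN3 */
	hdr[6] = pn >> 32;		/* PN4 */
	hdr[7] = pn >> 40;		/* PN5 */
}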
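[Editor's note] The station-selection rework in iwl_mvm_tx_skb_non_sta() above is a three-way fallback: the broadcast station for P2P_DEVICE/AP vifs, the AP's own station for multicast frames on a managed vif (when one is assigned), and the AUX station otherwise. A condensed restatement under assumed stand-in types (the enum, constant, and function below are illustrative, not driver API):

#include <stdbool.h>
#include <stdint.h>

enum vif_type { VIF_NONE, VIF_P2P_DEVICE, VIF_AP, VIF_STATION, VIF_OTHER };

#define STA_ID_NONE 0xff	/* stand-in for IWL_MVM_STATION_COUNT */

static uint8_t pick_sta_id(enum vif_type type, bool mcast_frame,
			   uint8_t bcast_sta, uint8_t ap_sta, uint8_t aux_sta)
{
	uint8_t sta_id = aux_sta;	/* default, also when there is no vif */

	if (type == VIF_P2P_DEVICE || type == VIF_AP)
		sta_id = bcast_sta;	/* vif's broadcast station */
	else if (type == VIF_STATION && mcast_frame && ap_sta != STA_ID_NONE)
		sta_id = ap_sta;	/* AP station, multicast frames only */

	return sta_id;
}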
@@ -458,7 +484,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, } /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc)); + memcpy(tx_cmd->hdr, hdr, hdrlen); WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); @@ -911,8 +937,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, rcu_read_unlock(); } -int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; @@ -921,8 +946,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, iwl_mvm_rx_tx_cmd_single(mvm, pkt); else iwl_mvm_rx_tx_cmd_agg(mvm, pkt); - - return 0; } static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, @@ -942,8 +965,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, (void *)(uintptr_t)tid_data->rate_n_flags; } -int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; @@ -965,7 +987,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || tid >= IWL_MAX_TID_COUNT, "sta_id %d tid %d", sta_id, tid)) - return 0; + return; rcu_read_lock(); @@ -974,7 +996,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, /* Reclaiming frames for a station that has been deleted ? */ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); - return 0; + return; } mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -985,7 +1007,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, "invalid BA notification: Q %d, tid %d, flow %d\n", tid_data->txq_id, tid, scd_flow); rcu_read_unlock(); - return 0; + return; } spin_lock_bh(&mvmsta->lock); @@ -1072,8 +1094,6 @@ out: skb = __skb_dequeue(&reclaimed_skbs); ieee80211_tx_status(mvm->hw, skb); } - - return 0; } /* diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index 03f8e06dd..a7d434256 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -108,7 +108,7 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) return ret; } -int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id, +int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data) { struct iwl_host_cmd cmd = { @@ -166,11 +166,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, goto out_free_resp; } - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - ret = -EIO; - goto out_free_resp; - } - resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { ret = -EIO; @@ -187,7 +182,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, /* * We assume that the caller set the status to the sucess value */ -int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len, +int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status) { struct iwl_host_cmd cmd = { @@ -243,8 +238,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) return fw_rate_idx_to_plcp[rate_idx]; } -int 
iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_error_resp *err_resp = (void *)pkt->data; @@ -256,7 +250,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, le32_to_cpu(err_resp->error_service)); IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", le64_to_cpu(err_resp->timestamp)); - return 0; } /* diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 9f65c1cff..639761fb2 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -414,18 +414,30 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, @@ -433,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0910, 
iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -614,6 +636,7 @@ static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct iwl_trans *trans = pci_get_drvdata(pdev); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; /* Before you put code here, think about WoWLAN. You cannot check here @@ -631,20 +654,16 @@ static int iwl_pci_resume(struct device *device) return 0; /* - * On suspend, ict is disabled, and the interrupt mask - * gets cleared. Reconfigure them both in case of d0i3 - * image. Otherwise, only enable rfkill interrupt (in - * order to keep track of the rfkill status) + * Enable rfkill interrupt (in order to keep track of + * the rfkill status) */ - if (trans->wowlan_d0i3) { - iwl_pcie_reset_ict(trans); - iwl_enable_interrupts(trans); - } else { - iwl_enable_rfkill_int(trans); - } + iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); + + mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); return 0; } diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 376b84e54..feb2f7e81 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -44,6 +44,21 @@ #include "iwl-io.h" #include "iwl-op-mode.h" +/* We need 2 entries for the TX command and header, and another one might + * be needed for potential data in the SKB's head. The remaining ones can + * be used for frags. + */ +#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3) + +/* + * RX related structures and functions + */ +#define RX_NUM_QUEUES 1 +#define RX_POST_REQ_ALLOC 2 +#define RX_CLAIM_REQ_ALLOC 8 +#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) +#define RX_LOW_WATERMARK 8 + struct iwl_host_cmd; /*This file includes the declaration that are internal to the @@ -77,29 +92,29 @@ struct isr_statistics { * struct iwl_rxq - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) - * @pool: - * @queue: * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free + * @used_count: Number of RBDs handed to the allocator to use for allocation * @write_actual: - * @rx_free: list of free SKBs for use - * @rx_used: List of Rx buffers with no SKB + * @rx_free: list of RBDs with allocated RB ready for use + * @rx_used: list of RBDs with no RB attached * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: + * @pool: initial pool of iwl_rx_mem_buffer for the queue + * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { __le32 *bd; dma_addr_t bd_dma; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; u32 write; u32 free_count; + u32 used_count; u32 write_actual; struct list_head rx_free; struct list_head rx_used; @@ -107,6 +122,32 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; + struct iwl_rx_mem_buffer 
pool[RX_QUEUE_SIZE]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; +}; + +/** + * struct iwl_rb_allocator - Rx allocator + * @pool: initial pool of allocator + * @req_pending: number of requests the allocator has not processed yet + * @req_ready: number of requests honored and ready for claiming + * @rbd_allocated: RBDs with pages allocated and ready to be handed to + * the queue. This is a list of &struct iwl_rx_mem_buffer + * @rbd_empty: RBDs with no page attached for allocator use. This is a list + * of &struct iwl_rx_mem_buffer + * @lock: protects the rbd_allocated and rbd_empty lists + * @alloc_wq: work queue for background calls + * @rx_alloc: work struct for background calls + */ +struct iwl_rb_allocator { + struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; + atomic_t req_pending; + atomic_t req_ready; + struct list_head rbd_allocated; + struct list_head rbd_empty; + spinlock_t lock; + struct workqueue_struct *alloc_wq; + struct work_struct rx_alloc; }; struct iwl_dma_ptr { @@ -250,7 +291,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data - * @rx_replenish: work that will be called when buffers need to be allocated + * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -264,8 +305,10 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) * @rx_buf_size_8k: 8 kB RX buffer size * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue + * @wide_cmd_header: true when ucode supports wide command header format * @rx_page_order: page order for receive buffer size * @reg_lock: protect hw register access + * @mutex: to protect stop_device / start_fw / start_hw * @cmd_in_flight: true when we have a host command in flight * @fw_mon_phys: physical address of the buffer for the firmware monitor * @fw_mon_page: points to the first page of the buffer for the firmware monitor @@ -273,7 +316,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) */ struct iwl_trans_pcie { struct iwl_rxq rxq; - struct work_struct rx_replenish; + struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; @@ -285,9 +328,11 @@ struct iwl_trans_pcie { dma_addr_t ict_tbl_dma; int ict_index; bool use_ict; + bool is_down; struct isr_statistics isr_stats; spinlock_t irq_lock; + struct mutex mutex; u32 inta_mask; u32 scd_base_addr; struct iwl_dma_ptr scd_bc_tbls; @@ -314,6 +359,7 @@ struct iwl_trans_pcie { bool rx_buf_size_8k; bool bc_table_dword; bool scd_set_active; + bool wide_cmd_header; u32 rx_page_order; const char *const *command_names; @@ -385,7 +431,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb, int handler_status); + struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index adad8d0fa..e06591f62 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,7 +1,7 @@ 
/****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -74,16 +74,29 @@ * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. - * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ INDEX is updated (updating the - * 'processed' and 'read' driver indexes as well) + * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. + * When the interrupt handler is called, the request is processed. + * The page is either stolen - transferred to the upper layer + * or reused - added immediately to the iwl->rxq->rx_free list. + * + When the page is stolen - the driver updates the matching queue's used + * count, detaches the RBD and transfers it to the queue used list. + * When there are two used RBDs - they are transferred to the allocator empty + * list. Work is then scheduled for the allocator to start allocating + * eight buffers. + * When there are another 6 used RBDs - they are transferred to the allocator + * empty list and the driver tries to claim the pre-allocated buffers and + * add them to iwl->rxq->rx_free. If it fails - it continues to claim them + * until ready. + * When there are 8+ buffers in the free list - either from allocation or from + * 8 reused unstolen pages - restock is called to update the FW and indexes. + * + In order to make sure the allocator always has RBDs to use for allocation + * the allocator has an initial pool of size num_queues*(8-2) - the + * maximum missing RBDs per allocation request (request posted with 2 + * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). + * The queues supply the recycling of the rest of the RBDs. * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at irq thread time from the - * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, + * + If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * @@ -92,18 +105,32 @@ * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock + * iwl_pcie_rxq_restock. + * Used only during initialization. * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates - * the WRITE index. If insufficient rx_free buffers - * are available, schedules iwl_pcie_rx_replenish + * the WRITE index. + * iwl_pcie_rx_allocator() Background work for allocating pages. * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. + * Posts and claims requests to the allocator.
* Calls iwl_pcie_rxq_restock to refill any empty * slots. + * + * RBD life-cycle: + * + * Init: + * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue + * + * Regular Receive interrupt: + * Page Stolen: + * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> + * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue + * Page not Stolen: + * rxq.queue -> rxq.rx_free -> rxq.queue * ... * */ @@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) rxq->free_count--; } spin_unlock(&rxq->lock); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - schedule_work(&trans_pcie->rx_replenish); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ @@ -254,6 +277,45 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) } } +/* + * iwl_pcie_rx_alloc_page - allocates and returns a page. + * + */ +static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, + gfp_t priority) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct page *page; + gfp_t gfp_mask = priority; + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", + trans_pcie->rx_page_order); + /* Issue an error if the hardware has consumed more than half + * of its free buffer list and we don't have enough + * pre-allocated buffers. + */ + if (rxq->free_count <= RX_LOW_WATERMARK && + iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && + net_ratelimit()) + IWL_CRIT(trans, + "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", + rxq->free_count); + return NULL; + } + return page; +} + /* + * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD + * @@ -269,7 +331,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; - gfp_t gfp_mask = priority; while (1) { spin_lock(&rxq->lock); @@ -279,32 +340,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) } spin_unlock(&rxq->lock); - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, " - "order: %d\n", - trans_pcie->rx_page_order); - - if ((rxq->free_count <= RX_LOW_WATERMARK) && - net_ratelimit()) - IWL_CRIT(trans, "Failed to alloc_pages with %s." - "Only %u free buffers remaining.\n", - priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ + page = iwl_pcie_rx_alloc_page(trans, priority); + if (!page) return; - } spin_lock(&rxq->lock); @@ -355,7 +394,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + for (i = 0; i < RX_QUEUE_SIZE; i++) { if (!rxq->pool[i].page) continue; dma_unmap_page(trans->dev, rxq->pool[i].page_dma, @@ -372,32 +411,164 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) * When moving to rx_free an page is allocated for the slot. * * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called as a scheduled work item (except for during initialization) + * This is called only during initialization */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) +static void iwl_pcie_rx_replenish(struct iwl_trans *trans) { - iwl_pcie_rxq_alloc_rbs(trans, gfp); + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); iwl_pcie_rxq_restock(trans); } -static void iwl_pcie_rx_replenish_work(struct work_struct *data) +/* + * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues + * + * Allocates for each received request 8 pages + * Called as a scheduled work item. + */ +static void iwl_pcie_rx_allocator(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct list_head local_empty; + int pending = atomic_xchg(&rba->req_pending, 0); + + IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); + + /* If we were scheduled - there is at least one request */ + spin_lock(&rba->lock); + /* swap out the rba->rbd_empty to a local list */ + list_replace_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + while (pending) { + int i; + struct list_head local_allocated; + + INIT_LIST_HEAD(&local_allocated); + + for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + /* List should never be empty - each reused RBD is + * returned to the list, and initial pool covers any + * possible gap between the time the page is allocated + * to the time the RBD is added. + */ + BUG_ON(list_empty(&local_empty)); + /* Get the first rxb from the rbd list */ + rxb = list_first_entry(&local_empty, + struct iwl_rx_mem_buffer, list); + BUG_ON(rxb->page); + + /* Alloc a new receive buffer */ + page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); + if (!page) + continue; + rxb->page = page; + + /* Get physical address of the RB */ + rxb->page_dma = dma_map_page(trans->dev, page, 0, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + __free_pages(page, trans_pcie->rx_page_order); + continue; + } + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! 
*/ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + /* move the allocated entry to the out list */ + list_move(&rxb->list, &local_allocated); + i++; + } + + pending--; + if (!pending) { + pending = atomic_xchg(&rba->req_pending, 0); + IWL_DEBUG_RX(trans, + "Pending allocation requests = %d\n", + pending); + } + + spin_lock(&rba->lock); + /* add the allocated rbds to the allocator allocated list */ + list_splice_tail(&local_allocated, &rba->rbd_allocated); + /* get more empty RBDs for current pending requests */ + list_splice_tail_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_ready); + } + + spin_lock(&rba->lock); + /* return unused rbds to the allocator empty list */ + list_splice_tail(&local_empty, &rba->rbd_empty); + spin_unlock(&rba->lock); +} + +/* + * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages + * + * Called by the queue when it has posted an allocation request and + * has freed 8 RBDs in order to restock itself. + */ +static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rx_mem_buffer + *out[RX_CLAIM_REQ_ALLOC]) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + /* + * atomic_dec_if_positive returns req_ready - 1 for any scenario. + * If req_ready is 0 atomic_dec_if_positive will return -1 and this + * function will return -ENOMEM, as there are no ready requests. + * atomic_dec_if_positive will perform the *actual* decrement only if + * req_ready > 0, i.e. - there are ready requests and the function + * hands one request to the caller. + */ + if (atomic_dec_if_positive(&rba->req_ready) < 0) + return -ENOMEM; + + spin_lock(&rba->lock); + for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { + /* Get next free Rx buffer, remove it from free list */ + out[i] = list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + list_del(&out[i]->list); + } + spin_unlock(&rba->lock); + + return 0; +} + +static void iwl_pcie_rx_allocator_work(struct work_struct *data) +{ + struct iwl_rb_allocator *rba_p = + container_of(data, struct iwl_rb_allocator, rx_alloc); struct iwl_trans_pcie *trans_pcie = - container_of(data, struct iwl_trans_pcie, rx_replenish); + container_of(rba_p, struct iwl_trans_pcie, rba); - iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); + iwl_pcie_rx_allocator(trans_pcie->trans); } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); + spin_lock_init(&rba->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; @@ -487,15 +658,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; + rxq->used_count = 0; - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + for (i = 0; i < RX_QUEUE_SIZE; i++) list_add(&rxq->pool[i].list, &rxq->rx_used); } +static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) +{ + int i; + + lockdep_assert_held(&rba->lock); + + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); + + for (i = 0; i < RX_POOL_SIZE; i++) + list_add(&rba->pool[i].list, &rba->rbd_empty); +} + +static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + lockdep_assert_held(&rba->lock); + + for (i = 0; i < RX_POOL_SIZE; i++) { + if (!rba->pool[i].page) + continue; + dma_unmap_page(trans->dev, rba->pool[i].page_dma, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); + rba->pool[i].page = NULL; + } +} + int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; if (!rxq->bd) { @@ -503,11 +708,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (err) return err; } + if (!rba->alloc_wq) + rba->alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); + + spin_lock(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_rx_free_rba(trans); + iwl_pcie_rx_init_rba(rba); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); - INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); - /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); iwl_pcie_rx_init_rxb_lists(rxq); @@ -522,7 +737,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_KERNEL); + iwl_pcie_rx_replenish(trans); iwl_pcie_rx_hw_init(trans, rxq); @@ -537,6 +752,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ @@ -545,7 +761,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } - cancel_work_sync(&trans_pcie->rx_replenish); + cancel_work_sync(&rba->rx_alloc); + if (rba->alloc_wq) { + destroy_workqueue(rba->alloc_wq); + rba->alloc_wq = NULL; + } + + spin_lock(&rba->lock); + iwl_pcie_rx_free_rba(trans); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); iwl_pcie_rxq_free_rbs(trans); @@ -566,8 +790,49 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts = NULL; } +/* + * iwl_pcie_rx_reuse_rbd - Recycle used RBDs + * + * Called when an RBD can be reused. The RBD is transferred to the allocator. + * When there are 2 empty RBDs - a request for allocation is posted +
 */ +static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, + struct iwl_rx_mem_buffer *rxb, + struct iwl_rxq *rxq, bool emergency) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + /* Move the RBD to the used list, will be moved to allocator in batches + * before claiming or posting a request */ + list_add_tail(&rxb->list, &rxq->rx_used); + + if (unlikely(emergency)) + return; + + /* Count the allocator owned RBDs */ + rxq->used_count++; + + /* If we have RX_POST_REQ_ALLOC new released rx buffers - + * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is + * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC + * buffers but still need to post another request. + */ + if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { + /* Move the 2 RBDs to the allocator ownership.
+ Allocator has another 6 from pool for the request completion*/ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_pending); + queue_work(rba->alloc_wq, &rba->rx_alloc); + } +} + static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, - struct iwl_rx_mem_buffer *rxb) + struct iwl_rx_mem_buffer *rxb, + bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; @@ -583,10 +848,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { struct iwl_rx_packet *pkt; - struct iwl_device_cmd *cmd; u16 sequence; bool reclaim; - int index, cmd_index, err, len; + int index, cmd_index, len; struct iwl_rx_cmd_buffer rxcb = { ._offset = offset, ._rx_page_order = trans_pcie->rx_page_order, @@ -634,12 +898,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - if (reclaim) - cmd = txq->entries[cmd_index].cmd; - else - cmd = NULL; - - err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); + iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); if (reclaim) { kzfree(txq->entries[cmd_index].free_buf); @@ -657,7 +916,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, * iwl_trans_send_cmd() * as we reclaim the driver command queue */ if (!rxcb._page_stolen) - iwl_pcie_hcmd_complete(trans, &rxcb, err); + iwl_pcie_hcmd_complete(trans, &rxcb); else IWL_WARN(trans, "Claim null rxb?\n"); } @@ -688,13 +947,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } /* @@ -704,10 +963,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i; - u8 fill_rx = 0; - u32 count = 8; - int total_empty; + u32 r, i, j, count = 0; + bool emergency = false; restart: spin_lock(&rxq->lock); @@ -720,47 +977,95 @@ restart: if (i == r) IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - while (i != r) { struct iwl_rx_mem_buffer *rxb; + if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) + emergency = true; + rxb = rxq->queue[i]; rxq->queue[i] = NULL; IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", r, i, rxb); - iwl_pcie_rx_handle_rb(trans, rxb); + iwl_pcie_rx_handle_rb(trans, rxb, emergency); i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode wont assert. 
*/ - if (fill_rx) { + + /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; + + if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && + !emergency) { + /* Add the remaining 6 empty RBDs + * for allocator use + */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, + &rba->rbd_empty); + spin_unlock(&rba->lock); + } + + /* If not ready - continue, will try to reclaim later. + * No need to reschedule work - allocator exits only on + * success */ + if (!iwl_pcie_rx_allocator_get(trans, out)) { + /* If success - then RX_CLAIM_REQ_ALLOC + * buffers were retrieved and should be added + * to free list */ + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { + list_add_tail(&out[j]->list, + &rxq->rx_free); + rxq->free_count++; + } + } + } + if (emergency) { count++; - if (count >= 8) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); + if (count == 8) { count = 0; - goto restart; + if (rxq->used_count < RX_QUEUE_SIZE / 3) + emergency = false; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + spin_lock(&rxq->lock); } } + /* handle restock for three cases, can be all of them at once: + * - we just pulled buffers from the allocator + * - we have 8+ unstolen pages accumulated + * - we are in emergency and allocated buffers + */ + if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_restock(trans); + goto restart; + } } /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); - if (fill_rx) - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); - else - iwl_pcie_rxq_restock(trans); + /* + * handle a case where in emergency there are some unallocated RBDs. + * those RBDs are in the used list, but are not tracked by the queue's + * used_count which counts allocator owned RBDs. + * unallocated emergency RBDs must be allocated on exit, otherwise + * when called again the function may not be in emergency mode and + * they will be handed to the allocator with no tracking in the RBD + * allocator counters, which will lead to them never being claimed back + * by the queue. + * by allocating them here, they are now in the queue free list, and + * will be restocked by the next call of iwl_pcie_rxq_restock. 
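+ *
+ * A compressed sketch of the emergency flow above (real fields and
+ * constants, but pseudo-code only - the loop above is the actual logic):
+ *
+ *	if (rxq->used_count == RX_QUEUE_SIZE / 2)
+ *		emergency = true;
+ *	for every 8 RBDs handled while in emergency:
+ *		if (rxq->used_count < RX_QUEUE_SIZE / 3)
+ *			emergency = false;
+ *		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);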
+ */ + if (unlikely(emergency && count)) + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); @@ -772,6 +1077,7 @@ restart: static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ if (trans->cfg->internal_wimax_coex && @@ -795,6 +1101,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) iwl_trans_fw_error(trans); local_bh_enable(); + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) + del_timer(&trans_pcie->txq[i].stuck_timer); + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans_pcie->wait_command_queue); } @@ -1003,7 +1312,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) isr_stats->rfkill++; + mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { set_bit(STATUS_RFKILL, &trans->status); if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, @@ -1195,8 +1506,9 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans) val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; - val |= CSR_DRAM_INT_TBL_ENABLE; - val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; + val |= CSR_DRAM_INT_TBL_ENABLE | + CSR_DRAM_INIT_TBL_WRAP_CHECK | + CSR_DRAM_INIT_TBL_WRITE_POINTER; IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 9e144e71d..902834530 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -592,10 +592,8 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) do { ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) { - ret = 0; - goto out; - } + if (ret >= 0) + return 0; usleep_range(200, 1000); t += 200; @@ -605,10 +603,6 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) IWL_ERR(trans, "Couldn't prepare the card\n"); -out: - iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - return ret; } @@ -780,8 +774,15 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { last_read_idx = i; + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates + * CPU1 from CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separates + * CPU2 non-paged from CPU2 paging sections. + */ if (!image->sec[i].data || - image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) { + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); @@ -829,8 +830,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { last_read_idx = i; + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates + * CPU1 from CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separates + * CPU2 non-paged from CPU2 paging sections.
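+ *
+ * An illustrative image layout (made-up section indices, not taken
+ * from a real ucode file):
+ *
+ *	sec[0..2] -> CPU1 sections
+ *	sec[3]    -> CPU1_CPU2_SEPARATOR_SECTION
+ *	sec[4..5] -> CPU2 non-paged sections
+ *	sec[6]    -> PAGING_SEPARATOR_SECTION
+ *	sec[7..]  -> CPU2 paging sections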
+ */ if (!image->sec[i].data || - image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) { + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); @@ -897,6 +905,14 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans) case PRPH_CLEARBIT: iwl_clear_bits_prph(trans, addr, BIT(val)); break; + case PRPH_BLOCKBIT: + if (iwl_read_prph(trans, addr) & BIT(val)) { + IWL_ERR(trans, + "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", + val, addr); + goto monitor; + } + break; default: IWL_ERR(trans, "FW debug - unknown OP %d\n", dest->reg_ops[i].op); @@ -904,6 +920,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans) } } +monitor: if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), trans_pcie->fw_mon_phys >> dest->base_shift); @@ -998,13 +1015,25 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { - int ret; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; + int ret; + + mutex_lock(&trans_pcie->mutex); + + /* Someone called stop_device, don't try to start_fw */ + if (trans_pcie->is_down) { + IWL_WARN(trans, + "Can't start_fw since the HW hasn't been started\n"); + ret = -EIO; + goto out; + } /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - return -EIO; + ret = -EIO; + goto out; } iwl_enable_rfkill_int(trans); @@ -1016,15 +1045,17 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, else clear_bit(STATUS_RFKILL, &trans->status); iwl_trans_pcie_rf_kill(trans, hw_rfkill); - if (hw_rfkill && !run_in_rfkill) - return -ERFKILL; + if (hw_rfkill && !run_in_rfkill) { + ret = -ERFKILL; + goto out; + } iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_pcie_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); - return ret; + goto out; } /* make sure rfkill handshake bits are cleared */ @@ -1042,9 +1073,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* Load the given image to the HW */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - return iwl_pcie_load_given_ucode_8000(trans, fw); + ret = iwl_pcie_load_given_ucode_8000(trans, fw); else - return iwl_pcie_load_given_ucode(trans, fw); + ret = iwl_pcie_load_given_ucode(trans, fw); + +out: + mutex_unlock(&trans_pcie->mutex); + return ret; } static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) @@ -1053,11 +1088,18 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) iwl_pcie_tx_start(trans, scd_addr); } -static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill, was_hw_rfkill; + lockdep_assert_held(&trans_pcie->mutex); + + if (trans_pcie->is_down) + return; + + trans_pcie->is_down = true; + was_hw_rfkill = iwl_is_rfkill_set(trans); /* tell the device to stop sending interrupts */ @@ -1147,14 +1189,36 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_prepare_card_hw(trans); } +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool
low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + mutex_lock(&trans_pcie->mutex); + _iwl_trans_pcie_stop_device(trans, low_power); + mutex_unlock(&trans_pcie->mutex); +} + void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) { + struct iwl_trans_pcie __maybe_unused *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->mutex); + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) - iwl_trans_pcie_stop_device(trans, true); + _iwl_trans_pcie_stop_device(trans, true); } static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->wowlan_d0i3) { + /* Enable persistence mode to avoid reset */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + } + iwl_disable_interrupts(trans); /* @@ -1166,17 +1230,21 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) iwl_pcie_disable_ict(trans); + synchronize_irq(trans_pcie->pci_dev->irq); + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - /* - * reset TX queues -- some of their registers reset during S3 - * so if we don't reset everything here the D3 image would try - * to execute some invalid memory upon resume - */ - iwl_trans_pcie_tx_reset(trans); + if (!trans->wowlan_d0i3) { + /* + * reset TX queues -- some of their registers reset during S3 + * so if we don't reset everything here the D3 image would try + * to execute some invalid memory upon resume + */ + iwl_trans_pcie_tx_reset(trans); + } iwl_pcie_set_pwr(trans, true); } @@ -1218,12 +1286,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_set_pwr(trans, false); - iwl_trans_pcie_tx_reset(trans); + if (trans->wowlan_d0i3) { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } else { + iwl_trans_pcie_tx_reset(trans); - ret = iwl_pcie_rx_init(trans); - if (ret) { - IWL_ERR(trans, "Failed to resume the device (RX reset)\n"); - return ret; + ret = iwl_pcie_rx_init(trans); + if (ret) { + IWL_ERR(trans, + "Failed to resume the device (RX reset)\n"); + return ret; + } } val = iwl_read32(trans, CSR_RESET); @@ -1235,11 +1309,14 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int err; + lockdep_assert_held(&trans_pcie->mutex); + err = iwl_pcie_prepare_card_hw(trans); if (err) { IWL_ERR(trans, "Error while preparing HW: %d\n", err); @@ -1256,20 +1333,38 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); + /* Set is_down to false here so that...*/ + trans_pcie->is_down = false; + hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) set_bit(STATUS_RFKILL, &trans->status); else clear_bit(STATUS_RFKILL, &trans->status); + /* ... 
rfkill can call stop_device and set it false if needed */ iwl_trans_pcie_rf_kill(trans, hw_rfkill); return 0; } +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + mutex_lock(&trans_pcie->mutex); + ret = _iwl_trans_pcie_start_hw(trans, low_power); + mutex_unlock(&trans_pcie->mutex); + + return ret; +} + static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + mutex_lock(&trans_pcie->mutex); + /* disable interrupts - don't enable HW RF kill interrupt */ spin_lock(&trans_pcie->irq_lock); iwl_disable_interrupts(trans); @@ -1282,6 +1377,10 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) spin_unlock(&trans_pcie->irq_lock); iwl_pcie_disable_ict(trans); + + mutex_unlock(&trans_pcie->mutex); + + synchronize_irq(trans_pcie->pci_dev->irq); } static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -1342,6 +1441,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, else trans_pcie->rx_page_order = get_order(4 * 1024); + trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header; trans_pcie->command_names = trans_cfg->command_names; trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; @@ -1354,11 +1454,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. */ - if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) { + if (!trans_pcie->napi.poll) { init_dummy_netdev(&trans_pcie->napi_dev); - iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi, - &trans_pcie->napi_dev, - iwl_pcie_dummy_napi_poll, 64); + netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, + iwl_pcie_dummy_napi_poll, 64); } } @@ -2185,6 +2284,47 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, return prph_len; } +static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + int allocated_rb_nums) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int max_len = PAGE_SIZE << trans_pcie->rx_page_order; + struct iwl_rxq *rxq = &trans_pcie->rxq; + u32 i, r, j, rb_len = 0; + + spin_lock(&rxq->lock); + + r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + + for (i = rxq->read, j = 0; + i != r && j < allocated_rb_nums; + i = (i + 1) & RX_QUEUE_MASK, j++) { + struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; + struct iwl_fw_error_dump_rb *rb; + + dma_unmap_page(trans->dev, rxb->page_dma, max_len, + DMA_FROM_DEVICE); + + rb_len += sizeof(**data) + sizeof(*rb) + max_len; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); + (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); + rb = (void *)(*data)->data; + rb->index = cpu_to_le32(i); + memcpy(rb->data, page_address(rxb->page), max_len); + /* remap the page for the free benefit */ + rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, + max_len, + DMA_FROM_DEVICE); + + *data = iwl_fw_error_next_data(*data); + } + + spin_unlock(&rxq->lock); + + return rb_len; +} #define IWL_CSR_TO_DUMP (0x250) static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, @@ -2254,17 +2394,97 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, return monitor_len; } -static -struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) +static u32 
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + u32 monitor_len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 len = 0; + + if ((trans_pcie->fw_mon_page && + trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || + trans->dbg_dest_tlv) { + struct iwl_fw_error_dump_fw_mon *fw_mon_data; + u32 base, write_ptr, wrap_cnt; + + /* If there was a dest TLV - use the values from there */ + if (trans->dbg_dest_tlv) { + write_ptr = + le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); + wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + } else { + base = MON_BUFF_BASE_ADDR; + write_ptr = MON_BUFF_WRPTR; + wrap_cnt = MON_BUFF_CYCLE_CNT; + } + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); + fw_mon_data = (void *)(*data)->data; + fw_mon_data->fw_mon_wr_ptr = + cpu_to_le32(iwl_read_prph(trans, write_ptr)); + fw_mon_data->fw_mon_cycle_cnt = + cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); + fw_mon_data->fw_mon_base_ptr = + cpu_to_le32(iwl_read_prph(trans, base)); + + len += sizeof(**data) + sizeof(*fw_mon_data); + if (trans_pcie->fw_mon_page) { + /* + * The firmware is now asserted, it won't write anything + * to the buffer. CPU can take ownership to fetch the + * data. The buffer will be handed back to the device + * before the firmware will be restarted. + */ + dma_sync_single_for_cpu(trans->dev, + trans_pcie->fw_mon_phys, + trans_pcie->fw_mon_size, + DMA_FROM_DEVICE); + memcpy(fw_mon_data->data, + page_address(trans_pcie->fw_mon_page), + trans_pcie->fw_mon_size); + + monitor_len = trans_pcie->fw_mon_size; + } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { + /* + * Update pointers to reflect actual values after + * shifting + */ + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + iwl_trans_read_mem(trans, base, fw_mon_data->data, + monitor_len / sizeof(u32)); + } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { + monitor_len = + iwl_trans_pci_dump_marbh_monitor(trans, + fw_mon_data, + monitor_len); + } else { + /* Didn't match anything - output no monitor data */ + monitor_len = 0; + } + + len += monitor_len; + (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); + } + + return len; +} + +static struct iwl_trans_dump_data +*iwl_trans_pcie_dump_data(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; - u32 len; + u32 len, num_rbs; u32 monitor_len; int i, ptr; + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); /* transport dump header */ len = sizeof(*dump_data); @@ -2273,22 +2493,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) len += sizeof(*data) + cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); - /* CSR registers */ - len += sizeof(*data) + IWL_CSR_TO_DUMP; - - /* PRPH registers */ - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; - - len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - - /* FH registers */ - len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); - /* FW monitor */ if 
(trans_pcie->fw_mon_page) { len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + @@ -2316,6 +2520,45 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) monitor_len = 0; } + if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { + dump_data = vzalloc(len); + if (!dump_data) + return NULL; + + data = (void *)dump_data->data; + len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); + dump_data->len = len; + + return dump_data; + } + + /* CSR registers */ + len += sizeof(*data) + IWL_CSR_TO_DUMP; + + /* PRPH registers */ + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; + + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + + /* FH registers */ + len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); + + if (dump_rbs) { + /* RBs */ + num_rbs = le16_to_cpu(ACCESS_ONCE( + trans_pcie->rxq.rb_stts->closed_rb_num)) + & 0x0FFF; + num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; + len += num_rbs * (sizeof(*data) + + sizeof(struct iwl_fw_error_dump_rb) + + (PAGE_SIZE << trans_pcie->rx_page_order)); + } + dump_data = vzalloc(len); if (!dump_data) return NULL; @@ -2352,74 +2595,10 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) len += iwl_trans_pcie_dump_prph(trans, &data); len += iwl_trans_pcie_dump_csr(trans, &data); len += iwl_trans_pcie_fh_regs_dump(trans, &data); - /* data is already pointing to the next section */ - - if ((trans_pcie->fw_mon_page && - trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || - trans->dbg_dest_tlv) { - struct iwl_fw_error_dump_fw_mon *fw_mon_data; - u32 base, write_ptr, wrap_cnt; - - /* If there was a dest TLV - use the values from there */ - if (trans->dbg_dest_tlv) { - write_ptr = - le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); - wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); - base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); - } else { - base = MON_BUFF_BASE_ADDR; - write_ptr = MON_BUFF_WRPTR; - wrap_cnt = MON_BUFF_CYCLE_CNT; - } - - data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); - fw_mon_data = (void *)data->data; - fw_mon_data->fw_mon_wr_ptr = - cpu_to_le32(iwl_read_prph(trans, write_ptr)); - fw_mon_data->fw_mon_cycle_cnt = - cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); - fw_mon_data->fw_mon_base_ptr = - cpu_to_le32(iwl_read_prph(trans, base)); - - len += sizeof(*data) + sizeof(*fw_mon_data); - if (trans_pcie->fw_mon_page) { - /* - * The firmware is now asserted, it won't write anything - * to the buffer. CPU can take ownership to fetch the - * data. The buffer will be handed back to the device - * before the firmware will be restarted. 
- */ - dma_sync_single_for_cpu(trans->dev, - trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, - DMA_FROM_DEVICE); - memcpy(fw_mon_data->data, - page_address(trans_pcie->fw_mon_page), - trans_pcie->fw_mon_size); - - monitor_len = trans_pcie->fw_mon_size; - } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { - /* - * Update pointers to reflect actual values after - * shifting - */ - base = iwl_read_prph(trans, base) << - trans->dbg_dest_tlv->base_shift; - iwl_trans_read_mem(trans, base, fw_mon_data->data, - monitor_len / sizeof(u32)); - } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { - monitor_len = - iwl_trans_pci_dump_marbh_monitor(trans, - fw_mon_data, - monitor_len); - } else { - /* Didn't match anything - output no monitor data */ - monitor_len = 0; - } + if (dump_rbs) + len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); - len += monitor_len; - data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); - } + len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; @@ -2482,12 +2661,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (!trans) return ERR_PTR(-ENOMEM); + trans->max_skb_frags = IWL_PCIE_MAX_FRAGS; + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); spin_lock_init(&trans_pcie->ref_lock); + mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); ret = pci_enable_device(pdev); diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 607acb53c..a8c8a4a74 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -219,8 +219,6 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); - sta_id = tx_cmd->sta_id; sec_ctl = tx_cmd->sec_ctl; @@ -239,6 +237,9 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, if (trans_pcie->bc_table_dword) len = DIV_ROUND_UP(len, 4); + if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + return; + bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; @@ -387,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, /* first TB is never freed - it's the scratchbuf data */ - for (i = 1; i < num_tbs; i++) - dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), - iwl_pcie_tfd_tb_get_len(tfd, i), - DMA_TO_DEVICE); - + for (i = 1; i < num_tbs; i++) { + if (meta->flags & BIT(i + CMD_TB_BITMAP_POS)) + dma_unmap_page(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + } tfd->num_tbs = 0; } @@ -467,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); - return 0; + return num_tbs; } static int iwl_pcie_txq_alloc(struct iwl_trans *trans, @@ -915,6 +923,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) } } + iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); if (trans->cfg->base_params->num_of_queues > 20) iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_ENABLE_31_QUEUES); @@ -1320,13 +1329,24 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, int idx; u16 copy_size, cmd_size, 
scratch_size; bool had_nocopy = false; + u8 group_id = iwl_cmd_groupid(cmd->id); int i, ret; u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - copy_size = sizeof(out_cmd->hdr); - cmd_size = sizeof(out_cmd->hdr); + if (WARN(!trans_pcie->wide_cmd_header && + group_id > IWL_ALWAYS_LONG_GROUP, + "unsupported wide command %#x\n", cmd->id)) + return -EINVAL; + + if (group_id != 0) { + copy_size = sizeof(struct iwl_cmd_header_wide); + cmd_size = sizeof(struct iwl_cmd_header_wide); + } else { + copy_size = sizeof(struct iwl_cmd_header); + cmd_size = sizeof(struct iwl_cmd_header); + } /* need one for the header if the first is NOCOPY */ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); @@ -1416,16 +1436,32 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, out_meta->source = cmd; /* set up the header */ - - out_cmd->hdr.cmd = cmd->id; - out_cmd->hdr.flags = 0; - out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | - INDEX_TO_SEQ(q->write_ptr)); + if (group_id != 0) { + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - + sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + } else { + out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + out_cmd->hdr.group_id = 0; + + cmd_pos = sizeof(struct iwl_cmd_header); + copy_size = sizeof(struct iwl_cmd_header); + } /* and copy the data that needs to be copied */ - cmd_pos = offsetof(struct iwl_device_cmd, payload); - copy_size = sizeof(out_cmd->hdr); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy; @@ -1464,9 +1500,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } IWL_DEBUG_HC(trans, - "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", get_cmd_string(trans_pcie, out_cmd->hdr.cmd), - out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + group_id, out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); /* start the TFD with the scratchbuf */ @@ -1516,12 +1553,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); } + BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS > + sizeof(out_meta->flags) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) kzfree(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; - trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); /* start timer if queue currently empty */ if (q->read_ptr == q->write_ptr && txq->wd_timeout) @@ -1552,15 +1591,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them * @rxb: Rx buffer to reclaim - * @handler_status: return value of the handler of the command - * (put in setup_rx_handlers) * * If an Rx buffer has an async callback associated with it the callback * will be 
executed. The attached skb (if present) will only be freed * if the callback returns 1 */ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb, int handler_status) + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); @@ -1599,7 +1636,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, meta->source->resp_pkt = pkt; meta->source->_rx_page_addr = (unsigned long)page_address(p); meta->source->_rx_page_order = trans_pcie->rx_page_order; - meta->source->handler_status = handler_status; } iwl_pcie_cmdq_reclaim(trans, txq_id, index); @@ -1762,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; @@ -1771,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, void *tb1_addr; u16 len, tb1_len, tb2_len; bool wait_write_ptr; - __le16 fc = hdr->frame_control; - u8 hdr_len = ieee80211_hdrlen(fc); + __le16 fc; + u8 hdr_len; u16 wifi_seq; + int i; txq = &trans_pcie->txq[txq_id]; q = &txq->q; @@ -1782,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, "TX on unused queue %d\n", txq_id)) return -EINVAL; + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS && + __skb_linearize(skb)) + return -ENOMEM; + + /* mac80211 always puts the full header into the SKB's head, + * so there's no need to check if it's readable there + */ + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + hdr_len = ieee80211_hdrlen(fc); + spin_lock(&txq->lock); /* In AGG mode, the index in the ring must correspond to the WiFi @@ -1812,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[q->write_ptr].meta; + out_meta->flags = 0; /* * The second TB (tb1) points to the remainder of the TX command @@ -1845,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* * Set up TFD's third entry to point directly to remainder - * of skb, if any (802.11 null frames have no payload). 
+	 * of skb's head, if any */
-	tb2_len = skb->len - hdr_len;
+	tb2_len = skb_headlen(skb) - hdr_len;
 	if (tb2_len > 0) {
 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
 						     skb->data + hdr_len,
@@ -1860,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
 	}
 
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		dma_addr_t tb_phys;
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			goto out_err;
+		}
+		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+						skb_frag_size(frag), false);
+
+		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+	}
+
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
@@ -1869,7 +1942,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, tb2_len);
+				  hdr_len, skb->len - hdr_len);
 
 	wait_write_ptr = ieee80211_has_morefrags(fc);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 99e873dc8..520bef807 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2399,6 +2399,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 	ieee80211_hw_set(hw, MFP_CAPABLE);
 	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, TDLS_WIDER_BW);
 	if (rctbl)
 		ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
@@ -2676,7 +2677,7 @@ static void hwsim_mon_setup(struct net_device *dev)
 	dev->netdev_ops = &hwsim_netdev_ops;
 	dev->destructor = free_netdev;
 	ether_setup(dev);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
 	eth_zero_addr(dev->dev_addr);
 	dev->dev_addr[0] = 0x12;
@@ -3120,8 +3121,10 @@ static int hwsim_init_netlink(void)
 		goto failure;
 
 	rc = netlink_register_notifier(&hwsim_netlink_notifier);
-	if (rc)
+	if (rc) {
+		genl_unregister_family(&hwsim_genl_family);
 		goto failure;
+	}
 
 	return 0;
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 7217da4f1..57a80cfa3 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -112,7 +112,9 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
 	if (!skb)
 		return;
 
-	ieee80211_rx_ni(dev->hw, skb);
+	spin_lock(&dev->mac_lock);
+	ieee80211_rx(dev->hw, skb);
+	spin_unlock(&dev->mac_lock);
 }
 
 static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
@@ -236,23 +238,42 @@ static void mt7601u_complete_tx(struct urb *urb)
 	skb = q->e[q->start].skb;
 	trace_mt_tx_dma_done(dev, skb);
 
-	mt7601u_tx_status(dev, skb);
+	__skb_queue_tail(&dev->tx_skb_done, skb);
+	tasklet_schedule(&dev->tx_tasklet);
 
 	if (q->used == q->entries - q->entries / 8)
 		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
 
 	q->start = (q->start + 1) % q->entries;
 	q->used--;
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
 
-	if (urb->status)
-		goto out;
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+	struct sk_buff_head skbs;
+	unsigned long flags;
+
+	__skb_queue_head_init(&skbs);
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
 
 	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
 	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
 		queue_delayed_work(dev->stat_wq, &dev->stat_work,
 				   msecs_to_jiffies(10));
-out:
+
+	skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+	while (!skb_queue_empty(&skbs)) {
+		struct sk_buff *skb = __skb_dequeue(&skbs);
+
+		mt7601u_tx_status(dev, skb);
+	}
 }
 
 static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
@@ -475,6 +496,7 @@ int mt7601u_dma_init(struct mt7601u_dev *dev)
 {
 	int ret = -ENOMEM;
 
+	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
 	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
 
 	ret = mt7601u_alloc_tx(dev);
@@ -502,4 +524,6 @@ void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
 
 	mt7601u_free_rx(dev);
 	mt7601u_free_tx(dev);
+
+	tasklet_kill(&dev->tx_tasklet);
 }
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index df3dd5619..26190fd33 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -454,8 +454,10 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
 	spin_lock_init(&dev->tx_lock);
 	spin_lock_init(&dev->rx_lock);
 	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->mac_lock);
 	spin_lock_init(&dev->con_mon_lock);
 	atomic_set(&dev->avg_ampdu_len, 1);
+	skb_queue_head_init(&dev->tx_skb_done);
 
 	dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
 	if (!dev->stat_wq) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index 7514bce1a..e21c53ed0 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -181,7 +181,11 @@ void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
 	}
 
 	mt76_mac_fill_tx_status(dev, &info, stat);
+
+	spin_lock_bh(&dev->mac_lock);
 	ieee80211_tx_status_noskb(dev->hw, sta, &info);
+	spin_unlock_bh(&dev->mac_lock);
+
 	rcu_read_unlock();
 }
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index 9102be6b9..428bd2f10 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -141,12 +141,13 @@ enum {
 /**
  * struct mt7601u_dev - adapter structure
  * @lock:		protects @wcid->tx_rate.
+ * @mac_lock:		locks out mac80211's tx status and rx paths.
  * @tx_lock:		protects @tx_q and changes of MT7601U_STATE_*_STATS
-			flags in @state.
+ *			flags in @state.
  * @rx_lock:		protects @rx_q.
  * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
  * @mutex:		ensures exclusive access from mac80211 callbacks.
- * @vendor_req_mutex:	ensures atomicity of vendor requests.
+ * @vendor_req_mutex:	protects @vend_buf, ensures atomicity of split writes.
  * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
  *			(accesses to RF and BBP).
  * @hw_atomic_mutex:	ensures exclusive access to HW during critical
@@ -177,6 +178,7 @@ struct mt7601u_dev {
 	struct mt76_wcid __rcu *wcid[N_WCIDS];
 
 	spinlock_t lock;
+	spinlock_t mac_lock;
 
 	const u16 *beacon_offsets;
 
@@ -184,6 +186,8 @@ struct mt7601u_dev {
 	struct mt7601u_eeprom_params *ee;
 
 	struct mutex vendor_req_mutex;
+	void *vend_buf;
+
 	struct mutex reg_atomic_mutex;
 	struct mutex hw_atomic_mutex;
 
@@ -197,7 +201,9 @@ struct mt7601u_dev {
 
 	/* TX */
 	spinlock_t tx_lock;
+	struct tasklet_struct tx_tasklet;
 	struct mt7601u_tx_queue *tx_q;
+	struct sk_buff_head tx_skb_done;
 
 	atomic_t avg_ampdu_len;
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index 0be2080ce..a0a33dc8f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -116,7 +116,10 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
 	ieee80211_tx_info_clear_status(info);
 	info->status.rates[0].idx = -1;
 	info->flags |= IEEE80211_TX_STAT_ACK;
+
+	spin_lock(&dev->mac_lock);
 	ieee80211_tx_status(dev->hw, skb);
+	spin_unlock(&dev->mac_lock);
 }
 
 static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index 951c9dc60..ee9edd869 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -92,10 +92,9 @@ void mt7601u_complete_urb(struct urb *urb)
 	complete(cmpl);
 }
 
-static int
-__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-			 const u8 direction, const u16 val, const u16 offset,
-			 void *buf, const size_t buflen)
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+			   const u8 direction, const u16 val, const u16 offset,
+			   void *buf, const size_t buflen)
 {
 	int i, ret;
 	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
@@ -110,6 +109,8 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
 		trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
 				  buf, buflen, ret);
 
+		if (ret == -ENODEV)
+			set_bit(MT7601U_STATE_REMOVED, &dev->state);
 		if (ret >= 0 || ret == -ENODEV)
 			return ret;
@@ -122,25 +123,6 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
 	return ret;
 }
 
-int
-mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-		       const u8 direction, const u16 val, const u16 offset,
-		       void *buf, const size_t buflen)
-{
-	int ret;
-
-	mutex_lock(&dev->vendor_req_mutex);
-
-	ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
-				       buf, buflen);
-	if (ret == -ENODEV)
-		set_bit(MT7601U_STATE_REMOVED, &dev->state);
-
-	mutex_unlock(&dev->vendor_req_mutex);
-
-	return ret;
-}
-
 void mt7601u_vendor_reset(struct mt7601u_dev *dev)
 {
 	mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
@@ -150,19 +132,21 @@ void mt7601u_vendor_reset(struct mt7601u_dev *dev)
 u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
 {
 	int ret;
-	__le32 reg;
-	u32 val;
+	u32 val = ~0;
 
 	WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
 
+	mutex_lock(&dev->vendor_req_mutex);
+
 	ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
-				     0, offset, &reg, sizeof(reg));
-	val = le32_to_cpu(reg);
-	if (ret > 0 && ret != sizeof(reg)) {
+				     0, offset, dev->vend_buf, MT_VEND_BUF);
+	if (ret == MT_VEND_BUF)
+		val = get_unaligned_le32(dev->vend_buf);
+	else if (ret > 0)
 		dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
 			ret, offset);
-		val = ~0;
-	}
+
+	mutex_unlock(&dev->vendor_req_mutex);
 
 	trace_reg_read(dev, offset, val);
 	return val;
@@ -173,12 +157,17 @@ int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
 {
 	int ret;
 
+	mutex_lock(&dev->vendor_req_mutex);
+
 	ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
 				     val & 0xffff, offset, NULL, 0);
-	if (ret)
-		return ret;
-	return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
-				      val >> 16, offset + 2, NULL, 0);
+	if (!ret)
+		ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+					     val >> 16, offset + 2, NULL, 0);
+
+	mutex_unlock(&dev->vendor_req_mutex);
+
+	return ret;
 }
 
 void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
@@ -275,6 +264,12 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
 
 	usb_set_intfdata(usb_intf, dev);
 
+	dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+	if (!dev->vend_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	ret = mt7601u_assign_pipes(usb_intf, dev);
 	if (ret)
 		goto err;
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
index c7e9e7738..14c635f94 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.h
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -23,6 +23,8 @@
 
 #define MT_VEND_DEV_MODE_RESET	1
 
+#define MT_VEND_BUF		sizeof(__le32)
+
 enum mt_vendor_req {
 	MT_VEND_DEV_MODE = 1,
 	MT_VEND_WRITE = 2,
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 48edf3876..317d99189 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -9,36 +9,36 @@ config MWIFIEX
 	  mwifiex.
 
 config MWIFIEX_SDIO
-	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
+	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
 	depends on MWIFIEX && MMC
 	select FW_LOADER
 	select WANT_DEV_COREDUMP
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8786/8787/8797/8887/8897 chipsets with SDIO interface.
+	  8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_sdio.
 
 config MWIFIEX_PCIE
-	tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
+	tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897/8997"
 	depends on MWIFIEX && PCI
 	select FW_LOADER
 	select WANT_DEV_COREDUMP
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8766/8897 chipsets with PCIe interface.
+	  8766/8897/8997 chipsets with PCIe interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_pcie.
 
 config MWIFIEX_USB
-	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
+	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
 	depends on MWIFIEX && USB
 	select FW_LOADER
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8797/8897 chipset with USB interface.
+	  8797/8897/8997 chipset with USB interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_usb.
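The mt7601u hunks above move the locking into the register accessors themselves: mt7601u_vendor_request() no longer takes vendor_req_mutex, so multi-step operations like mt7601u_rr() and mt7601u_vendor_single_wr() can hold the mutex across the whole transaction, and the mutex now also guards the DMA-safe bounce buffer dev->vend_buf that replaces the on-stack __le32 (USB control transfers must not use stack memory). A minimal caller-side sketch of the resulting pattern; example_rmw() is a hypothetical illustration, not part of this patch:

	/* Sketch only: a read-modify-write built on the patched accessors.
	 * Locking is internal to mt7601u_rr()/mt7601u_wr(), so the caller
	 * needs no extra serialization for the shared dev->vend_buf, but
	 * note the read and write are two separate locked transactions.
	 */
	static u32 example_rmw(struct mt7601u_dev *dev, u32 offset,
			       u32 mask, u32 val)
	{
		u32 reg = mt7601u_rr(dev, offset);	/* takes vendor_req_mutex */

		reg &= ~mask;
		reg |= val;
		mt7601u_wr(dev, offset, reg);		/* takes it again */

		return reg;
	}
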
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index b15e4c7ac..ff63cb563 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -19,6 +19,7 @@
 
 #include "cfg80211.h"
 #include "main.h"
+#include "11n.h"
 
 static char *reg_alpha2;
 module_param(reg_alpha2, charp, 0);
@@ -34,12 +35,38 @@ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
 	},
 };
 
-static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta = {
 	.limits = mwifiex_ap_sta_limits,
 	.num_different_channels = 1,
 	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
 	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
 	.beacon_int_infra_match = true,
+	.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				BIT(NL80211_CHAN_WIDTH_20) |
+				BIT(NL80211_CHAN_WIDTH_40),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_vht = {
+	.limits = mwifiex_ap_sta_limits,
+	.num_different_channels = 1,
+	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
+	.beacon_int_infra_match = true,
+	.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				BIT(NL80211_CHAN_WIDTH_20) |
+				BIT(NL80211_CHAN_WIDTH_40) |
+				BIT(NL80211_CHAN_WIDTH_80),
+};
+
+static const struct
+ieee80211_iface_combination mwifiex_iface_comb_ap_sta_drcs = {
+	.limits = mwifiex_ap_sta_limits,
+	.num_different_channels = 2,
+	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
+	.beacon_int_infra_match = true,
 };
 
 /*
@@ -441,7 +468,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
  *      - Country codes
  *      - Sub bands (first channel, number of channels, maximum Tx power)
  */
-static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
 {
 	u8 no_of_triplet = 0;
 	struct ieee80211_country_ie_triplet *t;
@@ -804,10 +831,13 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
 		priv->bss_type = MWIFIEX_BSS_TYPE_STA;
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
-	case NL80211_IFTYPE_P2P_GO:
 		priv->bss_role = MWIFIEX_BSS_ROLE_STA;
 		priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
 		break;
+	case NL80211_IFTYPE_P2P_GO:
+		priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+		priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+		break;
 	case NL80211_IFTYPE_AP:
 		priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
 		priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
@@ -1115,8 +1145,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
 	case NL80211_IFTYPE_P2P_GO:
 		switch (type) {
 		case NL80211_IFTYPE_STATION:
-			if (mwifiex_cfg80211_init_p2p_client(priv))
+			if (mwifiex_cfg80211_deinit_p2p(priv))
 				return -EFAULT;
+			priv->adapter->curr_iface_comb.p2p_intf--;
+			priv->adapter->curr_iface_comb.sta_intf++;
 			dev->ieee80211_ptr->iftype = type;
 			break;
 		case NL80211_IFTYPE_ADHOC:
@@ -2788,6 +2820,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
 	struct mwifiex_adapter *adapter = priv->adapter;
+	struct sk_buff *skb, *tmp;
 
 #ifdef CONFIG_DEBUG_FS
 	mwifiex_dev_debugfs_remove(priv);
@@ -2795,6 +2828,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 
 	mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
+	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
 	if (netif_carrier_ok(priv->netdev))
 		netif_carrier_off(priv->netdev);
 
@@ -2954,7 +2990,6 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
 				MWIFIEX_MEF_MAX_BYTESEQ)) {
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "Pattern not supported\n");
-			kfree(mef_entry);
 			return -EOPNOTSUPP;
 		}
@@ -3036,9 +3071,12 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
 
 	mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
 
-	if (wowlan->n_patterns || wowlan->magic_pkt)
+	if (wowlan->n_patterns || wowlan->magic_pkt) {
 		ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
 						   &mef_entry[1], wowlan);
+		if (ret)
+			goto err;
+	}
 
 	if (!mef_cfg.criteria)
 		mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -3048,6 +3086,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
 
 	ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
 			       HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
+
+err:
 	kfree(mef_entry);
 	return ret;
 }
@@ -3359,6 +3399,72 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 	return mwifiex_tdls_oper(priv, peer, action);
 }
 
+static int
+mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
+				  const u8 *addr, u8 oper_class,
+				  struct cfg80211_chan_def *chandef)
+{
+	struct mwifiex_sta_node *sta_ptr;
+	unsigned long flags;
+	u16 chan;
+	u8 second_chan_offset, band;
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	sta_ptr = mwifiex_get_sta_entry(priv, addr);
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+	if (!sta_ptr) {
+		wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+			  __func__, addr);
+		return -ENOENT;
+	}
+
+	if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
+	      WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
+		wiphy_err(wiphy, "%pM does not support tdls cs\n", addr);
+		return -ENOENT;
+	}
+
+	if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+	    sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
+		wiphy_err(wiphy, "channel switch is running, abort request\n");
+		return -EALREADY;
+	}
+
+	chan = chandef->chan->hw_value;
+	second_chan_offset = mwifiex_get_sec_chan_offset(chan);
+	band = chandef->chan->band;
+	mwifiex_start_tdls_cs(priv, addr, chan, second_chan_offset, band);
+
+	return 0;
+}
+
+static void
+mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
+					 struct net_device *dev,
+					 const u8 *addr)
+{
+	struct mwifiex_sta_node *sta_ptr;
+	unsigned long flags;
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	sta_ptr = mwifiex_get_sta_entry(priv, addr);
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+	if (!sta_ptr) {
+		wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+			  __func__, addr);
+	} else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+		     sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
+		     sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
+		wiphy_err(wiphy, "tdls chan switch not initialized by %pM\n",
+			  addr);
+	} else
+		mwifiex_stop_tdls_cs(priv, addr);
+}
+
 static int
 mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 			     const u8 *mac, struct station_parameters *params)
@@ -3575,6 +3681,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
 	.set_coalesce = mwifiex_cfg80211_set_coalesce,
 	.tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
 	.tdls_oper = mwifiex_cfg80211_tdls_oper,
+	.tdls_channel_switch = mwifiex_cfg80211_tdls_chan_switch,
+	.tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
 	.add_station = mwifiex_cfg80211_add_station,
 	.change_station = mwifiex_cfg80211_change_station,
 	.get_channel = mwifiex_cfg80211_get_channel,
@@ -3672,7 +3780,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 	else
 		wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
 
-	wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+	if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+	else if (adapter->is_hw_11ac_capable)
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_vht;
+	else
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
 	wiphy->n_iface_combinations = 1;
 
 	/* Initialize cipher suits */
@@ -3709,6 +3822,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 			NL80211_FEATURE_INACTIVITY_TIMER |
 			NL80211_FEATURE_NEED_OBSS_SCAN;
 
+	if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+		wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+
 	if (adapter->fw_api_ver == MWIFIEX_FW_V15)
 		wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 207da4050..45ae38e32 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -167,8 +167,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
 		mwifiex_dbg(adapter, ERROR,
 			    "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
 			    cmd_code);
-		if (cmd_node->wait_q_enabled)
-			mwifiex_complete_cmd(adapter, cmd_node);
 		mwifiex_recycle_cmd_node(adapter, cmd_node);
 		queue_work(adapter->workqueue, &adapter->main_work);
 		return -1;
@@ -809,17 +807,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
 	adapter->is_cmd_timedout = 0;
 
 	resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
-	if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
-		mwifiex_dbg(adapter, ERROR,
-			    "CMD_RESP: %#x been canceled\n",
-			    le16_to_cpu(resp->command));
-		mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
-		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-		adapter->curr_cmd = NULL;
-		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
-		return -1;
-	}
-
 	if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
 		/* Copy original response back to response buffer */
 		struct mwifiex_ds_misc_cmd *hostcmd;
@@ -989,12 +976,13 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
 		if (cmd_node->wait_q_enabled) {
 			adapter->cmd_wait_q.status = -ETIMEDOUT;
-			wake_up_interruptible(&adapter->cmd_wait_q.wait);
 			mwifiex_cancel_pending_ioctl(adapter);
 		}
 	}
-	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
 		mwifiex_init_fw_complete(adapter);
+		return;
+	}
 
 	if (adapter->if_ops.device_dump)
 		adapter->if_ops.device_dump(adapter);
@@ -1024,6 +1012,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
 		adapter->curr_cmd->wait_q_enabled = false;
 		adapter->cmd_wait_q.status = -1;
 		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		/* no recycle probably wait for response */
 	}
 	/* Cancel all pending command */
 	spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
@@ -1032,11 +1021,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
 		list_del(&cmd_node->list);
 		spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
 
-		if (cmd_node->wait_q_enabled) {
+		if (cmd_node->wait_q_enabled)
 			adapter->cmd_wait_q.status = -1;
-			mwifiex_complete_cmd(adapter, cmd_node);
-			cmd_node->wait_q_enabled = false;
-		}
 		mwifiex_recycle_cmd_node(adapter, cmd_node);
 		spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
 	}
@@ -1094,12 +1080,18 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 	    (adapter->curr_cmd->wait_q_enabled)) {
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
 		cmd_node = adapter->curr_cmd;
-		cmd_node->wait_q_enabled = false;
-		cmd_node->cmd_flag |= CMD_F_CANCELED;
-		mwifiex_recycle_cmd_node(adapter, cmd_node);
-		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		/* setting curr_cmd to NULL is quite dangerous, because
+		 * mwifiex_process_cmdresp checks curr_cmd to be != NULL
+		 * at the beginning then relies on it and dereferences
+		 * it at will.
+		 * this probably works since mwifiex_cmd_timeout_func
+		 * is the only caller of this function and no responses
+		 * are processed at that point
+		 */
 		adapter->curr_cmd = NULL;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+
+		mwifiex_recycle_cmd_node(adapter, cmd_node);
 	}
 
 	/* Cancel all pending scan command */
@@ -1129,7 +1121,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 			}
 		}
 	}
-	adapter->cmd_wait_q.status = -1;
 }
 
 /*
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 5a0636d43..5583856fc 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -731,7 +731,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
 		(struct mwifiex_private *) file->private_data;
 	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 	char *buf = (char *) addr;
-	int pos = 0, ret = 0, i;
+	int pos, ret, i;
 	u8 value[MAX_EEPROM_DATA];
 
 	if (!buf)
@@ -739,7 +739,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
 
 	if (saved_offset == -1) {
 		/* No command has been given */
-		pos += snprintf(buf, PAGE_SIZE, "0");
+		pos = snprintf(buf, PAGE_SIZE, "0");
 		goto done;
 	}
@@ -748,17 +748,17 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
 				  (u16) saved_bytes, value);
 	if (ret) {
 		ret = -EINVAL;
-		goto done;
+		goto out_free;
 	}
 
-	pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
+	pos = snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
 	for (i = 0; i < saved_bytes; i++)
-		pos += snprintf(buf + strlen(buf), PAGE_SIZE, "%d ", value[i]);
-
-	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+		pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
 
 done:
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+out_free:
 	free_page(addr);
 	return ret;
 }
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 51e344789..098e1f14d 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -141,6 +141,9 @@ enum mwifiex_tdls_status {
 	TDLS_SETUP_COMPLETE,
 	TDLS_SETUP_FAILURE,
 	TDLS_LINK_TEARDOWN,
+	TDLS_CHAN_SWITCHING,
+	TDLS_IN_BASE_CHAN,
+	TDLS_IN_OFF_CHAN,
 };
 
 enum mwifiex_tdls_error_code {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index cd0905171..3ec2ac82e 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,14 +169,17 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_TX_PAUSE           (PROPRIETARY_TLV_BASE_ID + 148)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_MULTI_CHAN_INFO    (PROPRIETARY_TLV_BASE_ID + 183)
 #define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
 #define TLV_TYPE_SCAN_CHANNEL_GAP   (PROPRIETARY_TLV_BASE_ID + 197)
 #define TLV_TYPE_API_REV            (PROPRIETARY_TLV_BASE_ID + 199)
 #define TLV_TYPE_CHANNEL_STATS      (PROPRIETARY_TLV_BASE_ID + 198)
 #define TLV_BTCOEX_WL_AGGR_WINSIZE  (PROPRIETARY_TLV_BASE_ID + 202)
 #define TLV_BTCOEX_WL_SCANTIME      (PROPRIETARY_TLV_BASE_ID + 203)
+#define TLV_TYPE_BSS_MODE           (PROPRIETARY_TLV_BASE_ID + 206)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
@@ -200,6 +203,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
 #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
 
 #define MWIFIEX_DEF_HT_CAP	(IEEE80211_HT_CAP_DSSSCCK40 | \
@@ -359,6 +363,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                          0x0112
+#define HostCmd_CMD_TDLS_CONFIG                       0x0100
+#define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
@@ -432,7 +438,6 @@ enum P2P_MODES {
 
 #define CMD_F_HOSTCMD             (1 << 0)
-#define CMD_F_CANCELED            (1 << 1)
 
 #define HostCmd_CMD_ID_MASK             0x0fff
@@ -509,8 +514,10 @@ enum P2P_MODES {
 #define EVENT_TDLS_GENERIC_EVENT        0x00000052
 #define EVENT_RADAR_DETECTED            0x00000053
 #define EVENT_CHANNEL_REPORT_RDY        0x00000054
+#define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
+#define EVENT_MULTI_CHAN_INFO           0x0000006a
 #define EVENT_TX_STATUS_REPORT          0x00000074
 #define EVENT_BT_COEX_WLAN_PARA_CHANGE  0X00000076
@@ -545,7 +552,27 @@ enum P2P_MODES {
 #define ACT_TDLS_DELETE            0x00
 #define ACT_TDLS_CREATE            0x01
 #define ACT_TDLS_CONFIG            0x02
-#define TDLS_EVENT_LINK_TEAR_DOWN  3
+
+#define TDLS_EVENT_LINK_TEAR_DOWN      3
+#define TDLS_EVENT_CHAN_SWITCH_RESULT  7
+#define TDLS_EVENT_START_CHAN_SWITCH   8
+#define TDLS_EVENT_CHAN_SWITCH_STOPPED 9
+
+#define TDLS_BASE_CHANNEL	       0
+#define TDLS_OFF_CHANNEL	       1
+
+#define ACT_TDLS_CS_ENABLE_CONFIG 0x00
+#define ACT_TDLS_CS_INIT	  0x06
+#define ACT_TDLS_CS_STOP	  0x07
+#define ACT_TDLS_CS_PARAMS	  0x08
+
+#define MWIFIEX_DEF_CS_UNIT_TIME	2
+#define MWIFIEX_DEF_CS_THR_OTHERLINK	10
+#define MWIFIEX_DEF_THR_DIRECTLINK	0
+#define MWIFIEX_DEF_CS_TIME		10
+#define MWIFIEX_DEF_CS_TIMEOUT		16
+#define MWIFIEX_DEF_CS_REG_CLASS	12
+#define MWIFIEX_DEF_CS_PERIODICITY	1
 
 #define MWIFIEX_FW_V15		   15
@@ -658,6 +685,7 @@ struct mwifiex_fw_chan_stats {
 enum mwifiex_chan_scan_mode_bitmasks {
 	MWIFIEX_PASSIVE_SCAN = BIT(0),
 	MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
+	MWIFIEX_HIDDEN_SSID_REPORT = BIT(4),
 };
 
 struct mwifiex_chan_scan_param_set {
@@ -1131,6 +1159,13 @@ struct host_cmd_ds_tx_rate_query {
 	u8 ht_info;
 } __packed;
 
+struct mwifiex_tx_pause_tlv {
+	struct mwifiex_ie_types_header header;
+	u8 peermac[ETH_ALEN];
+	u8 tx_pause;
+	u8 pkt_cnt;
+} __packed;
+
 enum Host_Sleep_Action {
 	HS_CONFIGURE = 0x0001,
 	HS_ACTIVATE  = 0x0002,
@@ -1249,6 +1284,36 @@ struct host_cmd_ds_tdls_oper {
 	u8 peer_mac[ETH_ALEN];
 } __packed;
 
+struct mwifiex_tdls_config {
+	__le16 enable;
+};
+
+struct mwifiex_tdls_config_cs_params {
+	u8 unit_time;
+	u8 thr_otherlink;
+	u8 thr_directlink;
+};
+
+struct mwifiex_tdls_init_cs_params {
+	u8 peer_mac[ETH_ALEN];
+	u8 primary_chan;
+	u8 second_chan_offset;
+	u8 band;
+	__le16 switch_time;
+	__le16 switch_timeout;
+	u8 reg_class;
+	u8 periodicity;
+} __packed;
+
+struct mwifiex_tdls_stop_cs_params {
+	u8 peer_mac[ETH_ALEN];
+};
+
+struct host_cmd_ds_tdls_config {
+	__le16 tdls_action;
+	u8 tdls_data[1];
+} __packed;
+
 struct mwifiex_chan_desc {
 	__le16 start_freq;
 	u8 chan_width;
@@ -1370,6 +1435,11 @@ struct host_cmd_ds_802_11_scan_ext {
 	u8    tlv_buffer[1];
 } __packed;
 
+struct mwifiex_ie_types_bss_mode {
+	struct mwifiex_ie_types_header  header;
+	u8 bss_mode;
+} __packed;
+
 struct mwifiex_ie_types_bss_scan_rsp {
 	struct mwifiex_ie_types_header header;
 	u8 bssid[ETH_ALEN];
@@ -1908,6 +1978,12 @@ struct mwifiex_radar_det_event {
 	__le32 passed;
 } __packed;
 
+struct mwifiex_ie_types_multi_chan_info {
+	struct mwifiex_ie_types_header header;
+	__le16 status;
+	u8 tlv_buffer[0];
+} __packed;
+
 struct meas_rpt_map {
 	u8 rssi:3;
 	u8 unmeasured:1;
@@ -1927,10 +2003,18 @@ struct host_cmd_ds_802_11_subsc_evt {
 	__le16 events;
 } __packed;
 
+struct chan_switch_result {
+	u8 cur_chan;
+	u8 status;
+	u8 reason;
+} __packed;
+
 struct mwifiex_tdls_generic_event {
 	__le16 type;
 	u8 peer_mac[ETH_ALEN];
 	union {
+		struct chan_switch_result switch_result;
+		u8 cs_stop_reason;
 		__le16 reason_code;
 		__le16 reserved;
 	} u;
@@ -1971,6 +2055,11 @@ struct host_cmd_ds_coalesce_cfg {
 	struct coalesce_receive_filt_rule rule[0];
 } __packed;
 
+struct host_cmd_ds_multi_chan_policy {
+	__le16 action;
+	__le16 policy;
+} __packed;
+
 struct host_cmd_ds_command {
 	__le16 command;
 	__le16 size;
@@ -2035,9 +2124,11 @@ struct host_cmd_ds_command {
 		struct host_cmd_ds_sta_list sta_list;
 		struct host_cmd_11ac_vht_cfg vht_cfg;
 		struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+		struct host_cmd_ds_tdls_config tdls_config;
 		struct host_cmd_ds_tdls_oper tdls_oper;
 		struct host_cmd_ds_chan_rpt_req chan_rpt_req;
 		struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
+		struct host_cmd_ds_multi_chan_policy mc_policy;
 	} params;
 } __packed;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 0ba894509..abf52d25b 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -409,6 +409,8 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
 	int ret;
 
 	ret = mwifiex_uap_parse_tail_ies(priv, info);
+
+	if (ret)
 		return ret;
 
 	return mwifiex_set_mgmt_beacon_data_ies(priv, info);
@@ -477,6 +479,7 @@ int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
 				 ar_ie, &priv->assocresp_idx);
 
 done:
+	kfree(gen_ie);
 	kfree(beacon_ie);
 	kfree(pr_ie);
 	kfree(ar_ie);
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index df7fdc09d..5d3ae63ba 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -77,7 +77,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
 
 	priv->media_connected = false;
 	eth_broadcast_addr(priv->curr_addr);
-
+	priv->port_open = false;
 	priv->pkt_tx_ctrl = 0;
 	priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 	priv->data_rate = 0;	/* Initially indicate the rate as auto */
@@ -301,7 +301,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
 	adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
 	adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
 	adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
-
+	adapter->active_scan_triggered = false;
 	setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
 		    (unsigned long)adapter);
 }
@@ -499,6 +499,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
 		INIT_LIST_HEAD(&priv->sta_list);
 		INIT_LIST_HEAD(&priv->auto_tdls_list);
 		skb_queue_head_init(&priv->tdls_txq);
+		skb_queue_head_init(&priv->bypass_txq);
 
 		spin_lock_init(&priv->tx_ba_stream_tbl_lock);
 		spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -550,11 +551,6 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
 		}
 	}
 
-	if (adapter->if_ops.init_fw_port) {
-		if (adapter->if_ops.init_fw_port(adapter))
-			return -1;
-	}
-
 	for (i = 0; i < adapter->priv_num; i++) {
 		if (adapter->priv[i]) {
 			ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 56b024a6a..3cda1f956 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -783,6 +783,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
 
 	if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
 		priv->scan_block = true;
+	else
+		priv->port_open = true;
 
 done:
 	/* Need to indicate IOCTL complete */
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index d501b4780..a94fd0f4b 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -276,6 +276,7 @@ process_start:
 		      !adapter->pm_wakeup_fw_try) &&
 		    (is_command_pending(adapter) ||
 		     !skb_queue_empty(&adapter->tx_data_q) ||
+		     !mwifiex_bypass_txlist_empty(adapter) ||
 		     !mwifiex_wmm_lists_empty(adapter))) {
 			adapter->pm_wakeup_fw_try = true;
 			mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -299,9 +300,16 @@ process_start:
 		if ((!adapter->scan_chan_gap_enabled &&
 		     adapter->scan_processing) || adapter->data_sent ||
+		    mwifiex_is_tdls_chan_switching
+		    (mwifiex_get_priv(adapter,
+				      MWIFIEX_BSS_ROLE_STA)) ||
 		    (mwifiex_wmm_lists_empty(adapter) &&
+		     mwifiex_bypass_txlist_empty(adapter) &&
 		     skb_queue_empty(&adapter->tx_data_q))) {
 			if (adapter->cmd_sent || adapter->curr_cmd ||
+			    !mwifiex_is_send_cmd_allowed
+			    (mwifiex_get_priv(adapter,
+					      MWIFIEX_BSS_ROLE_STA)) ||
 			    (!is_command_pending(adapter)))
 				break;
 		}
@@ -342,7 +350,9 @@ process_start:
 			continue;
 		}
 
-		if (!adapter->cmd_sent && !adapter->curr_cmd) {
+		if (!adapter->cmd_sent && !adapter->curr_cmd &&
+		    mwifiex_is_send_cmd_allowed
+		    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
 			if (mwifiex_exec_next_cmd(adapter) == -1) {
 				ret = -1;
 				break;
 			}
@@ -365,7 +375,25 @@ process_start:
 
 		if ((adapter->scan_chan_gap_enabled ||
 		     !adapter->scan_processing) &&
-		    !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
+		    !adapter->data_sent &&
+		    !mwifiex_bypass_txlist_empty(adapter) &&
+		    !mwifiex_is_tdls_chan_switching
+		    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+			mwifiex_process_bypass_tx(adapter);
+			if (adapter->hs_activated) {
+				adapter->is_hs_configured = false;
+				mwifiex_hs_activated_event
+					(mwifiex_get_priv
+					 (adapter, MWIFIEX_BSS_ROLE_ANY),
+					 false);
+			}
+		}
+
+		if ((adapter->scan_chan_gap_enabled ||
+		     !adapter->scan_processing) &&
+		    !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) &&
+		    !mwifiex_is_tdls_chan_switching
+		    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
 			mwifiex_wmm_process_tx(adapter);
 			if (adapter->hs_activated) {
 				adapter->is_hs_configured = false;
@@ -379,6 +407,7 @@ process_start:
 		if (adapter->delay_null_pkt && !adapter->cmd_sent &&
 		    !adapter->curr_cmd && !is_command_pending(adapter) &&
 		    (mwifiex_wmm_lists_empty(adapter) &&
+		     mwifiex_bypass_txlist_empty(adapter) &&
 		     skb_queue_empty(&adapter->tx_data_q))) {
 			if (!mwifiex_send_null_packet
 			    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -649,6 +678,26 @@ mwifiex_close(struct net_device *dev)
 	return 0;
 }
 
+static bool
+mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
+			struct sk_buff *skb)
+{
+	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+
+	if (ntohs(eth_hdr->h_proto) == ETH_P_PAE ||
+	    mwifiex_is_skb_mgmt_frame(skb) ||
+	    (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+	     ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+	     (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) {
+		mwifiex_dbg(priv->adapter, DATA,
+			    "bypass txqueue; eth type %#x, mgmt %d\n",
+			    ntohs(eth_hdr->h_proto),
+			    mwifiex_is_skb_mgmt_frame(skb));
+		return true;
+	}
+
+	return false;
+}
 /*
  * Add buffer into wmm tx queue and queue work to transmit it.
  */
@@ -666,8 +715,14 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
 		}
 	}
 
-	atomic_inc(&priv->adapter->tx_pending);
-	mwifiex_wmm_add_buf_txqueue(priv, skb);
+	if (mwifiex_bypass_tx_queue(priv, skb)) {
+		atomic_inc(&priv->adapter->tx_pending);
+		atomic_inc(&priv->adapter->bypass_tx_pending);
+		mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
+	} else {
+		atomic_inc(&priv->adapter->tx_pending);
+		mwifiex_wmm_add_buf_txqueue(priv, skb);
+	}
 
 	mwifiex_queue_main_work(priv->adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index ae98b5b83..6b9512140 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -281,6 +281,7 @@ struct mwifiex_ra_list_tbl {
 	u8 amsdu_in_ampdu;
 	u16 total_pkt_count;
 	bool tdls_link;
+	bool tx_paused;
 };
 
 struct mwifiex_tid_tbl {
@@ -294,6 +295,7 @@ struct mwifiex_tid_tbl {
 struct mwifiex_wmm_desc {
 	struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
 	u32 packets_out[MAX_NUM_TID];
+	u32 pkts_paused[MAX_NUM_TID];
 	/* spin lock to protect ra_list */
 	spinlock_t ra_list_spinlock;
 	struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
@@ -517,6 +519,7 @@ struct mwifiex_private {
 	u8 frame_type;
 	u8 curr_addr[ETH_ALEN];
 	u8 media_connected;
+	u8 port_open;
 	u32 num_tx_timeout;
 	/* track consecutive timeout */
 	u8 tx_timeout_cnt;
@@ -662,6 +665,8 @@ struct mwifiex_private {
 	struct cfg80211_beacon_data beacon_after;
 	struct mwifiex_11h_intf_state state_11h;
 	struct mwifiex_ds_mem_rw mem_rw;
+	struct sk_buff_head bypass_txq;
+	struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
 };
 
@@ -768,6 +773,7 @@ struct mwifiex_sta_node {
 	u8 tdls_status;
 	struct mwifiex_tdls_capab tdls_cap;
 	struct mwifiex_station_stats stats;
+	u8 tx_pause;
 };
 
 struct mwifiex_auto_tdls_peer {
@@ -831,6 +837,7 @@ struct mwifiex_adapter {
 	wait_queue_head_t init_wait_q;
 	void *card;
 	struct mwifiex_if_ops if_ops;
+	atomic_t bypass_tx_pending;
 	atomic_t rx_pending;
 	atomic_t tx_pending;
 	atomic_t cmd_pending;
@@ -979,6 +986,8 @@ struct mwifiex_adapter {
 	u8 coex_win_size;
 	u8 coex_tx_win_size;
 	u8 coex_rx_win_size;
+	bool drcs_enabled;
+	u8 active_scan_triggered;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1330,6 +1339,21 @@ static inline u8 mwifiex_is_any_intf_active(struct mwifiex_private *priv)
 	return 0;
 }
 
+static inline u8 mwifiex_is_tdls_link_setup(u8 status)
+{
+	switch (status) {
+	case TDLS_SETUP_COMPLETE:
+	case TDLS_CHAN_SWITCHING:
+	case TDLS_IN_BASE_CHAN:
+	case TDLS_IN_OFF_CHAN:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
 int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
 			     u32 func_init_shutdown);
 int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -1458,6 +1482,9 @@ struct mwifiex_sta_node *
 mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
 struct mwifiex_sta_node *
 mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
 int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
 				 u8 action_code, u8 dialog_token,
 				 u16 status_code, const u8 *extra_ies,
@@ -1488,6 +1515,13 @@ void mwifiex_check_auto_tdls(unsigned long context);
 void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
 void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
 void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv);
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac);
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+			  u8 primary_chan, u8 second_chan_offset, u8 band);
+
 int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
 					  struct host_cmd_ds_command *cmd,
 					  void *data_buf);
@@ -1522,6 +1556,12 @@ void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
 void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+				    struct sk_buff *event);
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+				      struct sk_buff *event_skb);
+
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
 void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 01210fb0f..d186d2864 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -266,12 +266,17 @@ static const struct pci_device_id mwifiex_ids[] = {
 	{
 		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		.driver_data = (unsigned long) &mwifiex_pcie8766,
+		.driver_data = (unsigned long)&mwifiex_pcie8766,
 	},
 	{
 		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		.driver_data = (unsigned long) &mwifiex_pcie8897,
+		.driver_data = (unsigned long)&mwifiex_pcie8897,
+	},
+	{
+		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		.driver_data = (unsigned long)&mwifiex_pcie8997,
 	},
 	{},
 };
@@ -1082,6 +1087,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
 		card->txbd_rdptr++;
 		break;
 	case PCIE_DEVICE_ID_MARVELL_88W8897:
+	case PCIE_DEVICE_ID_MARVELL_88W8997:
 		card->txbd_rdptr += reg->ring_tx_start_ptr;
 		break;
 	}
@@ -1179,6 +1185,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
 			card->txbd_wrptr++;
 			break;
 		case PCIE_DEVICE_ID_MARVELL_88W8897:
+		case PCIE_DEVICE_ID_MARVELL_88W8997:
 			card->txbd_wrptr += reg->ring_tx_start_ptr;
 			break;
 		}
@@ -1807,6 +1814,7 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
 
 	if (!card->evt_buf_list[rdptr]) {
 		skb_push(skb, INTF_HEADER_LEN);
+		skb_put(skb, MAX_EVENT_SIZE - skb->len);
 		if (mwifiex_map_pci_memory(adapter, skb,
 					   MAX_EVENT_SIZE,
 					   PCI_DMA_FROMDEVICE))
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index d97ff27f6..56633c9f0 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -30,10 +30,12 @@
 
 #define PCIE8766_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
 #define PCIE8897_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
+#define PCIE8997_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
 #define PCIE_DEVICE_ID_MARVELL_88W8766P     (0x2b30)
 #define PCIE_DEVICE_ID_MARVELL_88W8897      (0x2b38)
+#define PCIE_DEVICE_ID_MARVELL_88W8997      (0x2b42)
 
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD                 0x20
@@ -197,7 +199,38 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
 	.sleep_cookie = 0,
 	.fw_dump_ctrl = 0xcf4,
 	.fw_dump_start = 0xcf8,
-	.fw_dump_end = 0xcff
+	.fw_dump_end = 0xcff,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+	.cmd_addr_lo = PCIE_SCRATCH_0_REG,
+	.cmd_addr_hi = PCIE_SCRATCH_1_REG,
+	.cmd_size = PCIE_SCRATCH_2_REG,
+	.fw_status = PCIE_SCRATCH_3_REG,
+	.cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+	.cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+	.tx_rdptr = 0xC1A4,
+	.tx_wrptr = 0xC1A8,
+	.rx_rdptr = 0xC1A8,
+	.rx_wrptr = 0xC1A4,
+	.evt_rdptr = PCIE_SCRATCH_10_REG,
+	.evt_wrptr = PCIE_SCRATCH_11_REG,
+	.drv_rdy = PCIE_SCRATCH_12_REG,
+	.tx_start_ptr = 16,
+	.tx_mask = 0x0FFF0000,
+	.tx_wrap_mask = 0x01FF0000,
+	.rx_mask = 0x00000FFF,
+	.rx_wrap_mask = 0x000001FF,
+	.tx_rollover_ind = BIT(28),
+	.rx_rollover_ind = BIT(12),
+	.evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+	.ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+	.ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+	.ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+	.ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+	.ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+	.pfu_enabled = 1,
+	.sleep_cookie = 0,
 };
 
 struct mwifiex_pcie_device {
@@ -227,6 +260,15 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
 	.can_ext_scan = true,
 };
 
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+	.firmware       = PCIE8997_DEFAULT_FW_NAME,
+	.reg            = &mwifiex_reg_8997,
+	.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+	.can_dump_fw = false,
+	.can_ext_scan = true,
+};
+
 struct mwifiex_evt_buf_desc {
 	u64 paddr;
 	u16 len;
@@ -325,6 +367,7 @@ mwifiex_pcie_txbd_not_full(struct pcie_service_card *card)
 			return 1;
 		break;
 	case PCIE_DEVICE_ID_MARVELL_88W8897:
+	case PCIE_DEVICE_ID_MARVELL_88W8997:
 		if (((card->txbd_wrptr & reg->tx_mask) !=
 		     (card->txbd_rdptr & reg->tx_mask)) ||
 		    ((card->txbd_wrptr & reg->tx_rollover_ind) ==
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index baf9715dd..278ec477f 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -527,7 +527,8 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
 
 			if (ch->flags & IEEE80211_CHAN_NO_IR)
 				scan_chan_list[chan_idx].chan_scan_mode_bitmap
-					|= MWIFIEX_PASSIVE_SCAN;
+					|= (MWIFIEX_PASSIVE_SCAN |
+					    MWIFIEX_HIDDEN_SSID_REPORT);
 			else
 				scan_chan_list[chan_idx].chan_scan_mode_bitmap
 					&= ~MWIFIEX_PASSIVE_SCAN;
@@ -823,6 +824,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 	int i;
 	u8 ssid_filter;
 	struct mwifiex_ie_types_htcap *ht_cap;
+	struct mwifiex_ie_types_bss_mode *bss_mode;
 
 	/* The tlv_buf_len is calculated for each scan command.
 	   The TLVs added in this routine will be preserved since the routine
	   that sends the
@@ -908,6 +910,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 				wildcard_ssid_tlv->max_ssid_length = IEEE80211_MAX_SSID_LEN;
 
+			if (!memcmp(user_scan_in->ssid_list[i].ssid,
+				    "DIRECT-", 7))
+				wildcard_ssid_tlv->max_ssid_length = 0xfe;
+
 			memcpy(wildcard_ssid_tlv->ssid,
 			       user_scan_in->ssid_list[i].ssid, ssid_len);
@@ -968,6 +974,15 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 	else
 		*max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
 
+	if (adapter->ext_scan) {
+		bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
+		bss_mode->header.type = cpu_to_le16(TLV_TYPE_BSS_MODE);
+		bss_mode->header.len = cpu_to_le16(sizeof(bss_mode->bss_mode));
+		bss_mode->bss_mode = scan_cfg_out->bss_mode;
+		tlv_pos += sizeof(bss_mode->header) +
+			   le16_to_cpu(bss_mode->header.len);
+	}
+
 	/* If the input config or adapter has the number of Probes set,
 	   add tlv */
 	if (num_probes) {
@@ -1035,7 +1050,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 
 			if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
 				(scan_chan_list + chan_idx)->chan_scan_mode_bitmap
-					|= MWIFIEX_PASSIVE_SCAN;
+					|= (MWIFIEX_PASSIVE_SCAN |
+					    MWIFIEX_HIDDEN_SSID_REPORT);
 			else
 				(scan_chan_list + chan_idx)->chan_scan_mode_bitmap
@@ -1586,6 +1602,62 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
 	return ret;
 }
 
+/* This function checks if the SSID string contains all zeroes or its length
+ * is zero.
+ */
+static bool mwifiex_is_hidden_ssid(struct cfg80211_ssid *ssid)
+{
+	int idx;
+
+	for (idx = 0; idx < ssid->ssid_len; idx++) {
+		if (ssid->ssid[idx])
+			return false;
+	}
+
+	return true;
+}
+
+/* This function checks if any hidden SSID was found in passive scan channels
+ * and saves those channels for a specific SSID active scan.
+ */
+static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv,
+					     struct cfg80211_bss *bss)
+{
+	struct mwifiex_bssdescriptor *bss_desc;
+	int ret;
+	int chid;
+
+	/* Allocate and fill new bss descriptor */
+	bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL);
+	if (!bss_desc)
+		return -ENOMEM;
+
+	ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
+	if (ret)
+		goto done;
+
+	if (mwifiex_is_hidden_ssid(&bss_desc->ssid)) {
+		mwifiex_dbg(priv->adapter, INFO, "found hidden SSID\n");
+		for (chid = 0 ; chid < MWIFIEX_USER_SCAN_CHAN_MAX; chid++) {
+			if (priv->hidden_chan[chid].chan_number ==
+			    bss->channel->hw_value)
+				break;
+
+			if (!priv->hidden_chan[chid].chan_number) {
+				priv->hidden_chan[chid].chan_number =
+					bss->channel->hw_value;
+				priv->hidden_chan[chid].radio_type =
+					bss->channel->band;
+				priv->hidden_chan[chid].scan_type =
+					MWIFIEX_SCAN_TYPE_ACTIVE;
+				break;
+			}
+		}
+	}
+
+done:
+	kfree(bss_desc);
+	return 0;
+}
+
 static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
 					  struct cfg80211_bss *bss)
 {
@@ -1775,6 +1847,14 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
 					    .mac_address, ETH_ALEN))
 				mwifiex_update_curr_bss_params(priv, bss);
 			cfg80211_put_bss(priv->wdev.wiphy, bss);
+
+			if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+			    (chan->flags & IEEE80211_CHAN_NO_IR)) {
+				mwifiex_dbg(adapter, INFO,
+					    "radar or passive channel %d\n",
+					    channel);
+				mwifiex_save_hidden_ssid_channels(priv, bss);
+			}
 		}
 	} else {
 		mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
@@ -1798,6 +1878,57 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
 	}
 }
 
+/* This function checks if any hidden SSID was found in passive scan channels
+ * and does a specific SSID active scan for those channels.
+ */
+static int
+mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
+{
+	int ret;
+	struct mwifiex_adapter *adapter = priv->adapter;
+	u8 id = 0;
+	struct mwifiex_user_scan_cfg *user_scan_cfg;
+
+	if (adapter->active_scan_triggered || !priv->scan_request) {
+		adapter->active_scan_triggered = false;
+		return 0;
+	}
+
+	if (!priv->hidden_chan[0].chan_number) {
+		mwifiex_dbg(adapter, INFO, "No BSS with hidden SSID found on DFS channels\n");
+		return 0;
+	}
+	user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+
+	if (!user_scan_cfg)
+		return -ENOMEM;
+
+	memset(user_scan_cfg, 0, sizeof(*user_scan_cfg));
+
+	for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) {
+		if (!priv->hidden_chan[id].chan_number)
+			break;
+		memcpy(&user_scan_cfg->chan_list[id],
+		       &priv->hidden_chan[id],
+		       sizeof(struct mwifiex_user_scan_chan));
+	}
+
+	adapter->active_scan_triggered = true;
+	user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
+	user_scan_cfg->ssid_list = priv->scan_request->ssids;
+
+	ret = mwifiex_scan_networks(priv, user_scan_cfg);
+	kfree(user_scan_cfg);
+
+	memset(&priv->hidden_chan, 0, sizeof(priv->hidden_chan));
+
+	if (ret) {
+		dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 {
 	struct mwifiex_adapter *adapter = priv->adapter;
@@ -1811,6 +1942,8 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 		adapter->scan_processing = false;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
+		mwifiex_active_scan_req_for_passive_chan(priv);
+
 		if (!adapter->ext_scan)
 			mwifiex_complete_scan(priv);
@@ -1837,15 +1970,17 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 			adapter->scan_processing = false;
 			spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-			if (priv->scan_request) {
-				mwifiex_dbg(adapter, INFO,
-					    "info: aborting scan\n");
-				cfg80211_scan_done(priv->scan_request, 1);
-				priv->scan_request = NULL;
-			} else {
-				priv->scan_aborting = false;
-				mwifiex_dbg(adapter, INFO,
-					    "info: scan already aborted\n");
+			if (!adapter->active_scan_triggered) {
+				if (priv->scan_request) {
+					mwifiex_dbg(adapter, INFO,
+						    "info: aborting scan\n");
+					cfg80211_scan_done(priv->scan_request, 1);
+					priv->scan_request = NULL;
+				} else {
+					priv->scan_aborting = false;
+					mwifiex_dbg(adapter, INFO,
+						    "info: scan already aborted\n");
+				}
 			}
 		} else {
 			/* Get scan command from scan_pending_q and put to
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 03098cc5f..6ab76e2b5 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -51,6 +51,10 @@ static unsigned long iface_work_flags;
 
 static struct semaphore add_remove_card_sem;
 
+static struct memory_type_mapping generic_mem_type_map[] = {
+	{"DUMP", NULL, 0, 0xDD},
+};
+
 static struct memory_type_mapping mem_type_mapping_tbl[] = {
 	{"ITCM", NULL, 0, 0xF0},
 	{"DTCM", NULL, 0, 0xF1},
@@ -91,6 +95,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
 		return -ENOMEM;
 
 	card->func = func;
+	card->device_id = id;
 
 	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
@@ -107,6 +112,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
 		card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
 		card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
 		card->can_dump_fw = data->can_dump_fw;
+		card->fw_dump_enh = data->fw_dump_enh;
 		card->can_auto_tdls = data->can_auto_tdls;
 		card->can_ext_scan = data->can_ext_scan;
 	}
@@ -287,6 +293,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
 #define SDIO_DEVICE_ID_MARVELL_8887   (0x9135)
 /* Device ID for SD8801 */
 #define SDIO_DEVICE_ID_MARVELL_8801   (0x9139)
+/* Device ID for SD8997 */
+#define SDIO_DEVICE_ID_MARVELL_8997   (0x9141)
 
 /* WLAN IDs */
@@ -303,6 +311,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
 		.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
 		.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
+		.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
 	{},
 };
@@ -910,6 +920,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 	if (!fwbuf)
 		return -ENOMEM;
 
+	sdio_claim_host(card->func);
+
 	/* Perform firmware data transfer */
 	do {
 		/* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
@@ -1014,6 +1026,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 		offset += txlen;
 	} while (true);
 
+	sdio_release_host(card->func);
+
 	mwifiex_dbg(adapter, MSG,
 		    "info: FW download over, size %d bytes\n", offset);
@@ -1964,8 +1978,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 	adapter->dev = &func->dev;
 
 	strcpy(adapter->fw_name, card->firmware);
-	adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
-	adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+	if (card->fw_dump_enh) {
+		adapter->mem_type_mapping_tbl = generic_mem_type_map;
+		adapter->num_mem_types = 1;
+	} else {
+		adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+		adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+	}
 
 	return 0;
 }
@@ -2107,26 +2126,46 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
 		port, card->mp_data_port_mask);
 }
 
+static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+{
+	struct sdio_func *func = card->func;
+	const struct sdio_device_id *device_id = card->device_id;
+
+	/* TODO mmc_hw_reset does not require destroying and re-probing the
+	 * whole adapter. Hence there was no need for this rube-goldberg
+	 * design to reload the fw from an external workqueue. If we don't
+	 * destroy the adapter we could reload the fw from
+	 * mwifiex_main_work_queue directly.
+	 * The real difficulty with fw reset is to restore all the user
+	 * settings applied through ioctl. By destroying and recreating the
+	 * adapter, we take the easy way out, since we rely on user space to
+	 * restore them. We assume that user space will treat the new
+	 * incarnation of the adapter (interfaces) as if they had been just
+	 * discovered and initialize them from scratch.
+	 */
+
+	mwifiex_sdio_remove(func);
+
+	/* power cycle the adapter */
+	sdio_claim_host(func);
+	mmc_hw_reset(func->card->host);
+	sdio_release_host(func);
+
+	mwifiex_sdio_probe(func, device_id);
+}
+
 static struct mwifiex_adapter *save_adapter;
 static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
 	struct sdio_mmc_card *card = adapter->card;
-	struct mmc_host *target = card->func->card->host;
-
-	/* The actual reset operation must be run outside of driver thread.
-	 * This is because mmc_remove_host() will cause the device to be
-	 * instantly destroyed, and the driver then needs to end its thread,
-	 * leading to a deadlock.
-	 *
-	 * We run it in a totally independent workqueue.
-	 */
-	mwifiex_dbg(adapter, WARN, "Resetting card...\n");
-	mmc_remove_host(target);
-	/* 200ms delay is based on experiment with sdhci controller */
-	mdelay(200);
-	target->rescan_entered = 0; /* rescan non-removable cards */
-	mmc_add_host(target);
+	/* TODO card pointer is unprotected. If the adapter is removed
+	 * physically, sdio core might trigger mwifiex_sdio_remove, before this
+	 * workqueue is run, which will destroy the adapter struct. When this
+	 * workqueue eventually executes it will dereference an invalid adapter
+	 * pointer.
+	 */
+	mwifiex_recreate_adapter(card);
 }
 
 /* This function reads/writes firmware */
@@ -2138,8 +2177,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
 	int ret, tries;
 	u8 ctrl_data = 0;
 
-	sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
-		    &ret);
+	sdio_writeb(card->func, card->reg->fw_dump_host_ready,
+		    card->reg->fw_dump_ctrl, &ret);
 	if (ret) {
 		mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
 		return RDWR_STATUS_FAILURE;
 	}
@@ -2155,10 +2194,10 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
 			break;
 		if (doneflag && ctrl_data == doneflag)
 			return RDWR_STATUS_DONE;
-		if (ctrl_data != FW_DUMP_HOST_READY) {
+		if (ctrl_data != card->reg->fw_dump_host_ready) {
 			mwifiex_dbg(adapter, WARN,
-				    "The ctrl reg was changed, re-try again!\n");
-			sdio_writeb(card->func, FW_DUMP_HOST_READY,
+				    "The ctrl reg was changed, re-try again\n");
+			sdio_writeb(card->func, card->reg->fw_dump_host_ready,
 				    card->reg->fw_dump_ctrl, &ret);
 			if (ret) {
 				mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
@@ -2167,7 +2206,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
 		}
 		usleep_range(100, 200);
 	}
-	if (ctrl_data == FW_DUMP_HOST_READY) {
+	if (ctrl_data == card->reg->fw_dump_host_ready) {
 		mwifiex_dbg(adapter, ERROR,
 			    "Fail to pull ctrl_data\n");
 		return RDWR_STATUS_FAILURE;
@@ -2300,10 +2339,129 @@ done:
 	sdio_release_host(card->func);
 }
 
+static void mwifiex_sdio_generic_fw_dump(struct mwifiex_adapter *adapter)
+{
+	struct sdio_mmc_card *card = adapter->card;
+	struct memory_type_mapping *entry = &generic_mem_type_map[0];
+	unsigned int reg, reg_start, reg_end;
+	u8 start_flag = 0, done_flag = 0;
+	u8 *dbg_ptr, *end_ptr;
+	enum rdwr_status stat;
+	int ret = -1, tries;
+
+	if (!card->fw_dump_enh)
+		return;
+
+	if (entry->mem_ptr) {
+		vfree(entry->mem_ptr);
+		entry->mem_ptr = NULL;
+	}
+	entry->mem_size = 0;
+
+	mwifiex_pm_wakeup_card(adapter);
+	sdio_claim_host(card->func);
+
+	mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
+
+	stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+	if (stat == RDWR_STATUS_FAILURE)
+		goto done;
+
+	reg_start = card->reg->fw_dump_start;
+	reg_end = card->reg->fw_dump_end;
+	for (reg = reg_start; reg <= reg_end; reg++) {
+		for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+			start_flag = sdio_readb(card->func, reg, &ret);
+			if (ret) {
+				mwifiex_dbg(adapter, ERROR,
+					    "SDIO read err\n");
+				goto done;
+			}
+			if (start_flag == 0)
+				break;
+			if (tries == MAX_POLL_TRIES) {
+				mwifiex_dbg(adapter, ERROR,
+					    "FW not ready to dump\n");
+				ret = -1;
+				goto done;
+			}
+		}
+		usleep_range(100, 200);
+	}
+
+	entry->mem_ptr = vmalloc(0xf0000 + 1);
+	if (!entry->mem_ptr) {
+		ret = -1;
+		goto done;
+	}
+	dbg_ptr = entry->mem_ptr;
+	entry->mem_size = 0xf0000;
+	end_ptr = dbg_ptr + entry->mem_size;
+
+	done_flag = entry->done_flag;
+	mwifiex_dbg(adapter, DUMP,
+		    "Start %s output, please wait...\n", entry->mem_name);
+
+	while (true) {
+		stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+		if (stat == RDWR_STATUS_FAILURE)
+			goto done;
+		for (reg = reg_start; reg <= reg_end; reg++) {
+			*dbg_ptr = sdio_readb(card->func, reg, &ret);
+			if (ret) {
+				mwifiex_dbg(adapter, ERROR,
+					    "SDIO read err\n");
+				goto done;
+			}
+			dbg_ptr++;
+			if (dbg_ptr >= end_ptr) {
+				u8 *tmp_ptr;
+
+				tmp_ptr = vmalloc(entry->mem_size + 0x4000 + 1);
+				if (!tmp_ptr)
+					goto done;
+
+				memcpy(tmp_ptr, entry->mem_ptr,
+				       entry->mem_size);
+				vfree(entry->mem_ptr);
+				entry->mem_ptr = tmp_ptr;
+				tmp_ptr = NULL;
+				dbg_ptr = entry->mem_ptr + entry->mem_size;
+				entry->mem_size += 0x4000;
+				end_ptr = entry->mem_ptr + entry->mem_size;
+			}
+		}
+		if (stat == RDWR_STATUS_DONE) {
+			entry->mem_size = dbg_ptr - entry->mem_ptr;
+			mwifiex_dbg(adapter, DUMP, "dump %s done size=0x%x\n",
+				    entry->mem_name, entry->mem_size);
+			ret = 0;
+			break;
+		}
+	}
+	mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
+
+done:
+	if (ret) {
+		mwifiex_dbg(adapter, ERROR, "firmware dump failed\n");
+		if (entry->mem_ptr) {
+			vfree(entry->mem_ptr);
+			entry->mem_ptr = NULL;
+		}
+		entry->mem_size = 0;
+	}
+	sdio_release_host(card->func);
+}
+
 static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
 {
+	struct sdio_mmc_card *card = adapter->card;
+
 	mwifiex_drv_info_dump(adapter);
-	mwifiex_sdio_fw_dump(adapter);
+	if (card->fw_dump_enh)
+		mwifiex_sdio_generic_fw_dump(adapter);
+	else
+		mwifiex_sdio_fw_dump(adapter);
 	mwifiex_upload_device_dump(adapter);
 }
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 06c956c0e..da70fde39 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -35,6 +35,7 @@
 #define SD8897_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
 #define SD8887_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
 #define SD8801_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
+#define SD8997_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
 
 #define BLOCK_MODE	1
 #define BYTE_MODE	0
@@ -222,6 +223,7 @@ struct mwifiex_sdio_card_reg {
 	u8 cmd_cfg_1;
 	u8 cmd_cfg_2;
 	u8 cmd_cfg_3;
+	u8 fw_dump_host_ready;
 	u8 fw_dump_ctrl;
 	u8 fw_dump_start;
 	u8 fw_dump_end;
@@ -257,11 +259,15 @@ struct sdio_mmc_card {
 	bool supports_sdio_new_mode;
 	bool has_control_mask;
 	bool can_dump_fw;
+	bool fw_dump_enh;
 	bool can_auto_tdls;
 	bool can_ext_scan;
 
 	struct mwifiex_sdio_mpa_tx mpa_tx;
 	struct mwifiex_sdio_mpa_rx mpa_rx;
+
+	/* needed for card reset */
+	const struct sdio_device_id *device_id;
};
@@ -275,6 +281,7 @@ struct mwifiex_sdio_device {
 	bool supports_sdio_new_mode;
 	bool has_control_mask;
 	bool can_dump_fw;
+	bool fw_dump_enh;
 	bool can_auto_tdls;
 	bool can_ext_scan;
 };
@@ -350,6 +357,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
 	.cmd_cfg_1 = 0xb9,
 	.cmd_cfg_2 = 0xba,
 	.cmd_cfg_3 = 0xbb,
+	.fw_dump_host_ready = 0xee,
 	.fw_dump_ctrl = 0xe2,
 	.fw_dump_start = 0xe3,
 	.fw_dump_end = 0xea,
@@ -361,6 +369,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
 				 0x59, 0x5c, 0x5d},
 };
 
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+	.start_rd_port = 0,
+	.start_wr_port = 0,
+	.base_0_reg = 0xF8,
+	.base_1_reg = 0xF9,
+	.poll_reg = 0x5C,
+	.host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+			CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+	.host_int_rsr_reg = 0x4,
+	.host_int_status_reg = 0x0C,
+	.host_int_mask_reg = 0x08,
+	.status_reg_0 = 0xE8,
+	.status_reg_1 = 0xE9,
+	.sdio_int_mask = 0xff,
+	.data_port_mask = 0xffffffff,
+	.io_port_0_reg = 0xE4,
+	.io_port_1_reg = 0xE5,
+	.io_port_2_reg = 0xE6,
+	.max_mp_regs = 196,
+	.rd_bitmap_l = 0x10,
+	.rd_bitmap_u = 0x11,
+	.rd_bitmap_1l = 0x12,
+	.rd_bitmap_1u = 0x13,
+	.wr_bitmap_l = 0x14,
+	.wr_bitmap_u = 0x15,
+	.wr_bitmap_1l = 0x16,
+	.wr_bitmap_1u = 0x17,
+	.rd_len_p0_l = 0x18,
+	.rd_len_p0_u = 0x19,
+	.card_misc_cfg_reg = 0xd8,
+	.card_cfg_2_1_reg = 0xd9,
+	.cmd_rd_len_0 = 0xc0,
+	.cmd_rd_len_1 = 0xc1,
+	.cmd_rd_len_2 = 0xc2,
+	.cmd_rd_len_3 = 0xc3,
+	.cmd_cfg_0 = 0xc4,
+	.cmd_cfg_1 = 0xc5,
+	.cmd_cfg_2 = 0xc6,
+	.cmd_cfg_3 = 0xc7,
+	.fw_dump_host_ready = 0xcc,
+	.fw_dump_ctrl = 0xf0,
+	.fw_dump_start = 0xf1,
+	.fw_dump_end = 0xf8,
+	.func1_dump_reg_start = 0x10,
+	.func1_dump_reg_end = 0x17,
+	.func1_scratch_reg = 0xe8,
+	.func1_spec_reg_num = 13,
+	.func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+				 0x60, 0x61, 0x62, 0x64,
+				 0x65, 0x66, 0x68, 0x69,
+				 0x6a},
+};
+
 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
 	.start_rd_port = 0,
 	.start_wr_port = 0,
@@ -469,6 +530,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
 	.can_ext_scan = true,
 };
 
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+	.firmware = SD8997_DEFAULT_FW_NAME,
+	.reg = &mwifiex_reg_sd8997,
+	.max_ports = 32,
+	.mp_agg_pkt_limit = 16,
+	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+	.supports_sdio_new_mode = true,
+	.has_control_mask = false,
+	.can_dump_fw = true,
+	.fw_dump_enh = true,
+	.can_auto_tdls = false,
+	.can_ext_scan = true,
+};
+
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
 	.firmware = SD8887_DEFAULT_FW_NAME,
 	.reg = &mwifiex_reg_sd8887,
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 037adcd1f..a49a80dd7 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -26,6 +26,10 @@
 #include "11n.h"
 #include "11ac.h"
 
+static bool drcs;
+module_param(drcs, bool, 0644);
+MODULE_PARM_DESC(drcs, "multi-channel operation:1, single-channel operation:0");
+
 static bool disable_auto_ds;
 module_param(disable_auto_ds, bool, 0);
 MODULE_PARM_DESC(disable_auto_ds,
@@ -1511,6 +1515,22 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
 	return 0;
 }
 
+static int
+mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv,
+			  struct host_cmd_ds_command *cmd,
+			  u16 cmd_action, void *data_buf)
+{
+	struct host_cmd_ds_multi_chan_policy *mc_pol = &cmd->params.mc_policy;
+	const u16 *drcs_info = data_buf;
+
+	mc_pol->action = cpu_to_le16(cmd_action);
+	mc_pol->policy = cpu_to_le16(*drcs_info);
+	cmd->command = cpu_to_le16(HostCmd_CMD_MC_POLICY);
+	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_multi_chan_policy) +
+				S_DS_GEN);
+	return 0;
+}
+
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
 			 struct host_cmd_ds_command *cmd,
@@ -1575,6 +1595,50 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
 	return 0;
 }
 
+static int
+mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
+			struct host_cmd_ds_command *cmd,
+			u16 cmd_action, void *data_buf)
+{
+	struct host_cmd_ds_tdls_config *tdls_config = &cmd->params.tdls_config;
+	struct mwifiex_tdls_init_cs_params *config;
+	struct mwifiex_tdls_config *init_config;
+	u16 len;
+
+	cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
+	cmd->size = cpu_to_le16(S_DS_GEN);
+	tdls_config->tdls_action = cpu_to_le16(cmd_action);
+	le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+
+	switch (cmd_action) {
+	case ACT_TDLS_CS_ENABLE_CONFIG:
+		init_config = data_buf;
+		len
= sizeof(*init_config); + memcpy(tdls_config->tdls_data, init_config, len); + break; + case ACT_TDLS_CS_INIT: + config = data_buf; + len = sizeof(*config); + memcpy(tdls_config->tdls_data, config, len); + break; + case ACT_TDLS_CS_STOP: + len = sizeof(struct mwifiex_tdls_stop_cs_params); + memcpy(tdls_config->tdls_data, data_buf, len); + break; + case ACT_TDLS_CS_PARAMS: + len = sizeof(struct mwifiex_tdls_config_cs_params); + memcpy(tdls_config->tdls_data, data_buf, len); + break; + default: + mwifiex_dbg(priv->adapter, ERROR, + "Unknown TDLS configuration\n"); + return -ENOTSUPP; + } + + le16_add_cpu(&cmd->size, len); + return 0; +} + static int mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, @@ -1933,10 +1997,12 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, if (priv->bss_mode == NL80211_IFTYPE_ADHOC) cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_ADHOC; - else if (priv->bss_mode == NL80211_IFTYPE_STATION) + else if (priv->bss_mode == NL80211_IFTYPE_STATION || + priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_INFRA; - else if (priv->bss_mode == NL80211_IFTYPE_AP) + else if (priv->bss_mode == NL80211_IFTYPE_AP || + priv->bss_mode == NL80211_IFTYPE_P2P_GO) cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP; cmd_ptr->size = cpu_to_le16(sizeof(struct host_cmd_ds_set_bss_mode) + S_DS_GEN); @@ -1958,6 +2024,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_TDLS_OPER: ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf); break; + case HostCmd_CMD_TDLS_CONFIG: + ret = mwifiex_cmd_tdls_config(priv, cmd_ptr, cmd_action, + data_buf); + break; case HostCmd_CMD_CHAN_REPORT_REQUEST: ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr, data_buf); @@ -1966,6 +2036,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action, data_buf); break; + case HostCmd_CMD_MC_POLICY: + ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action, + data_buf); + break; default: mwifiex_dbg(priv->adapter, ERROR, "PREP_CMD: unknown cmd- %#x\n", cmd_no); @@ -2082,6 +2156,18 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) if (ret) return -1; } + + if (drcs) { + adapter->drcs_enabled = true; + if (ISSUPP_DRCS_ENABLED(adapter->fw_cap_info)) + ret = mwifiex_send_cmd(priv, + HostCmd_CMD_MC_POLICY, + HostCmd_ACT_GEN_SET, 0, + &adapter->drcs_enabled, + true); + if (ret) + return -1; + } } /* get tx rate */ diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index b645884b3..87b69d8ad 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -599,6 +599,7 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, "info: key: GTK is set\n"); priv->wpa_is_gtk_set = true; priv->scan_block = false; + priv->port_open = true; } } @@ -629,6 +630,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); priv->wpa_is_gtk_set = true; priv->scan_block = false; + priv->port_open = true; } } @@ -893,7 +895,7 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv, case ACT_TDLS_DELETE: if (reason) { if (!node || reason == TDLS_ERR_LINK_NONEXISTENT) - mwifiex_dbg(priv->adapter, ERROR, + mwifiex_dbg(priv->adapter, MSG, "TDLS link delete for %pM failed: 
reason %d\n", cmd_tdls_oper->peer_mac, reason); else @@ -1191,12 +1193,15 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_TDLS_OPER: ret = mwifiex_ret_tdls_oper(priv, resp); + case HostCmd_CMD_MC_POLICY: break; case HostCmd_CMD_CHAN_REPORT_REQUEST: break; case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp); break; + case HostCmd_CMD_TDLS_CONFIG: + break; default: mwifiex_dbg(adapter, ERROR, "CMD_RESP: unknown cmd response %#x\n", diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 848de2621..3d18c585e 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -54,6 +54,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) priv->media_connected = false; priv->scan_block = false; + priv->port_open = false; if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) { @@ -153,6 +154,7 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, struct mwifiex_sta_node *sta_ptr; struct mwifiex_tdls_generic_event *tdls_evt = (void *)event_skb->data + sizeof(adapter->event_cause); + u8 *mac = tdls_evt->peer_mac; /* reserved 2 bytes are not mandatory in tdls event */ if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) - @@ -175,6 +177,59 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, le16_to_cpu(tdls_evt->u.reason_code), GFP_KERNEL); break; + case TDLS_EVENT_CHAN_SWITCH_RESULT: + mwifiex_dbg(adapter, EVENT, "tdls channel switch result :\n"); + mwifiex_dbg(adapter, EVENT, + "status=0x%x, reason=0x%x cur_chan=%d\n", + tdls_evt->u.switch_result.status, + tdls_evt->u.switch_result.reason, + tdls_evt->u.switch_result.cur_chan); + + /* tdls channel switch failed */ + if (tdls_evt->u.switch_result.status != 0) { + switch (tdls_evt->u.switch_result.cur_chan) { + case TDLS_BASE_CHANNEL: + sta_ptr->tdls_status = TDLS_IN_BASE_CHAN; + break; + case TDLS_OFF_CHANNEL: + sta_ptr->tdls_status = TDLS_IN_OFF_CHAN; + break; + default: + break; + } + return ret; + } + + /* tdls channel switch success */ + switch (tdls_evt->u.switch_result.cur_chan) { + case TDLS_BASE_CHANNEL: + if (sta_ptr->tdls_status == TDLS_IN_BASE_CHAN) + break; + mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac, + false); + sta_ptr->tdls_status = TDLS_IN_BASE_CHAN; + break; + case TDLS_OFF_CHANNEL: + if (sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) + break; + mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac, + true); + sta_ptr->tdls_status = TDLS_IN_OFF_CHAN; + break; + default: + break; + } + + break; + case TDLS_EVENT_START_CHAN_SWITCH: + mwifiex_dbg(adapter, EVENT, "tdls start channel switch...\n"); + sta_ptr->tdls_status = TDLS_CHAN_SWITCHING; + break; + case TDLS_EVENT_CHAN_SWITCH_STOPPED: + mwifiex_dbg(adapter, EVENT, + "tdls chan switch stopped, reason=%d\n", + tdls_evt->u.cs_stop_reason); + break; default: break; } @@ -182,6 +237,145 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, return ret; } +static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv, + struct mwifiex_ie_types_header *tlv) +{ + struct mwifiex_tx_pause_tlv *tp; + struct mwifiex_sta_node *sta_ptr; + unsigned long flags; + + tp = (void *)tlv; + mwifiex_dbg(priv->adapter, EVENT, + "uap tx_pause: %pM pause=%d, pkts=%d\n", + tp->peermac, tp->tx_pause, + tp->pkt_cnt); + + if (ether_addr_equal(tp->peermac, priv->netdev->dev_addr)) { + if 
(tp->tx_pause) + priv->port_open = false; + else + priv->port_open = true; + } else if (is_multicast_ether_addr(tp->peermac)) { + mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause); + } else { + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + + if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { + sta_ptr->tx_pause = tp->tx_pause; + mwifiex_update_ralist_tx_pause(priv, tp->peermac, + tp->tx_pause); + } + } +} + +static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv, + struct mwifiex_ie_types_header *tlv) +{ + struct mwifiex_tx_pause_tlv *tp; + struct mwifiex_sta_node *sta_ptr; + int status; + unsigned long flags; + + tp = (void *)tlv; + mwifiex_dbg(priv->adapter, EVENT, + "sta tx_pause: %pM pause=%d, pkts=%d\n", + tp->peermac, tp->tx_pause, + tp->pkt_cnt); + + if (ether_addr_equal(tp->peermac, priv->cfg_bssid)) { + if (tp->tx_pause) + priv->port_open = false; + else + priv->port_open = true; + } else { + if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + return; + + status = mwifiex_get_tdls_link_status(priv, tp->peermac); + if (mwifiex_is_tdls_link_setup(status)) { + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + + if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { + sta_ptr->tx_pause = tp->tx_pause; + mwifiex_update_ralist_tx_pause(priv, + tp->peermac, + tp->tx_pause); + } + } + } +} + +void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, + struct sk_buff *event_skb) +{ + struct mwifiex_ie_types_multi_chan_info *chan_info; + u16 status; + + chan_info = (void *)event_skb->data + sizeof(u32); + + if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) { + mwifiex_dbg(priv->adapter, ERROR, + "unknown TLV in chan_info event\n"); + return; + } + + status = le16_to_cpu(chan_info->status); + + if (status) { + mwifiex_dbg(priv->adapter, EVENT, + "multi-channel operation started\n"); + } else { + mwifiex_dbg(priv->adapter, EVENT, + "multi-channel operation over\n"); + } +} + +void mwifiex_process_tx_pause_event(struct mwifiex_private *priv, + struct sk_buff *event_skb) +{ + struct mwifiex_ie_types_header *tlv; + u16 tlv_type, tlv_len; + int tlv_buf_left; + + if (!priv->media_connected) { + mwifiex_dbg(priv->adapter, ERROR, + "tx_pause event while disconnected; bss_role=%d\n", + priv->bss_role); + return; + } + + tlv_buf_left = event_skb->len - sizeof(u32); + tlv = (void *)event_skb->data + sizeof(u32); + + while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_len = le16_to_cpu(tlv->len); + if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) > + tlv_buf_left) { + mwifiex_dbg(priv->adapter, ERROR, + "wrong tlv: tlvLen=%d, tlvBufLeft=%d\n", + tlv_len, tlv_buf_left); + break; + } + if (tlv_type == TLV_TYPE_TX_PAUSE) { + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) + mwifiex_process_sta_tx_pause(priv, tlv); + else + mwifiex_process_uap_tx_pause(priv, tlv); + } + + tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) + + tlv_len; + tlv = (void *)((u8 *)tlv + tlv_len + + sizeof(struct mwifiex_ie_types_header)); + } + +} + /* * This function handles coex events generated by firmware */ @@ -359,7 +553,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_PS_AWAKE: mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); - if 
(!adapter->pps_uapsd_mode && + if (!adapter->pps_uapsd_mode && priv->port_open && priv->media_connected && adapter->sleep_period.period) { adapter->pps_uapsd_mode = true; mwifiex_dbg(adapter, EVENT, @@ -438,6 +632,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_PORT_RELEASE: mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n"); + priv->port_open = true; break; case EVENT_EXT_SCAN_REPORT: @@ -573,6 +768,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) ret = mwifiex_parse_tdls_event(priv, adapter->event_skb); break; + case EVENT_TX_DATA_PAUSE: + mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n"); + mwifiex_process_tx_pause_event(priv, adapter->event_skb); + break; + + case EVENT_MULTI_CHAN_INFO: + mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n"); + mwifiex_process_multi_chan_event(priv, adapter->event_skb); + break; + case EVENT_TX_STATUS_REPORT: mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n"); mwifiex_parse_tx_status_event(priv, adapter->event_body); diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index d8b7d9c20..a6c8a4f7b 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -66,8 +66,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, if (status <= 0) { if (status == 0) status = -ETIMEDOUT; - mwifiex_dbg(adapter, ERROR, - "cmd_wait_q terminated: %d\n", status); + mwifiex_dbg(adapter, ERROR, "cmd_wait_q terminated: %d\n", + status); mwifiex_cancel_all_pending_cmd(adapter); return status; } diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index 2faa1bc42..b3e163de9 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -49,7 +49,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv, tid = skb->priority; tid_down = mwifiex_wmm_downgrade_tid(priv, tid); - if (status == TDLS_SETUP_COMPLETE) { + if (mwifiex_is_tdls_link_setup(status)) { ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac); ra_list->tdls_link = true; tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; @@ -164,7 +164,7 @@ static void mwifiex_tdls_add_aid(struct mwifiex_private *priv, pos = (void *)skb_put(skb, 4); *pos++ = WLAN_EID_AID; *pos++ = 2; - *pos++ = le16_to_cpu(assoc_rsp->a_id); + memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id)); return; } @@ -355,6 +355,7 @@ static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv, extcap->ieee_hdr.len = 8; memset(extcap->ext_capab, 0, 8); extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED; + extcap->ext_capab[3] |= WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH; if (priv->adapter->is_hw_11ac_capable) extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED; @@ -1071,6 +1072,11 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer) for (i = 0; i < MAX_NUM_TID; i++) sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED; } + if (sta_ptr->tdls_cap.extcap.ext_capab[3] & + WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) { + mwifiex_config_tdls_enable(priv); + mwifiex_config_tdls_cs_params(priv); + } memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq)); mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE); @@ -1141,7 +1147,7 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv, spin_lock_irqsave(&priv->sta_list_spinlock, flags); list_for_each_entry(sta_ptr, &priv->sta_list, list) { - if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) { + if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) { 
ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr); peer++; count++; @@ -1295,7 +1301,7 @@ void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv, if ((link_status == TDLS_NOT_SETUP) && (peer->tdls_status == TDLS_SETUP_INPROGRESS)) peer->failure_count++; - else if (link_status == TDLS_SETUP_COMPLETE) + else if (mwifiex_is_tdls_link_setup(link_status)) peer->failure_count = 0; peer->tdls_status = link_status; @@ -1367,7 +1373,7 @@ void mwifiex_check_auto_tdls(unsigned long context) if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) || !tdls_peer->rssi) && - tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) { + mwifiex_is_tdls_link_setup(tdls_peer->tdls_status)) { tdls_peer->tdls_status = TDLS_LINK_TEARDOWN; mwifiex_dbg(priv->adapter, MSG, "teardown TDLS link,peer=%pM rssi=%d\n", @@ -1416,3 +1422,67 @@ void mwifiex_clean_auto_tdls(struct mwifiex_private *priv) mwifiex_flush_auto_tdls_list(priv); } } + +static int mwifiex_config_tdls(struct mwifiex_private *priv, u8 enable) +{ + struct mwifiex_tdls_config config; + + config.enable = cpu_to_le16(enable); + return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG, + ACT_TDLS_CS_ENABLE_CONFIG, 0, &config, true); +} + +int mwifiex_config_tdls_enable(struct mwifiex_private *priv) +{ + return mwifiex_config_tdls(priv, true); +} + +int mwifiex_config_tdls_disable(struct mwifiex_private *priv) +{ + return mwifiex_config_tdls(priv, false); +} + +int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv) +{ + struct mwifiex_tdls_config_cs_params config_tdls_cs_params; + + config_tdls_cs_params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME; + config_tdls_cs_params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK; + config_tdls_cs_params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK; + + return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG, + ACT_TDLS_CS_PARAMS, 0, + &config_tdls_cs_params, true); +} + +int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac) +{ + struct mwifiex_tdls_stop_cs_params stop_tdls_cs_params; + + ether_addr_copy(stop_tdls_cs_params.peer_mac, peer_mac); + + return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG, + ACT_TDLS_CS_STOP, 0, + &stop_tdls_cs_params, true); +} + +int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac, + u8 primary_chan, u8 second_chan_offset, u8 band) +{ + struct mwifiex_tdls_init_cs_params start_tdls_cs_params; + + ether_addr_copy(start_tdls_cs_params.peer_mac, peer_mac); + start_tdls_cs_params.primary_chan = primary_chan; + start_tdls_cs_params.second_chan_offset = second_chan_offset; + start_tdls_cs_params.band = band; + + start_tdls_cs_params.switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME); + start_tdls_cs_params.switch_timeout = + cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT); + start_tdls_cs_params.reg_class = MWIFIEX_DEF_CS_REG_CLASS; + start_tdls_cs_params.periodicity = MWIFIEX_DEF_CS_PERIODICITY; + + return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG, + ACT_TDLS_CS_INIT, 0, + &start_tdls_cs_params, true); +} diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index 5ed9b7940..8b1e5b5d4 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -370,8 +370,28 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv, /* consumes ack_skb */ skb_complete_wifi_ack(ack_skb, !tx_status->status); } else { + /* Remove broadcast address which was added by driver */ + memmove(ack_skb->data + + sizeof(struct ieee80211_hdr_3addr) + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16), + ack_skb->data + + 
sizeof(struct ieee80211_hdr_3addr) + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) + + ETH_ALEN, ack_skb->len - + (sizeof(struct ieee80211_hdr_3addr) + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) + + ETH_ALEN)); + ack_skb->len = ack_skb->len - ETH_ALEN; + /* Remove driver's proprietary header including 2 bytes + * of packet length and pass actual management frame buffer + * to cfg80211. + */ cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie, - ack_skb->data, ack_skb->len, + ack_skb->data + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + + sizeof(u16), ack_skb->len - + (MWIFIEX_MGMT_FRAME_HEADER_SIZE + + sizeof(u16)), !tx_status->status, GFP_ATOMIC); dev_kfree_skb_any(ack_skb); } diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index b74930054..4d5a6e3b6 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -808,7 +808,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_chan_def chandef) { - u8 config_bands = 0; + u8 config_bands = 0, old_bands = priv->adapter->config_bands; priv->bss_chandef = chandef; @@ -834,6 +834,11 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, } priv->adapter->config_bands = config_bands; + + if (old_bands != config_bands) { + mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy); + mwifiex_dnld_txpwr_table(priv); + } } int mwifiex_config_start_uap(struct mwifiex_private *priv, diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c index 7bc1f850e..46c972a65 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ b/drivers/net/wireless/mwifiex/uap_event.c @@ -41,6 +41,8 @@ static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv, mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:", event->data, event->len); + skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE); + while ((evt_len >= sizeof(tlv_hdr->header))) { tlv_hdr = (struct mwifiex_ie_types_data *)curr; tlv_len = le16_to_cpu(tlv_hdr->header.len); @@ -176,6 +178,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_UAP_BSS_IDLE: priv->media_connected = false; + priv->port_open = false; if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); mwifiex_stop_net_dev_queue(priv->netdev, adapter); @@ -185,6 +188,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_UAP_BSS_ACTIVE: priv->media_connected = true; + priv->port_open = true; if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); @@ -192,6 +196,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) case EVENT_UAP_BSS_START: mwifiex_dbg(adapter, EVENT, "AP EVENT: event id: %#x\n", eventcause); + priv->port_open = false; memcpy(priv->netdev->dev_addr, adapter->event_body + 2, ETH_ALEN); if (priv->hist_data) @@ -297,6 +302,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) mwifiex_bt_coex_wlan_param_update_event(priv, adapter->event_skb); break; + case EVENT_TX_DATA_PAUSE: + mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n"); + mwifiex_process_tx_pause_event(priv, adapter->event_skb); + break; + + case EVENT_MULTI_CHAN_INFO: + mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n"); + mwifiex_process_multi_chan_event(priv, adapter->event_skb); + break; + default: mwifiex_dbg(adapter, EVENT, "event: unknown event id: %#x\n", eventcause); diff --git 
a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 539c7aba4..a43934053 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -47,6 +47,11 @@ static struct usb_device_id mwifiex_usb_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2, USB_CLASS_VENDOR_SPEC, USB_SUBCLASS_VENDOR_SPEC, 0xff)}, + /* 8997 */ + {USB_DEVICE(USB8XXX_VID, USB8997_PID_1)}, + {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, 0xff)}, { } /* Terminating entry */ }; @@ -244,9 +249,11 @@ setup_for_next: if (card->rx_cmd_ep == context->ep) { mwifiex_usb_submit_rx_urb(context, size); } else { - context->skb = NULL; - if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) + if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) { mwifiex_usb_submit_rx_urb(context, size); + } else { + context->skb = NULL; + } } return; @@ -380,12 +387,14 @@ static int mwifiex_usb_probe(struct usb_interface *intf, case USB8797_PID_1: case USB8801_PID_1: case USB8897_PID_1: + case USB8997_PID_1: card->usb_boot_state = USB8XXX_FW_DNLD; break; case USB8766_PID_2: case USB8797_PID_2: case USB8801_PID_2: case USB8897_PID_2: + case USB8997_PID_2: card->usb_boot_state = USB8XXX_FW_READY; break; default: @@ -812,6 +821,12 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->dev = &card->udev->dev; switch (le16_to_cpu(card->udev->descriptor.idProduct)) { + case USB8997_PID_1: + case USB8997_PID_2: + adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; + strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME); + adapter->ext_scan = true; + break; case USB8897_PID_1: case USB8897_PID_2: adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; @@ -868,8 +883,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* Allocate memory for transmit */ fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL); - if (!fwdata) + if (!fwdata) { + ret = -ENOMEM; goto fw_exit; + } /* Allocate memory for receive */ recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL); diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h index 8499587bd..a6dbf4e08 100644 --- a/drivers/net/wireless/mwifiex/usb.h +++ b/drivers/net/wireless/mwifiex/usb.h @@ -32,6 +32,8 @@ #define USB8897_PID_2 0x2046 #define USB8801_PID_1 0x2049 #define USB8801_PID_2 0x204a +#define USB8997_PID_1 0x204d +#define USB8997_PID_2 0x204e #define USB8XXX_FW_DNLD 1 @@ -46,6 +48,7 @@ #define USB8797_DEFAULT_FW_NAME "/*(DEBLOBBED)*/" #define USB8801_DEFAULT_FW_NAME "/*(DEBLOBBED)*/" #define USB8897_DEFAULT_FW_NAME "/*(DEBLOBBED)*/" +#define USB8997_DEFAULT_FW_NAME "/*(DEBLOBBED)*/" #define FW_DNLD_TX_BUF_SIZE 620 #define FW_DNLD_RX_BUF_SIZE 2048 diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 790e61953..0cec8a644 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -126,6 +126,10 @@ static int num_of_items = ARRAY_SIZE(items); int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter) { + if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) + if (adapter->if_ops.init_fw_port) + adapter->if_ops.init_fw_port(adapter); + adapter->init_wait_q_woken = true; wake_up_interruptible(&adapter->init_wait_q); return 0; @@ -496,16 +500,12 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb) int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { - mwifiex_dbg(adapter, CMD, -
"cmd completed: status=%d\n", + WARN_ON(!cmd_node->wait_q_enabled); + mwifiex_dbg(adapter, CMD, "cmd completed: status=%d\n", adapter->cmd_wait_q.status); - *(cmd_node->condition) = true; - - if (adapter->cmd_wait_q.status == -ETIMEDOUT) - mwifiex_dbg(adapter, ERROR, "cmd timeout\n"); - else - wake_up_interruptible(&adapter->cmd_wait_q.wait); + *cmd_node->condition = true; + wake_up_interruptible(&adapter->cmd_wait_q.wait); return 0; } @@ -531,6 +531,65 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac) return NULL; } +static struct mwifiex_sta_node * +mwifiex_get_tdls_sta_entry(struct mwifiex_private *priv, u8 status) +{ + struct mwifiex_sta_node *node; + + list_for_each_entry(node, &priv->sta_list, list) { + if (node->tdls_status == status) + return node; + } + + return NULL; +} + +/* If tdls channel switching is on-going, tx data traffic should be + * blocked until the switching stage completed. + */ +u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv) +{ + struct mwifiex_sta_node *sta_ptr; + + if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + return false; + + sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_CHAN_SWITCHING); + if (sta_ptr) + return true; + + return false; +} + +u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv) +{ + struct mwifiex_sta_node *sta_ptr; + + if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + return false; + + sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_IN_OFF_CHAN); + if (sta_ptr) + return true; + + return false; +} + +/* If tdls channel switching is on-going or tdls operate on off-channel, + * cmd path should be blocked until tdls switched to base-channel. + */ +u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv) +{ + if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + return true; + + if (mwifiex_is_tdls_chan_switching(priv) || + mwifiex_is_tdls_off_chan(priv)) + return false; + + return true; +} + /* This function will add a sta_node entry to associated station list * table with the given mac address. * If entry exist already, existing entry is returned. diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index a8ea21c33..173d3663c 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -160,9 +160,10 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) ra_list->tdls_link = false; ra_list->ba_status = BA_SETUP_NONE; ra_list->amsdu_in_ampdu = false; + ra_list->tx_paused = false; if (!mwifiex_queuing_ra_based(priv)) { - if (mwifiex_get_tdls_link_status(priv, ra) == - TDLS_SETUP_COMPLETE) { + if (mwifiex_is_tdls_link_setup + (mwifiex_get_tdls_link_status(priv, ra))) { ra_list->tdls_link = true; ra_list->is_11n_enabled = mwifiex_tdls_peer_11n_enabled(priv, ra); @@ -448,6 +449,11 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter) } } +int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter) +{ + return atomic_read(&adapter->bypass_tx_pending) ? false : true; +} + /* * This function checks if WMM Tx queue is empty. 
*/ @@ -459,6 +465,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; ++i) { priv = adapter->priv[i]; + if (priv && !priv->port_open) + continue; if (priv && atomic_read(&priv->wmm.tx_pkts_queued)) return false; } @@ -580,6 +588,10 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) + mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + atomic_set(&priv->adapter->bypass_tx_pending, 0); + idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL); idr_destroy(&priv->ack_status_frames); } @@ -603,6 +615,88 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid, return NULL; } +void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac, + u8 tx_pause) +{ + struct mwifiex_ra_list_tbl *ra_list; + u32 pkt_cnt = 0, tx_pkts_queued; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); + + for (i = 0; i < MAX_NUM_TID; ++i) { + ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac); + if (ra_list && ra_list->tx_paused != tx_pause) { + pkt_cnt += ra_list->total_pkt_count; + ra_list->tx_paused = tx_pause; + if (tx_pause) + priv->wmm.pkts_paused[i] += + ra_list->total_pkt_count; + else + priv->wmm.pkts_paused[i] -= + ra_list->total_pkt_count; + } + } + + if (pkt_cnt) { + tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued); + if (tx_pause) + tx_pkts_queued -= pkt_cnt; + else + tx_pkts_queued += pkt_cnt; + + atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued); + atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); + } + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); +} + +/* This function updates non-tdls peer ralist tx_pause during + * tdls channel switching + */ +void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv, + u8 *mac, u8 tx_pause) +{ + struct mwifiex_ra_list_tbl *ra_list; + u32 pkt_cnt = 0, tx_pkts_queued; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); + + for (i = 0; i < MAX_NUM_TID; ++i) { + list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list, + list) { + if (!memcmp(ra_list->ra, mac, ETH_ALEN)) + continue; + + if (ra_list && ra_list->tx_paused != tx_pause) { + pkt_cnt += ra_list->total_pkt_count; + ra_list->tx_paused = tx_pause; + if (tx_pause) + priv->wmm.pkts_paused[i] += + ra_list->total_pkt_count; + else + priv->wmm.pkts_paused[i] -= + ra_list->total_pkt_count; + } + } + } + + if (pkt_cnt) { + tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued); + if (tx_pause) + tx_pkts_queued -= pkt_cnt; + else + tx_pkts_queued += pkt_cnt; + + atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued); + atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); + } + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); +} + /* * This function retrieves an RA list node for a given TID and * RA address pair. @@ -669,6 +763,18 @@ mwifiex_is_ralist_valid(struct mwifiex_private *priv, return false; } +/* + * This function adds a packet to the bypass TX queue. + * This is a special TX queue for packets which can be sent even when port_open + * is false. + */ +void +mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv, + struct sk_buff *skb) +{ + skb_queue_tail(&priv->bypass_txq, skb); +} + /* * This function adds a packet to WMM queue.
* @@ -723,6 +829,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, !mwifiex_is_skb_mgmt_frame(skb)) { switch (tdls_status) { case TDLS_SETUP_COMPLETE: + case TDLS_CHAN_SWITCHING: + case TDLS_IN_BASE_CHAN: + case TDLS_IN_OFF_CHAN: ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; @@ -765,7 +874,10 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, atomic_set(&priv->wmm.highest_queued_prio, priv->tos_to_tid_inv[tid_down]); - atomic_inc(&priv->wmm.tx_pkts_queued); + if (ra_list->tx_paused) + priv->wmm.pkts_paused[tid_down]++; + else + atomic_inc(&priv->wmm.tx_pkts_queued); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); } @@ -970,7 +1082,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv; - if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0) + if (!priv_tmp->port_open || + (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)) continue; /* iterate over the WMM queues of the BSS */ @@ -987,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, list_for_each_entry(ptr, &tid_ptr->ra_list, list) { - if (!skb_queue_empty(&ptr->skb_head)) + if (!ptr->tx_paused && + !skb_queue_empty(&ptr->skb_head)) /* holds both locks */ goto found; } @@ -1339,6 +1453,38 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) return 0; } +void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter) +{ + struct mwifiex_tx_param tx_param; + struct sk_buff *skb; + struct mwifiex_txinfo *tx_info; + struct mwifiex_private *priv; + int i; + + if (adapter->data_sent || adapter->tx_lock_flag) + return; + + for (i = 0; i < adapter->priv_num; ++i) { + priv = adapter->priv[i]; + + if (skb_queue_empty(&priv->bypass_txq)) + continue; + + skb = skb_dequeue(&priv->bypass_txq); + tx_info = MWIFIEX_SKB_TXCB(skb); + + /* no aggregation for bypass packets */ + tx_param.next_pkt_len = 0; + + if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) { + skb_queue_head(&priv->bypass_txq, skb); + tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; + } else { + atomic_dec(&adapter->bypass_tx_pending); + } + } +} + /* * This function transmits the highest priority packet awaiting in the * WMM Queues. */
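The wmm.c hunks above all serve one invariant: wmm.tx_pkts_queued counts only packets that are currently eligible for transmission. Pausing or resuming an RA list therefore moves that list's total_pkt_count between the per-TID pkts_paused[] counters and tx_pkts_queued, a packet queued to a paused list is accounted as paused rather than queued, and mwifiex_wmm_get_highest_priolist_ptr() skips any list with tx_paused set. The standalone sketch below models just this accounting outside the kernel; it is an illustration under simplified assumptions (plain C, no ra_list_spinlock, no atomics), and the wmm_ctx/ra_list types and the update_tx_pause()/enqueue_pkt() helpers are invented stand-ins for the sketch, not driver API.

/* Standalone model of the tx_pause accounting added to wmm.c above.
 * All types and helpers here are simplified stand-ins, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NUM_TID 8

struct ra_list {
	bool tx_paused;
	unsigned int total_pkt_count;
};

struct wmm_ctx {
	unsigned int tx_pkts_queued;           /* packets eligible to send */
	unsigned int pkts_paused[MAX_NUM_TID]; /* packets parked per TID */
};

/* Mirrors the loop body of mwifiex_update_ralist_tx_pause(): flip the
 * pause state and move this list's packets between the two counters.
 */
static void update_tx_pause(struct wmm_ctx *wmm, struct ra_list *ra,
			    int tid, bool tx_pause)
{
	if (ra->tx_paused == tx_pause)
		return;	/* no state change, counters stay as they are */

	ra->tx_paused = tx_pause;
	if (tx_pause) {
		wmm->pkts_paused[tid] += ra->total_pkt_count;
		wmm->tx_pkts_queued -= ra->total_pkt_count;
	} else {
		wmm->pkts_paused[tid] -= ra->total_pkt_count;
		wmm->tx_pkts_queued += ra->total_pkt_count;
	}
}

/* Mirrors the mwifiex_wmm_add_buf_txqueue() hunk: a packet queued to a
 * paused list must not make the scheduler believe there is work to do.
 */
static void enqueue_pkt(struct wmm_ctx *wmm, struct ra_list *ra, int tid)
{
	ra->total_pkt_count++;
	if (ra->tx_paused)
		wmm->pkts_paused[tid]++;
	else
		wmm->tx_pkts_queued++;
}

int main(void)
{
	struct wmm_ctx wmm = { 0 };
	struct ra_list peer = { .tx_paused = false, .total_pkt_count = 0 };

	enqueue_pkt(&wmm, &peer, 0);
	enqueue_pkt(&wmm, &peer, 0);
	update_tx_pause(&wmm, &peer, 0, true);	/* park both packets */
	enqueue_pkt(&wmm, &peer, 0);		/* lands in the paused pool */
	printf("queued=%u paused=%u\n", wmm.tx_pkts_queued, wmm.pkts_paused[0]);
	update_tx_pause(&wmm, &peer, 0, false);	/* all three eligible again */
	printf("queued=%u paused=%u\n", wmm.tx_pkts_queued, wmm.pkts_paused[0]);
	return 0;
}

Built on its own, the sketch prints queued=0 paused=3 followed by queued=3 paused=0; the driver performs the same counter moves under ra_list_spinlock and with atomic_read()/atomic_set() on tx_pkts_queued, and the bypass_txq added alongside exists precisely for the frames that must go out even while port_open is false.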
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h index 48ece0b35..38f09762b 100644 --- a/drivers/net/wireless/mwifiex/wmm.h +++ b/drivers/net/wireless/mwifiex/wmm.h @@ -99,12 +99,16 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead) void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, struct sk_buff *skb); +void mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv, + struct sk_buff *skb); void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra); void mwifiex_rotate_priolists(struct mwifiex_private *priv, struct mwifiex_ra_list_tbl *ra, int tid); int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter); +int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter); void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter); +void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter); int mwifiex_is_ralist_valid(struct mwifiex_private *priv, struct mwifiex_ra_list_tbl *ra_list, int tid); @@ -126,6 +130,10 @@ struct mwifiex_ra_list_tbl * mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, const u8 *ra_addr); u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid); +void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac, + u8 tx_pause); +void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv, + u8 *mac, u8 tx_pause); struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid, const u8 *ra_addr); diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 4f0411775..bdb4a1b87 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -5019,35 +5019,36 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16); rcu_read_unlock(); - } - if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc && - !priv->ap_fw) { - rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates); - if (rc) - goto out; + if (changed & BSS_CHANGED_ASSOC) { + if (!priv->ap_fw) { + rc = mwl8k_cmd_set_rate(hw, vif, + ap_legacy_rates, + ap_mcs_rates); + if (rc) + goto out; - rc = mwl8k_cmd_use_fixed_rate_sta(hw); - if (rc) - goto out; - } else { - if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc && - priv->ap_fw) { - int idx; - int rate; + rc = mwl8k_cmd_use_fixed_rate_sta(hw); + if (rc) + goto out; + } else { + int idx; + int rate; - /* Use AP firmware specific rate command. - */ - idx = ffs(vif->bss_conf.basic_rates); - if (idx) - idx--; + /* Use AP firmware specific rate command. + */ + idx = ffs(vif->bss_conf.basic_rates); + if (idx) + idx--; - if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) - rate = mwl8k_rates_24[idx].hw_value; - else - rate = mwl8k_rates_50[idx].hw_value; + if (hw->conf.chandef.chan->band == + IEEE80211_BAND_2GHZ) + rate = mwl8k_rates_24[idx].hw_value; + else + rate = mwl8k_rates_50[idx].hw_value; - mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate); + mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate); + } } } diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index c41018047..7b5c55432 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -2321,8 +2321,6 @@ void free_orinocodev(struct orinoco_private *priv) struct orinoco_rx_data *rx_data, *temp; struct orinoco_scan_data *sd, *sdtemp; - wiphy_unregister(wiphy); - /* If the tasklet is scheduled when we call tasklet_kill it * will run one final time. 
However the tasklet will only * drain priv->rx_list if the hw is still available. */ diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c index c0a27377d..a956f965a 100644 --- a/drivers/net/wireless/orinoco/orinoco_cs.c +++ b/drivers/net/wireless/orinoco/orinoco_cs.c @@ -118,6 +118,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link) orinoco_cs_release(link); + wiphy_unregister(priv_to_wiphy(priv)); free_orinocodev(priv); } /* orinoco_cs_detach */ diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c index 1b543e30e..048693b6c 100644 --- a/drivers/net/wireless/orinoco/orinoco_nortel.c +++ b/drivers/net/wireless/orinoco/orinoco_nortel.c @@ -223,13 +223,15 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev, err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); - goto fail; + goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; + fail_wiphy: + wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); @@ -263,6 +265,7 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev) iowrite16(0, card->bridge_io + 10); orinoco_if_del(priv); + wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c index 74219d59d..4938a2208 100644 --- a/drivers/net/wireless/orinoco/orinoco_pci.c +++ b/drivers/net/wireless/orinoco/orinoco_pci.c @@ -173,13 +173,15 @@ static int orinoco_pci_init_one(struct pci_dev *pdev, err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); - goto fail; + goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; + fail_wiphy: + wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); @@ -203,6 +205,7 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev) struct orinoco_private *priv = pci_get_drvdata(pdev); orinoco_if_del(priv); + wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c index 8b045236b..221352027 100644 --- a/drivers/net/wireless/orinoco/orinoco_plx.c +++ b/drivers/net/wireless/orinoco/orinoco_plx.c @@ -262,13 +262,15 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); - goto fail; + goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; + fail_wiphy: + wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); @@ -299,6 +301,7 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev) struct orinoco_pci_card *card = priv->card; orinoco_if_del(priv); + wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index d6b4fe4c5..995733a32 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -1502,6 +1502,7 @@ static inline void ezusb_delete(struct ezusb_priv *upriv) if (upriv->dev) { struct orinoco_private *priv = ndev_priv(upriv->dev); orinoco_if_del(priv); + wiphy_unregister(priv_to_wiphy(upriv)); free_orinocodev(priv); } } @@ -1695,6 +1696,7 @@ 
static int ezusb_probe(struct usb_interface *interface, if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) { upriv->dev = NULL; err("%s: orinoco_if_add() failed", __func__); + wiphy_unregister(priv_to_wiphy(priv)); goto error; } upriv->dev = priv->ndev; diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 2b4ef256c..de62f5dcb 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -240,7 +240,6 @@ config RT2X00_LIB_USB config RT2X00_LIB tristate - select AVERAGE config RT2X00_LIB_FIRMWARE bool diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h index afba0739c..78cc035b2 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.h +++ b/drivers/net/wireless/rt2x00/rt2500usb.h @@ -54,7 +54,7 @@ #define CSR_REG_BASE 0x0400 #define CSR_REG_SIZE 0x0100 #define EEPROM_BASE 0x0000 -#define EEPROM_SIZE 0x006a +#define EEPROM_SIZE 0x006e #define BBP_BASE 0x0000 #define BBP_SIZE 0x0060 #define RF_BASE 0x0004 diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 66497ca83..ee7d97482 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1114,6 +1114,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0db0, 0x871c) }, { USB_DEVICE(0x0db0, 0x899a) }, /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3070) }, { USB_DEVICE(0x1b75, 0x3071) }, { USB_DEVICE(0x1b75, 0x3072) }, { USB_DEVICE(0x1b75, 0xa200) }, diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 9bb398bed..3282ddb76 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -254,6 +254,8 @@ struct link_qual { int tx_failed; }; +DECLARE_EWMA(rssi, 1024, 8) + /* * Antenna settings about the currently active link. */ @@ -285,7 +287,7 @@ struct link_ant { * Similar to the avg_rssi in the link_qual structure * this value is updated by using the walking average. */ - struct ewma rssi_ant; + struct ewma_rssi rssi_ant; }; /* @@ -314,7 +316,7 @@ struct link { /* * Currently active average RSSI value */ - struct ewma avg_rssi; + struct ewma_rssi avg_rssi; /* * Work structure for scheduling periodic link tuning. diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c index 9b941c0c1..017188e5a 100644 --- a/drivers/net/wireless/rt2x00/rt2x00link.c +++ b/drivers/net/wireless/rt2x00/rt2x00link.c @@ -33,15 +33,11 @@ */ #define DEFAULT_RSSI -128 -/* Constants for EWMA calculations. 
*/ -#define RT2X00_EWMA_FACTOR 1024 -#define RT2X00_EWMA_WEIGHT 8 - -static inline int rt2x00link_get_avg_rssi(struct ewma *ewma) +static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma) { unsigned long avg; - avg = ewma_read(ewma); + avg = ewma_rssi_read(ewma); if (avg) return -avg; @@ -76,8 +72,7 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev, static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev) { - ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR, - RT2X00_EWMA_WEIGHT); + ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant); } static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev) @@ -225,12 +220,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev, /* * Update global RSSI */ - ewma_add(&link->avg_rssi, -rxdesc->rssi); + ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi); /* * Update antenna RSSI */ - ewma_add(&ant->rssi_ant, -rxdesc->rssi); + ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi); } void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) @@ -285,8 +280,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna) */ rt2x00dev->link.count = 0; memset(qual, 0, sizeof(*qual)); - ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR, - RT2X00_EWMA_WEIGHT); + ewma_rssi_init(&rt2x00dev->link.avg_rssi); /* * Restore the VGC level as stored in the registers, diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h index d4567d12e..5da670394 100644 --- a/drivers/net/wireless/rtlwifi/pci.h +++ b/drivers/net/wireless/rtlwifi/pci.h @@ -247,6 +247,8 @@ struct rtl_pci { /* MSI support */ bool msi_support; bool using_msi; + /* interrupt clear before set */ + bool int_clear; }; struct mp_adapter { diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c index c8058aa73..629125658 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c @@ -200,7 +200,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl92c_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -209,7 +209,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; + pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; pfwdata = rtlhal->pfirmware; fwsize = rtlhal->fwsize; RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, @@ -219,10 +219,10 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware Version(%d), Signature(%#x), Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl92c_firmware_header)); + (int)sizeof(struct rtlwifi_firmware_header)); - pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); - fwsize = fwsize - sizeof(struct rtl92c_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h index 05e944e45..21bd4a533 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h @@ -37,7 +37,7 @@ #define FW_8192C_POLLING_TIMEOUT_COUNT 3000 #define IS_FW_HEADER_EXIST(_pfwhdr) \ - 
((_pfwhdr->signature&0xFFFF) == 0x88E1) + ((le16_to_cpu(_pfwhdr->signature) & 0xFFFF) == 0x88E1) #define USE_OLD_WOWLAN_DEBUG_FW 0 #define H2C_88E_RSVDPAGE_LOC_LEN 5 @@ -131,25 +131,6 @@ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -struct rtl92c_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodesize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - enum rtl8188e_h2c_cmd { H2C_88E_RSVDPAGE = 0, H2C_88E_JOINBSSRPT = 1, diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c index 0aca6f474..03cbe4cf1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c @@ -39,6 +39,7 @@ #define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) #define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) #define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) +#define BT_MASK 0x00ffffff #define RTLPRIV (struct rtl_priv *) #define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ @@ -312,7 +313,7 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) struct dig_t *digtable = &rtlpriv->dm_digtable; u32 isbt; - /* modify DIG lower bound, deal with abnorally large false alarm */ + /* modify DIG lower bound, deal with abnormally large false alarm */ if (rtlpriv->falsealm_cnt.cnt_all > 10000) { digtable->large_fa_hit++; if (digtable->forbidden_igi < digtable->cur_igvalue) { @@ -1536,13 +1537,11 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw) return false; bt_state = rtl_read_byte(rtlpriv, 0x4fd); - bt_tx = rtl_read_dword(rtlpriv, 0x488); - bt_tx = bt_tx & 0x00ffffff; - bt_pri = rtl_read_dword(rtlpriv, 0x48c); - bt_pri = bt_pri & 0x00ffffff; + bt_tx = rtl_read_dword(rtlpriv, 0x488) & BT_MASK; + bt_pri = rtl_read_dword(rtlpriv, 0x48c) & BT_MASK; polling = rtl_read_dword(rtlpriv, 0x490); - if (bt_tx == 0xffffffff && bt_pri == 0xffffffff && + if (bt_tx == BT_MASK && bt_pri == BT_MASK && polling == 0xffffffff && bt_state == 0xff) return false; diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index 14b819ea8..43fcb25c8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -221,7 +221,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl92c_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -230,19 +230,19 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; + pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; pfwdata = (u8 *)rtlhal->pfirmware; fwsize = rtlhal->fwsize; if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware Version(%d), Signature(%#x),Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl92c_firmware_header)); + (int)sizeof(struct rtlwifi_firmware_header)); - rtlhal->fw_version = pfwheader->version; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); rtlhal->fw_subversion = pfwheader->subversion; - pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); - fwsize = fwsize - sizeof(struct 
rtl92c_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } _rtl92c_enable_fw_download(hw, true); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h index e9f4281f5..864806c19 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h @@ -69,25 +69,6 @@ ((GET_CVID_CUT_VERSION(version) == \ CHIP_VENDOR_UMC_B_CUT) ? true : false) : false) -struct rtl92c_firmware_header { - __le16 signature; - u8 category; - u8 function; - __le16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - __le16 ramcodeSize; - __le16 rsvd2; - __le32 svnindex; - __le32 rsvd3; - __le32 rsvd4; - __le32 rsvd5; -}; - #define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0)) #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h index c940a8717..74a479ac3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h @@ -32,24 +32,15 @@ /*------------------------------------------------------------------------- * Chip specific *-------------------------------------------------------------------------*/ -#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */ -#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */ #define NORMAL_CHIP BIT(4) #define CHIP_VENDOR_UMC BIT(5) #define CHIP_VENDOR_UMC_B_CUT BIT(6) -#define IS_8723_SERIES(version) \ - (((version) & CHIP_8723) ? true : false) - #define IS_92C_1T2R(version) \ (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R)) #define IS_VENDOR_UMC(version) \ (((version) & CHIP_VENDOR_UMC) ? true : false) -#define IS_VENDOR_8723_A_CUT(version) \ - (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? 
\ - false : true) : false) - #define CHIP_BONDING_92C_1T2R 0x1 #define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 767358a55..25db369b5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -818,26 +818,29 @@ static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw) static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw) { - u16 value16; - + u16 value16; + u32 value32; struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS | - RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL | - RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32); - rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf); + value32 = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS | + RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL | + RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&value32)); /* Accept all multicast address */ rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF); /* Accept all management frames */ value16 = 0xFFFF; - rtl92c_set_mgt_filter(hw, value16); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER, + (u8 *)(&value16)); /* Reject all control frame - default value is 0 */ - rtl92c_set_ctrl_filter(hw, 0x0); + value16 = 0x0; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER, + (u8 *)(&value16)); /* Accept all data frames */ value16 = 0xFFFF; - rtl92c_set_data_filter(hw, value16); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DATA_FILTER, + (u8 *)(&value16)); } static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw) @@ -988,17 +991,6 @@ static void _InitPABias(struct ieee80211_hw *hw) } } -static void _update_mac_setting(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - - mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR); - mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0); - mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1); - mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2); -} - int rtl92cu_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1068,7 +1060,6 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) } _rtl92cu_hw_configure(hw); _InitPABias(hw); - _update_mac_setting(hw); rtl92c_dm_init(hw); exit: local_irq_restore(flags); @@ -1620,7 +1611,6 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); enum wireless_mode wirelessmode = mac->mode; u8 idx = 0; @@ -1829,63 +1819,10 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) u4b_ac_param); break; default: - RT_ASSERT(false, - "SetHwReg8185(): invalid aci: %d !\n", + RT_ASSERT(false, "invalid aci: %d !\n", e_aci); break; } - if (rtlusb->acm_method != EACMWAY2_SW) - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_ACM_CTRL, &e_aci); - break; - } - case HW_VAR_ACM_CTRL:{ - u8 e_aci = *val; - union aci_aifsn *p_aci_aifsn = (union aci_aifsn *) - (&(mac->ac[0].aifs)); - u8 acm = p_aci_aifsn->f.acm; - u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); - - acm_ctrl = - acm_ctrl | ((rtlusb->acm_method == 2) ? 
0x0 : 0x1); - if (acm) { - switch (e_aci) { - case AC0_BE: - acm_ctrl |= AcmHw_BeqEn; - break; - case AC2_VI: - acm_ctrl |= AcmHw_ViqEn; - break; - case AC3_VO: - acm_ctrl |= AcmHw_VoqEn; - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", - acm); - break; - } - } else { - switch (e_aci) { - case AC0_BE: - acm_ctrl &= (~AcmHw_BeqEn); - break; - case AC2_VI: - acm_ctrl &= (~AcmHw_ViqEn); - break; - case AC3_VO: - acm_ctrl &= (~AcmHw_VoqEn); - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); - break; - } - } - RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, - "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", - acm_ctrl); - rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); break; } case HW_VAR_RCR:{ @@ -1999,12 +1936,15 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_MGT_FILTER: rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val); + mac->rx_mgt_filter = *(u16 *)val; break; case HW_VAR_CTRL_FILTER: rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val); + mac->rx_ctrl_filter = *(u16 *)val; break; case HW_VAR_DATA_FILTER: rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val); + mac->rx_data_filter = *(u16 *)val; break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, @@ -2280,7 +2220,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; u8 u1tmp = 0; bool actuallyset = false; @@ -2357,20 +2296,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) { /* Enable register area 0x0-0xc. */ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0); - if (IS_HARDWARE_TYPE_8723U(rtlhal)) { - /* - * We should configure HW PDn source for WiFi - * ONLY, and then our HW will be set in - * power-down mode if PDn source from all - * functions are configured. - */ - u1tmp = rtl_read_byte(rtlpriv, - REG_MULTI_FUNC_CTRL); - rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, - (u1tmp|WL_HWPDN_EN)); - } else { - rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812); - } + rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812); } if (e_rfpowerstate_toset == ERFOFF) { if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 490a7cf7c..035713311 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -69,8 +69,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) chip_version = NORMAL_CHIP; chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0); chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0); - /* RTL8723 with BT function. */ - chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0); if (IS_VENDOR_UMC(chip_version)) chip_version |= ((value32 & CHIP_VER_RTL_MASK) ? CHIP_VENDOR_UMC_B_CUT : 0); @@ -78,10 +76,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM); chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) == CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0); - } else if (IS_8723_SERIES(chip_version)) { - value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS); - chip_version |= ((value32 & RF_RL_ID) ? 
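Note how the HW_VAR_MGT_FILTER/HW_VAR_CTRL_FILTER/HW_VAR_DATA_FILTER cases now mirror each written value into struct rtl_mac; that shadow copy is what lets _update_mac_setting() and the rtl92c_get_*_filter() register readbacks disappear later in this patch. The pattern, as a hypothetical helper (not part of the patch):

	static void write_filter_cached(struct rtl_priv *rtlpriv, u32 reg,
					u16 value, u16 *shadow)
	{
		rtl_write_word(rtlpriv, reg, value);
		*shadow = value;	/* e.g. mac->rx_mgt_filter */
	}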
- CHIP_8723_DRV_REV : 0); } } rtlhal->version = (enum version_8192c)chip_version; @@ -114,12 +108,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) case VERSION_NORMAL_UMC_CHIP_88C_B_CUT: versionid = "NORMAL_UMC_CHIP_88C_B_CUT"; break; - case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT: - versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT"; - break; - case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT: - versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT"; - break; case VERSION_TEST_CHIP_92C: versionid = "TEST_CHIP_92C"; break; @@ -405,59 +393,9 @@ void rtl92c_disable_interrupt(struct ieee80211_hw *hw) void rtl92c_set_qos(struct ieee80211_hw *hw, int aci) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u32 u4b_ac_param; rtl92c_dm_init_edca_turbo(hw); - u4b_ac_param = (u32) mac->ac[aci].aifs; - u4b_ac_param |= - ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) << - AC_PARAM_ECW_MIN_OFFSET; - u4b_ac_param |= - ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) << - AC_PARAM_ECW_MAX_OFFSET; - u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) << - AC_PARAM_TXOP_OFFSET; - RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n", - aci, u4b_ac_param); - switch (aci) { - case AC1_BK: - rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param); - break; - case AC0_BE: - rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); - break; - case AC2_VI: - rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param); - break; - case AC3_VO: - rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param); - break; - default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); - break; - } -} - -/*------------------------------------------------------------------------- - * HW MAC Address - *-------------------------------------------------------------------------*/ -void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr) -{ - u32 i; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - for (i = 0 ; i < ETH_ALEN ; i++) - rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i)); - - RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, - "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n", - rtl_read_byte(rtlpriv, REG_MACID), - rtl_read_byte(rtlpriv, REG_MACID+1), - rtl_read_byte(rtlpriv, REG_MACID+2), - rtl_read_byte(rtlpriv, REG_MACID+3), - rtl_read_byte(rtlpriv, REG_MACID+4), - rtl_read_byte(rtlpriv, REG_MACID+5)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci); } void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size) @@ -656,47 +594,6 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T) rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value); } -u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - return rtl_read_word(rtlpriv, REG_RXFLTMAP0); -} - -void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter); -} - -u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - return rtl_read_word(rtlpriv, REG_RXFLTMAP1); -} - -void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter); -} - -u16 rtl92c_get_data_filter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - return rtl_read_word(rtlpriv, REG_RXFLTMAP2); -} - -void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - 
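rtl92c_set_qos() now only runs the EDCA-turbo pass and forwards the AC index through HW_VAR_AC_PARAM; the register encoding itself is unchanged from the open-coded version removed above. Distilled from that removed code:

	/* sketch distilled from the removed rtl92c_set_qos() body */
	static u32 pack_edca_param(u8 aifs, u16 cw_min, u16 cw_max, u16 txop)
	{
		u32 v = aifs;

		v |= ((u32)cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
		v |= ((u32)cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET;
		v |= (u32)txop << AC_PARAM_TXOP_OFFSET;
		return v;	/* written to REG_EDCA_*_PARAM */
	}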
rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter); -} /*==============================================================*/ static u8 _rtl92c_query_rxpwrpercentage(char antpower) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h index e34f0f14c..553a4bfac 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h @@ -48,7 +48,6 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci); /*--------------------------------------------------------------- * Hardware init functions *---------------------------------------------------------------*/ -void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr); void rtl92c_init_interrupt(struct ieee80211_hw *hw); void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size); @@ -73,15 +72,6 @@ void rtl92c_init_retry_function(struct ieee80211_hw *hw); void rtl92c_disable_fast_edca(struct ieee80211_hw *hw); void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T); -/* For filter */ -u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw); -void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter); -u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw); -void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter); -u16 rtl92c_get_data_filter(struct ieee80211_hw *hw); -void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter); - - u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw); struct rx_fwinfo_92c { diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c index 587b8c505..7c1db7e75 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c @@ -420,7 +420,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw) "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n", de_digtable->recover_cnt, de_digtable->rx_gain_min); - /* deal with abnorally large false alarm */ + /* deal with abnormally large false alarm */ if (falsealm_cnt->cnt_all > 10000) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG(): Abnormally false alarm case\n"); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h index 1646e7c3d..8a38daa31 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h @@ -110,28 +110,6 @@ #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val) -struct rtl92d_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodeSize; - u16 rsvd2; - - u32 svnindex; - u32 rsvd3; - - u32 rsvd4; - u32 rsvd5; -}; - int rtl92d_download_fw(struct ieee80211_hw *hw); void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c index 1961b8e28..bb06fe836 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c @@ -3515,14 +3515,14 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw) for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; rfpath++) { if (rtlhal->current_bandtype == BAND_ON_2_4G) { - /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */ + /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) | BIT(18), 0); /* RF0x0b[16:14] =3b'111 */ 
rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B, 0x1c000, 0x07); } else { - /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */ + /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) | BIT(18), (BIT(16) | BIT(8)) >> 8); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c index 232865cc3..0708eedd9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c @@ -198,7 +198,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl92c_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -207,8 +207,8 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; - rtlhal->fw_version = pfwheader->version; + pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); rtlhal->fw_subversion = pfwheader->subversion; pfwdata = (u8 *)rtlhal->pfirmware; fwsize = rtlhal->fwsize; @@ -219,10 +219,10 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware Version(%d), Signature(%#x),Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl92c_firmware_header)); + (int)sizeof(struct rtlwifi_firmware_header)); - pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); - fwsize = fwsize - sizeof(struct rtl92c_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware no Header, Signature(%#x)\n", diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h index 3e2a48e5f..069da1e7e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h @@ -33,7 +33,7 @@ #define FW_8192C_POLLING_TIMEOUT_COUNT 3000 #define IS_FW_HEADER_EXIST(_pfwhdr) \ - ((_pfwhdr->signature&0xFFF0) == 0x92E0) + ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x92E0) #define USE_OLD_WOWLAN_DEBUG_FW 0 #define H2C_92E_RSVDPAGE_LOC_LEN 5 @@ -89,25 +89,6 @@ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -struct rtl92c_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodesize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - enum rtl8192e_h2c_cmd { H2C_92E_RSVDPAGE = 0, H2C_92E_MSRRPT = 1, diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c index a863a44f9..018340aed 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c @@ -449,7 +449,7 @@ static void _rtl92ee_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n", rate_section, path, txnum); break; - }; + } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d\n", band); @@ -489,7 +489,7 @@ static u8 _rtl92ee_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n", rate_section, path, txnum); 
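The signature tests converted in this patch (IS_FW_HEADER_EXIST here, is_fw_header for the 8723 variants) all gain a le16_to_cpu(): the header is stored little-endian in the firmware image, so the raw comparison only worked on little-endian hosts. The 8192ee macro, written out as a function:

	static bool rtl92ee_fw_has_header(const struct rtlwifi_firmware_header *hdr)
	{
		/* the low nibble is masked off, as in the macro above */
		return (le16_to_cpu(hdr->signature) & 0xFFF0) == 0x92E0;
	}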
break; - }; + } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d()\n", band); @@ -853,7 +853,7 @@ static u8 _rtl92ee_get_rate_section_index(u32 regaddr) else if (regaddr >= 0xE20 && regaddr <= 0xE4C) index = (u8)((regaddr - 0xE20) / 4); break; - }; + } return index; } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c index 5111af829..213e76a7c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c @@ -205,9 +205,9 @@ bool rtl8723e_get_btc_status(void) return true; } -static bool is_fw_header(struct rtl8723e_firmware_header *hdr) +static bool is_fw_header(struct rtlwifi_firmware_header *hdr) { - return (hdr->signature & 0xfff0) == 0x2300; + return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300; } static struct rtl_hal_ops rtl8723e_hal_ops = { diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c index 8e7789648..645f000ab 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c @@ -209,9 +209,9 @@ bool rtl8723be_get_btc_status(void) return true; } -static bool is_fw_header(struct rtl8723e_firmware_header *hdr) +static bool is_fw_header(struct rtlwifi_firmware_header *hdr) { - return (hdr->signature & 0xfff0) == 0x5300; + return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300; } static struct rtl_hal_ops rtl8723be_hal_ops = { diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c index dd698e7e9..a2f5e89be 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c @@ -253,7 +253,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl8723e_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -263,7 +263,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware; + pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; pfwdata = rtlhal->pfirmware; fwsize = rtlhal->fwsize; @@ -275,10 +275,10 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "Firmware Version(%d), Signature(%#x), Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl8723e_firmware_header)); + (int)sizeof(struct rtlwifi_firmware_header)); - pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header); - fwsize = fwsize - sizeof(struct rtl8723e_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) { diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h index 3ebafc809..8ea372d16 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h @@ -50,25 +50,6 @@ enum version_8723e { VERSION_UNKNOWN = 0xFF, }; -struct rtl8723e_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodesize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - enum rtl8723be_cmd { H2C_8723BE_RSVDPAGE = 
0, H2C_8723BE_JOINBSSRPT = 1, diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c index 95e95626b..525eb2346 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c @@ -210,7 +210,7 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl8821a_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -228,8 +228,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) return 1; pfwheader = - (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware; - rtlhal->fw_version = pfwheader->version; + (struct rtlwifi_firmware_header *)rtlhal->wowlan_firmware; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); rtlhal->fw_subversion = pfwheader->subversion; pfwdata = (u8 *)rtlhal->wowlan_firmware; fwsize = rtlhal->wowlan_fwsize; @@ -238,8 +238,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) return 1; pfwheader = - (struct rtl8821a_firmware_header *)rtlhal->pfirmware; - rtlhal->fw_version = pfwheader->version; + (struct rtlwifi_firmware_header *)rtlhal->pfirmware; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); rtlhal->fw_subversion = pfwheader->subversion; pfwdata = (u8 *)rtlhal->pfirmware; fwsize = rtlhal->fwsize; @@ -255,8 +255,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) "Firmware Version(%d), Signature(%#x)\n", pfwheader->version, pfwheader->signature); - pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header); - fwsize = fwsize - sizeof(struct rtl8821a_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } if (rtlhal->mac_func_enable) { diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h index 591c14c0b..8f5b4aade 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h @@ -34,10 +34,10 @@ #define FW_8821AE_POLLING_TIMEOUT_COUNT 6000 #define IS_FW_HEADER_EXIST_8812(_pfwhdr) \ - ((_pfwhdr->signature&0xFFF0) == 0x9500) + ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x9500) #define IS_FW_HEADER_EXIST_8821(_pfwhdr) \ - ((_pfwhdr->signature&0xFFF0) == 0x2100) + ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x2100) #define USE_OLD_WOWLAN_DEBUG_FW 0 @@ -137,25 +137,6 @@ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -struct rtl8821a_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodeSize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - enum rtl8812_c2h_evt { C2H_8812_DBG = 0, C2H_8812_LB = 1, diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index b7f18e215..6e9418ed9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -2253,11 +2253,28 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) } } +static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp = rtl_read_dword(rtlpriv, REG_HISR); + + rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = 
rtl_read_dword(rtlpriv, REG_HISRE); + rtl_write_dword(rtlpriv, REG_HISRE, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HSISR); + rtl_write_dword(rtlpriv, REG_HSISR, tmp); +} + void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + if (!rtlpci->int_clear) + rtl8821ae_clear_interrupt(hw);/* clear it here first */ + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c index 265fcab63..f04a96891 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c @@ -96,6 +96,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtl8821ae_bt_reg_init(hw); rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; @@ -167,6 +168,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; @@ -308,6 +310,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = { .swctrl_lps = false, .fwctrl_lps = true, .msi_support = true, + .int_clear = true, .debug = DBG_EMERG, .disable_watchdog = 0, }; @@ -437,6 +440,7 @@ module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog, bool, 0444); +module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); @@ -444,6 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); +MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 1)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index 2b770b5e2..4544752a2 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -222,6 +222,25 @@ enum rf_tx_num { #define WOL_REASON_REALWOW_V2_WAKEUPPKT BIT(9) #define WOL_REASON_REALWOW_V2_ACKLOST BIT(10) +struct rtlwifi_firmware_header { + __le16 signature; + u8 category; + u8 function; + __le16 version; + u8 subversion; + u8 rsvd1; + u8 month; + u8 date; + u8 hour; + u8 minute; + __le16 ramcodeSize; + __le16 rsvd2; + __le32 svnindex; + __le32 rsvd3; + __le32 rsvd4; + __le32 rsvd5; +}; + struct txpower_info_2g { u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; u8 
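The new rtl8821ae_clear_interrupt() uses a read-then-write-back sequence on HISR/HISRE/HSISR, i.e. a write-1-to-clear acknowledge: writing back exactly the bits that were read clears those, and only those, pending interrupts. As an illustrative helper (not in the patch):

	static void ack_pending_irqs(struct rtl_priv *rtlpriv, u32 isr_reg)
	{
		u32 pending = rtl_read_dword(rtlpriv, isr_reg);

		rtl_write_dword(rtlpriv, isr_reg, pending);
	}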
index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; @@ -2064,16 +2083,12 @@ struct rtl_tcb_desc { bool tx_enable_sw_calc_duration; }; -struct rtl92c_firmware_header; - struct rtl_wow_pattern { u8 type; u16 crc; u32 mask[4]; }; -struct rtl8723e_firmware_header; - struct rtl_hal_ops { int (*init_sw_vars) (struct ieee80211_hw *hw); void (*deinit_sw_vars) (struct ieee80211_hw *hw); @@ -2177,7 +2192,7 @@ struct rtl_hal_ops { void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); bool (*get_btc_status) (void); - bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr); + bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr); u32 (*rx_command_packet)(struct ieee80211_hw *hw, struct rtl_stats status, struct sk_buff *skb); void (*add_wowlan_pattern)(struct ieee80211_hw *hw, @@ -2234,6 +2249,9 @@ struct rtl_mod_params { /* default 0: 1 means disable */ bool disable_watchdog; + + /* default 1: 1 means do not clear interrupts before enabling them */ + bool int_clear; }; struct rtl_hal_usbint_cfg { diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c index 0c0d5cd98..7c355fff2 100644 --- a/drivers/net/wireless/ti/wl12xx/scan.c +++ b/drivers/net/wireless/ti/wl12xx/scan.c @@ -118,7 +118,11 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, if (passive) scan_options |= WL1271_SCAN_OPT_PASSIVE; - cmd->params.role_id = wlvif->role_id; + /* scan on the dev role if the regular one is not started */ + if (wlcore_is_p2p_mgmt(wlvif)) + cmd->params.role_id = wlvif->dev_role_id; + else + cmd->params.role_id = wlvif->role_id; if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) { ret = -EINVAL; diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c index 67f2a0eec..4be040930 100644 --- a/drivers/net/wireless/ti/wl18xx/acx.c +++ b/drivers/net/wireless/ti/wl18xx/acx.c @@ -282,3 +282,30 @@ out: kfree(acx); return ret; } + +int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl) +{ + struct acx_dynamic_fw_traces_cfg *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d", + wl->dynamic_fw_traces); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces); + + ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx config dynamic fw traces failed: %d", ret); + goto out; + } +out: + kfree(acx); + return ret; +} diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h index 4afccd4b9..342a2993e 100644 --- a/drivers/net/wireless/ti/wl18xx/acx.h +++ b/drivers/net/wireless/ti/wl18xx/acx.h @@ -35,7 +35,8 @@ enum { ACX_PEER_CAP = 0x0056, ACX_INTERRUPT_NOTIFY = 0x0057, ACX_RX_BA_FILTER = 0x0058, - ACX_AP_SLEEP_CFG = 0x0059 + ACX_AP_SLEEP_CFG = 0x0059, + ACX_DYNAMIC_TRACES_CFG = 0x005A, }; /* numbers of bits the length field takes (add 1 for the actual number) */ @@ -92,27 +93,26 @@ struct wl18xx_acx_checksum_state { struct wl18xx_acx_error_stats { - u32 error_frame; - u32 error_null_Frame_tx_start; - u32 error_numll_frame_cts_start; - u32 error_bar_retry; - u32 error_frame_cts_nul_flid; -} __packed; - -struct wl18xx_acx_debug_stats { - u32 debug1; - u32 debug2; - u32 debug3; - u32 debug4; - u32 debug5; - u32 debug6; -} __packed; - -struct wl18xx_acx_ring_stats { - u32 prepared_descs; - u32 tx_cmplt; + u32 error_frame_non_ctrl; + u32 error_frame_ctrl; + u32 error_frame_during_protection; + u32 
null_frame_tx_start; + u32 null_frame_cts_start; + u32 bar_retry; + u32 num_frame_cts_nul_flid; + u32 tx_abort_failure; + u32 tx_resume_failure; + u32 rx_cmplt_db_overflow_cnt; + u32 elp_while_rx_exch; + u32 elp_while_tx_exch; + u32 elp_while_tx; + u32 elp_while_nvic_pending; + u32 rx_excessive_frame_len; + u32 burst_mismatch; + u32 tbc_exch_mismatch; } __packed; +#define NUM_OF_RATES_INDEXES 30 struct wl18xx_acx_tx_stats { u32 tx_prepared_descs; u32 tx_cmplt; @@ -122,7 +122,7 @@ struct wl18xx_acx_tx_stats { u32 tx_data_programmed; u32 tx_burst_programmed; u32 tx_starts; - u32 tx_imm_resp; + u32 tx_stop; u32 tx_start_templates; u32 tx_start_int_templates; u32 tx_start_fw_gen; @@ -131,13 +131,14 @@ struct wl18xx_acx_tx_stats { u32 tx_exch; u32 tx_retry_template; u32 tx_retry_data; + u32 tx_retry_per_rate[NUM_OF_RATES_INDEXES]; u32 tx_exch_pending; u32 tx_exch_expiry; u32 tx_done_template; u32 tx_done_data; u32 tx_done_int_template; - u32 tx_frame_checksum; - u32 tx_checksum_result; + u32 tx_cfe1; + u32 tx_cfe2; u32 frag_called; u32 frag_mpdu_alloc_failed; u32 frag_init_called; @@ -165,11 +166,8 @@ struct wl18xx_acx_rx_stats { u32 rx_cmplt_task; u32 rx_phy_hdr; u32 rx_timeout; + u32 rx_rts_timeout; u32 rx_timeout_wa; - u32 rx_wa_density_dropped_frame; - u32 rx_wa_ba_not_expected; - u32 rx_frame_checksum; - u32 rx_checksum_result; u32 defrag_called; u32 defrag_init_called; u32 defrag_in_process_called; @@ -179,6 +177,7 @@ struct wl18xx_acx_rx_stats { u32 decrypt_key_not_found; u32 defrag_need_decrypt; u32 rx_tkip_replays; + u32 rx_xfr; } __packed; struct wl18xx_acx_isr_stats { @@ -193,21 +192,13 @@ struct wl18xx_acx_pwr_stats { u32 connection_out_of_sync; u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD]; u32 rcvd_awake_bcns_cnt; -} __packed; - -struct wl18xx_acx_event_stats { - u32 calibration; - u32 rx_mismatch; - u32 rx_mem_empty; -} __packed; - -struct wl18xx_acx_ps_poll_stats { - u32 ps_poll_timeouts; - u32 upsd_timeouts; - u32 upsd_max_ap_turn; - u32 ps_poll_max_ap_turn; - u32 ps_poll_utilization; - u32 upsd_utilization; + u32 sleep_time_count; + u32 sleep_time_avg; + u32 sleep_cycle_avg; + u32 sleep_percent; + u32 ap_sleep_active_conf; + u32 ap_sleep_user_conf; + u32 ap_sleep_counter; } __packed; struct wl18xx_acx_rx_filter_stats { @@ -227,11 +218,11 @@ struct wl18xx_acx_rx_rate_stats { } __packed; #define AGGR_STATS_TX_AGG 16 -#define AGGR_STATS_TX_RATE 16 #define AGGR_STATS_RX_SIZE_LEN 16 struct wl18xx_acx_aggr_stats { - u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE]; + u32 tx_agg_rate[AGGR_STATS_TX_AGG]; + u32 tx_agg_len[AGGR_STATS_TX_AGG]; u32 rx_size[AGGR_STATS_RX_SIZE_LEN]; } __packed; @@ -240,8 +231,6 @@ struct wl18xx_acx_aggr_stats { struct wl18xx_acx_pipeline_stats { u32 hs_tx_stat_fifo_int; u32 hs_rx_stat_fifo_int; - u32 tcp_tx_stat_fifo_int; - u32 tcp_rx_stat_fifo_int; u32 enc_tx_stat_fifo_int; u32 enc_rx_stat_fifo_int; u32 rx_complete_stat_fifo_int; @@ -249,38 +238,61 @@ struct wl18xx_acx_pipeline_stats { u32 post_proc_swi; u32 sec_frag_swi; u32 pre_to_defrag_swi; - u32 defrag_to_csum_swi; - u32 csum_to_rx_xfer_swi; + u32 defrag_to_rx_xfer_swi; u32 dec_packet_in; u32 dec_packet_in_fifo_full; u32 dec_packet_out; - u32 cs_rx_packet_in; - u32 cs_rx_packet_out; u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO]; + u16 padding; +} __packed; + +#define DIVERSITY_STATS_NUM_OF_ANT 2 + +struct wl18xx_acx_diversity_stats { + u32 num_of_packets_per_ant[DIVERSITY_STATS_NUM_OF_ANT]; + u32 total_num_of_toggles; } __packed; -struct wl18xx_acx_mem_stats { - u32 
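All of these statistics blocks mirror a firmware ABI, which is why every member is fixed-width and the structs are __packed. When reshuffling them, a compile-time size check catches layout drift cheaply; a hypothetical example for the new diversity block:

	static inline void wl18xx_diversity_stats_layout_check(void)
	{
		/* num_of_packets_per_ant[2] plus total_num_of_toggles */
		BUILD_BUG_ON(sizeof(struct wl18xx_acx_diversity_stats) !=
			     (DIVERSITY_STATS_NUM_OF_ANT + 1) * sizeof(u32));
	}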
rx_free_mem_blks; - u32 tx_free_mem_blks; - u32 fwlog_free_mem_blks; - u32 fw_gen_free_mem_blks; +struct wl18xx_acx_thermal_stats { + u16 irq_thr_low; + u16 irq_thr_high; + u16 tx_stop; + u16 tx_resume; + u16 false_irq; + u16 adc_source_unexpected; +} __packed; + +#define WL18XX_NUM_OF_CALIBRATIONS_ERRORS 18 +struct wl18xx_acx_calib_failure_stats { + u16 fail_count[WL18XX_NUM_OF_CALIBRATIONS_ERRORS]; + u32 calib_count; +} __packed; + +struct wl18xx_roaming_stats { + s32 rssi_level; +} __packed; + +struct wl18xx_dfs_stats { + u32 num_of_radar_detections; } __packed; struct wl18xx_acx_statistics { struct acx_header header; struct wl18xx_acx_error_stats error; - struct wl18xx_acx_debug_stats debug; struct wl18xx_acx_tx_stats tx; struct wl18xx_acx_rx_stats rx; struct wl18xx_acx_isr_stats isr; struct wl18xx_acx_pwr_stats pwr; - struct wl18xx_acx_ps_poll_stats ps_poll; struct wl18xx_acx_rx_filter_stats rx_filter; struct wl18xx_acx_rx_rate_stats rx_rate; struct wl18xx_acx_aggr_stats aggr_size; struct wl18xx_acx_pipeline_stats pipeline; - struct wl18xx_acx_mem_stats mem; + struct wl18xx_acx_diversity_stats diversity; + struct wl18xx_acx_thermal_stats thermal; + struct wl18xx_acx_calib_failure_stats calib; + struct wl18xx_roaming_stats roaming; + struct wl18xx_dfs_stats dfs; } __packed; struct wl18xx_acx_clear_statistics { @@ -367,6 +379,15 @@ struct acx_ap_sleep_cfg { u8 idle_conn_thresh; } __packed; +/* + * ACX_DYNAMIC_TRACES_CFG + * configure the FW dynamic traces + */ +struct acx_dynamic_fw_traces_cfg { + struct acx_header header; + __le32 dynamic_fw_traces; +} __packed; + int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap, u32 sdio_blk_size, u32 extra_mem_blks, u32 len_field_size); @@ -380,5 +401,6 @@ int wl18xx_acx_set_peer_cap(struct wl1271 *wl, int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action); int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action); int wl18xx_acx_ap_sleep(struct wl1271 *wl); +int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl); #endif /* __WL18XX_ACX_H__ */ diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 5fbd2230f..4edfe2839 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -36,18 +36,23 @@ DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics) -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u"); - -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, 
tx_resume_failure, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx_exch, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u"); @@ -57,7 +62,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u"); @@ -66,13 +71,15 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate, + NUM_OF_RATES_INDEXES); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u"); @@ -97,11 +104,8 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u"); @@ -111,6 +115,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u"); @@ -120,14 +125,13 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread, PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD); WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u"); - - 
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u"); @@ -141,14 +145,14 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50); -WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate, - AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate, + AGGR_STATS_TX_AGG); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len, + AGGR_STATS_TX_AGG); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size, AGGR_STATS_RX_SIZE_LEN); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u"); @@ -156,21 +160,32 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full, PIPE_STATS_HW_FIFO); -WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant, + DIVERSITY_STATS_NUM_OF_ANT); +WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u"); + +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u"); + +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count, + WL18XX_NUM_OF_CALIBRATIONS_ERRORS); 
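Each WL18XX_DEBUGFS_FWSTATS_FILE() line expands to a read-only debugfs file over one counter of the most recent statistics snapshot. Roughly the following shape, with the field path simplified (the real macro lives in wlcore's debugfs.h):

	static ssize_t bar_retry_read(struct file *file, char __user *ubuf,
				      size_t count, loff_t *ppos)
	{
		struct wl1271 *wl = file->private_data;
		struct wl18xx_acx_statistics *stats = wl->stats.fw_stats;

		return wl1271_format_buffer(ubuf, count, ppos, "%u\n",
					    stats->error.bar_retry);
	}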
+WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u"); + +WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d"); + +WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d"); static ssize_t conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -281,6 +296,55 @@ static const struct file_operations radar_detection_ops = { .llseek = default_llseek, }; +static ssize_t dynamic_fw_traces_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &value); + if (ret < 0) + return ret; + + mutex_lock(&wl->mutex); + + wl->dynamic_fw_traces = value; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl18xx_acx_dynamic_fw_traces(wl); + if (ret < 0) + count = ret; + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return count; +} + +static ssize_t dynamic_fw_traces_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + return wl1271_format_buffer(userbuf, count, ppos, + "%d\n", wl->dynamic_fw_traces); +} + +static const struct file_operations dynamic_fw_traces_ops = { + .read = dynamic_fw_traces_read, + .write = dynamic_fw_traces_write, + .open = simple_open, + .llseek = default_llseek, +}; + int wl18xx_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir) { @@ -301,18 +365,23 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(clear_fw_stats, stats); - DEBUGFS_FWSTATS_ADD(debug, debug1); - DEBUGFS_FWSTATS_ADD(debug, debug2); - DEBUGFS_FWSTATS_ADD(debug, debug3); - DEBUGFS_FWSTATS_ADD(debug, debug4); - DEBUGFS_FWSTATS_ADD(debug, debug5); - DEBUGFS_FWSTATS_ADD(debug, debug6); - - DEBUGFS_FWSTATS_ADD(error, error_frame); - DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start); - DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start); - DEBUGFS_FWSTATS_ADD(error, error_bar_retry); - DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid); + DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl); + DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl); + DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection); + DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start); + DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start); + DEBUGFS_FWSTATS_ADD(error, bar_retry); + DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid); + DEBUGFS_FWSTATS_ADD(error, tx_abort_failure); + DEBUGFS_FWSTATS_ADD(error, tx_resume_failure); + DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt); + DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch); + DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch); + DEBUGFS_FWSTATS_ADD(error, elp_while_tx); + DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending); + DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len); + DEBUGFS_FWSTATS_ADD(error, burst_mismatch); + DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch); DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs); DEBUGFS_FWSTATS_ADD(tx, tx_cmplt); @@ -322,7 +391,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed); DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed); DEBUGFS_FWSTATS_ADD(tx, tx_starts); - DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp); + DEBUGFS_FWSTATS_ADD(tx, tx_stop); DEBUGFS_FWSTATS_ADD(tx, tx_start_templates); DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates); DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen); @@ -331,13 +400,14 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, 
DEBUGFS_FWSTATS_ADD(tx, tx_exch); DEBUGFS_FWSTATS_ADD(tx, tx_retry_template); DEBUGFS_FWSTATS_ADD(tx, tx_retry_data); + DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate); DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending); DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry); DEBUGFS_FWSTATS_ADD(tx, tx_done_template); DEBUGFS_FWSTATS_ADD(tx, tx_done_data); DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template); - DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum); - DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result); + DEBUGFS_FWSTATS_ADD(tx, tx_cfe1); + DEBUGFS_FWSTATS_ADD(tx, tx_cfe2); DEBUGFS_FWSTATS_ADD(tx, frag_called); DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed); DEBUGFS_FWSTATS_ADD(tx, frag_init_called); @@ -362,11 +432,8 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task); DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr); DEBUGFS_FWSTATS_ADD(rx, rx_timeout); + DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout); DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa); - DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame); - DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected); - DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum); - DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result); DEBUGFS_FWSTATS_ADD(rx, defrag_called); DEBUGFS_FWSTATS_ADD(rx, defrag_init_called); DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called); @@ -376,6 +443,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found); DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt); DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays); + DEBUGFS_FWSTATS_ADD(rx, rx_xfr); DEBUGFS_FWSTATS_ADD(isr, irqs); @@ -384,13 +452,13 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync); DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread); DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt); - - DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts); - DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts); - DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn); - DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn); - DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization); - DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization); + DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count); + DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg); + DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg); + DEBUGFS_FWSTATS_ADD(pwr, sleep_percent); + DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf); + DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf); + DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter); DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter); DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter); @@ -404,12 +472,11 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates); - DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate); + DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate); + DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len); DEBUGFS_FWSTATS_ADD(aggr_size, rx_size); DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int); - DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int); - DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int); DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int); DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int); DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int); @@ -417,22 +484,33 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi); DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi); DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi); - DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi); - DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi); + DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi); DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in); 
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full); DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out); - DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in); - DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out); DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full); - DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks); - DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks); - DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks); - DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks); + DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant); + DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles); + + DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low); + DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high); + DEBUGFS_FWSTATS_ADD(thermal, tx_stop); + DEBUGFS_FWSTATS_ADD(thermal, tx_resume); + DEBUGFS_FWSTATS_ADD(thermal, false_irq); + DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected); + + DEBUGFS_FWSTATS_ADD(calib, fail_count); + + DEBUGFS_FWSTATS_ADD(calib, calib_count); + + DEBUGFS_FWSTATS_ADD(roaming, rssi_level); + + DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections); DEBUGFS_ADD(conf, moddir); DEBUGFS_ADD(radar_detection, moddir); + DEBUGFS_ADD(dynamic_fw_traces, moddir); return 0; diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c index 548bb9e7e..09c7e098f 100644 --- a/drivers/net/wireless/ti/wl18xx/event.c +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -112,6 +112,14 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl, return 0; } +static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb) +{ + u32 clock; + /* convert the MSB+LSB to a u32 TSF value */ + clock = (tsf_msb << 16) | tsf_lsb; + wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock); +} + int wl18xx_process_mailbox_events(struct wl1271 *wl) { struct wl18xx_event_mailbox *mbox = wl->mbox; @@ -128,6 +136,11 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) wl18xx_scan_completed(wl, wl->scan_wlvif); } + if (vector & TIME_SYNC_EVENT_ID) + wlcore_event_time_sync(wl, + mbox->time_sync_tsf_msb, + mbox->time_sync_tsf_lsb); + if (vector & RADAR_DETECTED_EVENT_ID) { wl1271_info("radar event: channel %d type %s", mbox->radar_channel, diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h index 266ee8783..f3d4f1337 100644 --- a/drivers/net/wireless/ti/wl18xx/event.h +++ b/drivers/net/wireless/ti/wl18xx/event.h @@ -38,8 +38,9 @@ enum { REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18), DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19), PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20), - SMART_CONFIG_SYNC_EVENT_ID = BIT(22), - SMART_CONFIG_DECODE_EVENT_ID = BIT(23), + SMART_CONFIG_SYNC_EVENT_ID = BIT(22), + SMART_CONFIG_DECODE_EVENT_ID = BIT(23), + TIME_SYNC_EVENT_ID = BIT(24), }; enum wl18xx_radar_types { @@ -95,13 +96,16 @@ struct wl18xx_event_mailbox { /* smart config sync channel */ u8 sc_sync_channel; u8 sc_sync_band; - u8 padding2[2]; + /* time sync msb*/ + u16 time_sync_tsf_msb; /* radar detect */ u8 radar_channel; u8 radar_type; - u8 padding3[2]; + /* time sync lsb*/ + u16 time_sync_tsf_lsb; + } __packed; int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 413fb3cbd..42fbc0458 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -422,6 +422,8 @@ static struct wlcore_conf wl18xx_conf = { .num_probe_reqs = 2, .rssi_threshold = -90, .snr_threshold = 0, + .num_short_intervals = SCAN_MAX_SHORT_INTERVALS, + .long_interval = 30000, }, 
.ht = { .rx_ba_win_size = 32, @@ -1026,8 +1028,8 @@ static int wl18xx_boot(struct wl1271 *wl) CHANNEL_SWITCH_COMPLETE_EVENT_ID | DFS_CHANNELS_CONFIG_COMPLETE_EVENT | SMART_CONFIG_SYNC_EVENT_ID | - SMART_CONFIG_DECODE_EVENT_ID; -; + SMART_CONFIG_DECODE_EVENT_ID | + TIME_SYNC_EVENT_ID; wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID; @@ -1159,6 +1161,11 @@ static int wl18xx_hw_init(struct wl1271 *wl) if (ret < 0) return ret; + /* set the dynamic fw traces bitmap */ + ret = wl18xx_acx_dynamic_fw_traces(wl); + if (ret < 0) + return ret; + if (checksum_param) { ret = wl18xx_acx_set_checksum_state(wl); if (ret != 0) @@ -1797,7 +1804,7 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = { static const struct ieee80211_iface_limit wl18xx_iface_limits[] = { { - .max = 3, + .max = 2, .types = BIT(NL80211_IFTYPE_STATION), }, { @@ -1806,6 +1813,10 @@ static const struct ieee80211_iface_limit wl18xx_iface_limits[] = { BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT), }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, }; static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = { @@ -1813,6 +1824,48 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = { .max = 2, .types = BIT(NL80211_IFTYPE_AP), }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; + +static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; + +static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_GO), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, }; static const struct ieee80211_iface_combination diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c index 98666f235..c938c494c 100644 --- a/drivers/net/wireless/ti/wl18xx/scan.c +++ b/drivers/net/wireless/ti/wl18xx/scan.c @@ -51,7 +51,11 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - cmd->role_id = wlvif->role_id; + /* scan on the dev role if the regular one is not started */ + if (wlcore_is_p2p_mgmt(wlvif)) + cmd->role_id = wlvif->dev_role_id; + else + cmd->role_id = wlvif->role_id; if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) { ret = -EINVAL; @@ -223,9 +227,20 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl, SCAN_TYPE_PERIODIC); wl18xx_adjust_channels(cmd, cmd_channels); - cmd->short_cycles_sec = 0; - cmd->long_cycles_sec = cpu_to_le16(req->interval); - cmd->short_cycles_count = 0; + if (c->num_short_intervals && c->long_interval && + c->long_interval > req->interval) { + cmd->short_cycles_msec = cpu_to_le16(req->interval); + cmd->long_cycles_msec = cpu_to_le16(c->long_interval); + cmd->short_cycles_count = c->num_short_intervals; + } else { + cmd->short_cycles_msec = 0; + cmd->long_cycles_msec = cpu_to_le16(req->interval); + cmd->short_cycles_count = 0; + } + wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d", + le16_to_cpu(cmd->short_cycles_msec), + le16_to_cpu(cmd->long_cycles_msec), + cmd->short_cycles_count); cmd->total_cycles = 0; diff --git a/drivers/net/wireless/ti/wl18xx/scan.h 
b/drivers/net/wireless/ti/wl18xx/scan.h index 2e636aa5d..66a763f64 100644 --- a/drivers/net/wireless/ti/wl18xx/scan.h +++ b/drivers/net/wireless/ti/wl18xx/scan.h @@ -74,8 +74,8 @@ struct wl18xx_cmd_scan_params { u8 dfs; /* number of dfs channels in 5ghz */ u8 passive_active; /* number of passive before active channels 2.4ghz */ - __le16 short_cycles_sec; - __le16 long_cycles_sec; + __le16 short_cycles_msec; + __le16 long_cycles_msec; u8 short_cycles_count; u8 total_cycles; /* 0 - infinite */ u8 padding[2]; diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 68919f8d4..f01d24baf 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -2003,12 +2003,15 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, wlvif->bss_type == BSS_TYPE_IBSS))) return -EINVAL; - ret = wl12xx_cmd_role_enable(wl, - wl12xx_wlvif_to_vif(wlvif)->addr, - WL1271_ROLE_DEVICE, - &wlvif->dev_role_id); - if (ret < 0) - goto out; + /* the dev role is already started for p2p mgmt interfaces */ + if (!wlcore_is_p2p_mgmt(wlvif)) { + ret = wl12xx_cmd_role_enable(wl, + wl12xx_wlvif_to_vif(wlvif)->addr, + WL1271_ROLE_DEVICE, + &wlvif->dev_role_id); + if (ret < 0) + goto out; + } ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel); if (ret < 0) @@ -2023,7 +2026,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, out_stop: wl12xx_cmd_role_stop_dev(wl, wlvif); out_disable: - wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); + if (!wlcore_is_p2p_mgmt(wlvif)) + wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); out: return ret; } @@ -2052,10 +2056,42 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) if (ret < 0) goto out; - ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); - if (ret < 0) - goto out; + if (!wlcore_is_p2p_mgmt(wlvif)) { + ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); + if (ret < 0) + goto out; + } out: return ret; } + +int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 feature, u8 enable, u8 value) +{ + struct wlcore_cmd_generic_cfg *cmd; + int ret; + + wl1271_debug(DEBUG_CMD, + "cmd generic cfg (role %d feature %d enable %d value %d)", + wlvif->role_id, feature, enable, value); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->role_id = wlvif->role_id; + cmd->feature = feature; + cmd->enable = enable; + cmd->value = value; + + ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send generic cfg command"); + goto out_free; + } +out_free: + kfree(cmd); + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg); diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index e14cd407a..8dc46c0a4 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -92,6 +92,8 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, enum ieee80211_band band); int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl); +int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 feature, u8 enable, u8 value); int wl12xx_cmd_config_fwlog(struct wl1271 *wl); int wl12xx_cmd_start_fwlog(struct wl1271 *wl); int wl12xx_cmd_stop_fwlog(struct wl1271 *wl); @@ -652,6 +654,19 @@ struct wl12xx_cmd_regdomain_dfs_config { u8 padding[3]; } __packed; +enum wlcore_generic_cfg_feature { + 
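A caller exercises the new generic command like this (illustrative; WLCORE_CFG_FEATURE_RADAR_DEBUG is the only feature defined so far, and the meaning of the value argument is feature-specific):

	int ret = wlcore_cmd_generic_cfg(wl, wlvif,
					 WLCORE_CFG_FEATURE_RADAR_DEBUG,
					 1 /* enable */, 0 /* feature-defined */);
	if (ret < 0)
		wl1271_error("generic cfg failed: %d", ret);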
WLCORE_CFG_FEATURE_RADAR_DEBUG = 2, +}; + +struct wlcore_cmd_generic_cfg { + struct wl1271_cmd_header header; + + u8 role_id; + u8 feature; + u8 enable; + u8 value; +} __packed; + struct wl12xx_cmd_config_fwlog { struct wl1271_cmd_header header; diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h index 166add00b..52a9d1b14 100644 --- a/drivers/net/wireless/ti/wlcore/conf.h +++ b/drivers/net/wireless/ti/wlcore/conf.h @@ -1186,6 +1186,15 @@ struct conf_sched_scan_settings { /* SNR threshold to be used for filtering */ s8 snr_threshold; + + /* + * number of short intervals scheduled scan cycles before + * switching to long intervals + */ + u8 num_short_intervals; + + /* interval between each long scheduled scan cycle (in ms) */ + u16 long_interval; } __packed; struct conf_ht_setting { @@ -1352,7 +1361,7 @@ struct conf_recovery_settings { * version, the two LSB are the lower driver's private conf * version. */ -#define WLCORE_CONF_VERSION (0x0006 << 16) +#define WLCORE_CONF_VERSION (0x0007 << 16) #define WLCORE_CONF_MASK 0xffff0000 #define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \ sizeof(struct wlcore_conf)) diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c index 5ca1fb161..e92f2639a 100644 --- a/drivers/net/wireless/ti/wlcore/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -348,7 +348,7 @@ static int wl12xx_init_fwlog(struct wl1271 *wl) } /* generic sta initialization (non vif-specific) */ -static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) +int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; diff --git a/drivers/net/wireless/ti/wlcore/init.h b/drivers/net/wireless/ti/wlcore/init.h index a45fbfdde..fd1cdb6bc 100644 --- a/drivers/net/wireless/ti/wlcore/init.h +++ b/drivers/net/wireless/ti/wlcore/init.h @@ -35,5 +35,6 @@ int wl1271_hw_init(struct wl1271 *wl); int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif); int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif); int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif); +int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif); #endif diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index e9a7795dd..b26d88d8c 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1792,6 +1792,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, wl->wow_enabled = true; wl12xx_for_each_wlvif(wl, wlvif) { + if (wlcore_is_p2p_mgmt(wlvif)) + continue; + ret = wl1271_configure_suspend(wl, wlvif, wow); if (ret < 0) { mutex_unlock(&wl->mutex); @@ -1901,6 +1904,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw) goto out; wl12xx_for_each_wlvif(wl, wlvif) { + if (wlcore_is_p2p_mgmt(wlvif)) + continue; + wl1271_configure_resume(wl, wlvif); } @@ -2256,6 +2262,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) wlvif->p2p = 1; /* fall-through */ case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_DEVICE: wlvif->bss_type = BSS_TYPE_STA_BSS; break; case NL80211_IFTYPE_ADHOC: @@ -2477,7 +2484,8 @@ static void wlcore_hw_queue_iter(void *data, u8 *mac, { struct wlcore_hw_queue_iter_data *iter_data = data; - if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE)) + if (vif->type == NL80211_IFTYPE_P2P_DEVICE || + WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE)) return; if (iter_data->cur_running || vif == 
iter_data->vif) { @@ -2495,6 +2503,11 @@ static int wlcore_allocate_hw_queue_base(struct wl1271 *wl, struct wlcore_hw_queue_iter_data iter_data = {}; int i, q_base; + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + return 0; + } + iter_data.vif = vif; /* mark all bits taken by active interfaces */ @@ -2618,14 +2631,27 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, goto out; } - ret = wl12xx_cmd_role_enable(wl, vif->addr, - role_type, &wlvif->role_id); - if (ret < 0) - goto out; + if (!wlcore_is_p2p_mgmt(wlvif)) { + ret = wl12xx_cmd_role_enable(wl, vif->addr, + role_type, &wlvif->role_id); + if (ret < 0) + goto out; - ret = wl1271_init_vif_specific(wl, vif); - if (ret < 0) - goto out; + ret = wl1271_init_vif_specific(wl, vif); + if (ret < 0) + goto out; + + } else { + ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE, + &wlvif->dev_role_id); + if (ret < 0) + goto out; + + /* needed mainly for configuring rate policies */ + ret = wl1271_sta_hw_init(wl, wlvif); + if (ret < 0) + goto out; + } list_add(&wlvif->list, &wl->wlvif_list); set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags); @@ -2696,9 +2722,15 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, wl12xx_stop_dev(wl, wlvif); } - ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); - if (ret < 0) - goto deinit; + if (!wlcore_is_p2p_mgmt(wlvif)) { + ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); + if (ret < 0) + goto deinit; + } else { + ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); + if (ret < 0) + goto deinit; + } wl1271_ps_elp_sleep(wl); } @@ -3088,6 +3120,9 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, { int ret; + if (wlcore_is_p2p_mgmt(wlvif)) + return 0; + if (conf->power_level != wlvif->power_level) { ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level); if (ret < 0) @@ -3207,6 +3242,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, goto out; wl12xx_for_each_wlvif(wl, wlvif) { + if (wlcore_is_p2p_mgmt(wlvif)) + continue; + if (wlvif->bss_type != BSS_TYPE_AP_BSS) { if (*total & FIF_ALLMULTI) ret = wl1271_acx_group_address_tbl(wl, wlvif, @@ -4837,6 +4875,9 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u8 ps_scheme; int ret = 0; + if (wlcore_is_p2p_mgmt(wlvif)) + return 0; + mutex_lock(&wl->mutex); wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); @@ -6078,8 +6119,10 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); wl->hw->wiphy->max_scan_ssids = 1; wl->hw->wiphy->max_sched_scan_ssids = 16; wl->hw->wiphy->max_match_sets = 16; diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index e12597428..5b2927391 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -74,7 +74,14 @@ static void wl1271_rx_status(struct wl1271 *wl, if (desc->rate <= wl->hw_min_ht_rate) status->flag |= RX_FLAG_HT; - status->signal = desc->rssi; + /* + * Read the signal level and antenna diversity indication. + * The msb in the signal level is always set as it is a + * negative number. + * The antenna indication is the msb of the rssi. 
+ */ + status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7)); + status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7); /* * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h index a3b1618db..f5a7087cf 100644 --- a/drivers/net/wireless/ti/wlcore/rx.h +++ b/drivers/net/wireless/ti/wlcore/rx.h @@ -30,6 +30,9 @@ #define WL1271_RX_MAX_RSSI -30 #define WL1271_RX_MIN_RSSI -95 +#define RSSI_LEVEL_BITMASK 0x7F +#define ANT_DIVERSITY_BITMASK BIT(7) + #define SHORT_PREAMBLE_BIT BIT(0) #define OFDM_RATE_BIT BIT(6) #define PBCC_RATE_BIT BIT(7) diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h index 4dadd0c62..782eb297c 100644 --- a/drivers/net/wireless/ti/wlcore/scan.h +++ b/drivers/net/wireless/ti/wlcore/scan.h @@ -83,6 +83,12 @@ struct wl1271_cmd_trigger_scan_to { #define MAX_CHANNELS_5GHZ 42 #define SCAN_MAX_CYCLE_INTERVALS 16 + +/* The FW intervals can take up to 16 entries. + * The 1st entry isn't used (scan is immediate). The last + * entry should be used for the long_interval + */ +#define SCAN_MAX_SHORT_INTERVALS (SCAN_MAX_CYCLE_INTERVALS - 2) #define SCAN_MAX_BANDS 3 enum { diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index ea7e07abc..c172da56b 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -293,7 +293,8 @@ static int wl1271_probe(struct sdio_func *func, /* Use block mode for transferring over one block size of data */ func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; - if (wlcore_probe_of(&func->dev, &irq, &pdev_data)) + ret = wlcore_probe_of(&func->dev, &irq, &pdev_data); + if (ret) goto out_free_glue; /* if sdio can keep power while host is suspended, enable wow */ diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 7f363fa56..a1b6040e6 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -500,6 +500,9 @@ struct wl1271 { /* interface combinations supported by the hw */ const struct ieee80211_iface_combination *iface_combinations; u8 n_iface_combinations; + + /* dynamic fw traces */ + u32 dynamic_fw_traces; }; int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index ab3f04ea2..23507ceb4 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -503,6 +503,11 @@ struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif) return container_of((void *)wlvif, struct ieee80211_vif, drv_priv); } +static inline bool wlcore_is_p2p_mgmt(struct wl12xx_vif *wlvif) +{ + return wl12xx_wlvif_to_vif(wlvif)->type == NL80211_IFTYPE_P2P_DEVICE; +} + #define wl12xx_for_each_wlvif(wl, wlvif) \ list_for_each_entry(wlvif, &wl->wlvif_list, list) diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 8a495b318..a7bf74727 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -200,22 +200,27 @@ struct xenvif_queue { /* Per-queue data for xenvif */ struct xenvif_stats stats; }; -/* Maximum number of Rx slots a to-guest packet may use, including the - * slot needed for GSO meta-data. 
- */ -#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1) - enum state_bit_shift { /* This bit marks that the vif is connected */ VIF_STATUS_CONNECTED, }; +struct xenvif_mcast_addr { + struct list_head entry; + struct rcu_head rcu; + u8 addr[6]; +}; + +#define XEN_NETBK_MCAST_MAX 64 + struct xenvif { /* Unique identifier for this interface. */ domid_t domid; unsigned int handle; u8 fe_dev_addr[6]; + struct list_head fe_mcast_addr; + unsigned int fe_mcast_count; /* Frontend feature information. */ int gso_mask; @@ -224,6 +229,7 @@ struct xenvif { u8 can_sg:1; u8 ip_csum:1; u8 ipv6_csum:1; + u8 multicast_control:1; /* Is this interface disabled? True when backend discovers * frontend is rogue. @@ -306,11 +312,6 @@ int xenvif_dealloc_kthread(void *data); void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); -/* Determine whether the needed number of slots (req) are available, - * and set req_event if not. - */ -bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed); - void xenvif_carrier_on(struct xenvif *vif); /* Callback from stack when TX packet can be released */ @@ -325,9 +326,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) queue->pending_prod + queue->pending_cons; } -/* Callback from stack when TX packet can be released */ -void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); - irqreturn_t xenvif_interrupt(int irq, void *dev_id); extern bool separate_tx_rx_irq; @@ -344,4 +342,8 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, struct sk_buff *skb); void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue); +/* Multicast control */ +bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr); +void xenvif_mcast_addr_list_free(struct xenvif *vif); + #endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 28577a315..e7bd63eb2 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -171,6 +171,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) !xenvif_schedulable(vif)) goto drop; + if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) { + struct ethhdr *eth = (struct ethhdr *)skb->data; + + if (!xenvif_mcast_match(vif, eth->h_dest)) + goto drop; + } + cb = XENVIF_RX_CB(skb); cb->expires = jiffies + vif->drain_timeout; @@ -427,6 +434,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, vif->num_queues = 0; spin_lock_init(&vif->lock); + INIT_LIST_HEAD(&vif->fe_mcast_addr); dev->netdev_ops = &xenvif_netdev_ops; dev->hw_features = NETIF_F_SG | @@ -661,6 +669,8 @@ void xenvif_disconnect(struct xenvif *vif) xenvif_unmap_frontend_rings(queue); } + + xenvif_mcast_addr_list_free(vif); } /* Reverse the relevant parts of xenvif_init_queue(). 
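
[Illustrative aside, not part of the patch: the multicast control added to
xen-netback above keeps the addresses requested by the frontend in an
RCU-protected list, so the xenvif_start_xmit() fast path can test membership
without taking a lock, while the add/del writers run only from NAPI poll and
are therefore serialized. A minimal standalone sketch of that pattern follows;
the struct and helper names here are invented for illustration, but the
list/RCU calls are the standard kernel ones the patch itself uses.]

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[ETH_ALEN];
};

static LIST_HEAD(mcast_list);

/* writer side: assumed serialized by the caller (NAPI poll in the patch) */
static int mcast_add(const u8 *addr)
{
	struct mcast_addr *m = kzalloc(sizeof(*m), GFP_ATOMIC);

	if (!m)
		return -ENOMEM;
	ether_addr_copy(m->addr, addr);
	list_add_tail_rcu(&m->entry, &mcast_list);
	return 0;
}

/* writer side: unlink, then free only after all current readers are done */
static void mcast_del(const u8 *addr)
{
	struct mcast_addr *m;

	list_for_each_entry_rcu(m, &mcast_list, entry) {
		if (ether_addr_equal(addr, m->addr)) {
			list_del_rcu(&m->entry);
			kfree_rcu(m, rcu);
			break;
		}
	}
}

/* reader side: lockless lookup, safe against a concurrent add/del */
static bool mcast_match(const u8 *addr)
{
	struct mcast_addr *m;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(m, &mcast_list, entry) {
		if (ether_addr_equal(addr, m->addr)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

[RCU fits here because the reader runs on the transmit hot path and must not
block, while writes are rare (one per frontend add/del request) and already
serialized; a spinlock would penalize every transmitted multicast frame.]
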
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3f44b522b..ec98d4391 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
 	return i & (MAX_PENDING_REQS-1);
 }
 
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+	if (vif->gso_mask)
+		return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
+	else
+		return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+}
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
+	int needed;
+
+	needed = xenvif_rx_ring_slots_needed(queue->vif);
 
 	do {
 		prod = queue->rx.sring->req_prod;
@@ -314,7 +325,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		} else {
 			copy_gop->source.domid = DOMID_SELF;
 			copy_gop->source.u.gmfn =
-				virt_to_mfn(page_address(page));
+				virt_to_gfn(page_address(page));
 		}
 		copy_gop->source.offset = offset;
 
@@ -513,7 +524,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 	skb_queue_head_init(&rxq);
 
-	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+	while (xenvif_rx_ring_slots_available(queue)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
 		queue->last_rx_time = jiffies;
 
@@ -1157,6 +1168,80 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 	return false;
 }
 
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Too many multicast addresses\n");
+		return -ENOSPC;
+	}
+
+	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+	if (!mcast)
+		return -ENOMEM;
+
+	ether_addr_copy(mcast->addr, addr);
+	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+	vif->fe_mcast_count++;
+
+	return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			--vif->fe_mcast_count;
+			list_del_rcu(&mcast->entry);
+			kfree_rcu(mcast, rcu);
+			break;
+		}
+	}
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+	rcu_read_unlock();
+
+	return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+	/* No need for locking or RCU here. NAPI poll and TX queue
+	 * are stopped.
+	 */
+	while (!list_empty(&vif->fe_mcast_addr)) {
+		struct xenvif_mcast_addr *mcast;
+
+		mcast = list_first_entry(&vif->fe_mcast_addr,
+					 struct xenvif_mcast_addr,
+					 entry);
+		--vif->fe_mcast_count;
+		list_del(&mcast->entry);
+		kfree(mcast);
+	}
+}
+
 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				 int budget,
 				 unsigned *copy_ops,
@@ -1215,6 +1300,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
 
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq,
+					 (ret == 0) ?
+					 XEN_NETIF_RSP_OKAY :
+					 XEN_NETIF_RSP_ERROR);
+			push_tx_responses(queue);
+			continue;
+		}
+
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			continue;
+		}
+
 		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
@@ -1296,7 +1406,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
 
 		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-			virt_to_mfn(skb->data);
+			virt_to_gfn(skb->data);
 		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
 		queue->tx_copy_ops[*copy_ops].dest.offset =
 			offset_in_page(skb->data);
@@ -1839,8 +1949,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return !queue->stalled
-		&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
+	return !queue->stalled && prod - cons < 1
 		&& time_after(jiffies,
 			      queue->last_rx_time + queue->vif->stall_timeout);
 }
@@ -1852,14 +1961,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return queue->stalled
-		&& prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+	return queue->stalled && prod - cons >= 1;
 }
 
 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
 	return (!skb_queue_empty(&queue->rx_queue)
-		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+		&& xenvif_rx_ring_slots_available(queue))
 		|| (queue->vif->stall_timeout &&
 		    (xenvif_rx_queue_stalled(queue) ||
 		     xenvif_rx_queue_ready(queue)))
@@ -2006,8 +2114,11 @@ static int __init netback_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	/* Allow as many queues as there are CPUs, by default */
-	xenvif_max_queues = num_online_cpus();
+	/* Allow as many queues as there are CPUs if user has not
+	 * specified a value.
+	 */
+	if (xenvif_max_queues == 0)
+		xenvif_max_queues = num_online_cpus();
 
 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index ec383b0f5..56ebd8267 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -327,6 +327,14 @@ static int netback_probe(struct xenbus_device *dev,
 			goto abort_transaction;
 		}
 
+		/* We support multicast-control. */
+		err = xenbus_printf(xbt, dev->nodename,
+				    "feature-multicast-control", "%d", 1);
+		if (err) {
+			message = "writing feature-multicast-control";
+			goto abort_transaction;
+		}
+
 		err = xenbus_transaction_end(xbt, 0);
 	} while (err == -EAGAIN);
 
@@ -780,6 +788,12 @@ static void connect(struct backend_info *be)
 	/* Use the number of queues requested by the frontend */
 	be->vif->queues = vzalloc(requested_num_queues *
 				  sizeof(struct xenvif_queue));
+	if (!be->vif->queues) {
+		xenbus_dev_fatal(dev, -ENOMEM,
+				 "allocating queues");
+		return;
+	}
+
 	be->vif->num_queues = requested_num_queues;
 	be->vif->stalled_queues = requested_num_queues;
 
@@ -1016,6 +1030,11 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 		val = 0;
 	vif->ipv6_csum = !!val;
 
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
+			 "%d", &val) < 0)
+		val = 0;
+	vif->multicast_control = !!val;
+
 	return 0;
 }
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5ff0cfd14..6febc053a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -291,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		struct sk_buff *skb;
 		unsigned short id;
 		grant_ref_t ref;
-		unsigned long pfn;
+		unsigned long gfn;
 		struct xen_netif_rx_request *req;
 
 		skb = xennet_alloc_one_rx_buffer(queue);
@@ -307,12 +307,12 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		BUG_ON((signed short)ref < 0);
 		queue->grant_rx_ref[id] = ref;
 
-		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+		gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
 		req = RING_GET_REQUEST(&queue->rx, req_prod);
 		gnttab_grant_foreign_access_ref(ref,
 						queue->info->xbdev->otherend_id,
-						pfn_to_mfn(pfn),
+						gfn,
 						0);
 
 		req->id = id;
@@ -430,8 +430,10 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 	BUG_ON((signed short)ref < 0);
 
-	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-					page_to_mfn(page), GNTMAP_readonly);
+	gnttab_grant_foreign_access_ref(ref,
+					queue->info->xbdev->otherend_id,
+					xen_page_to_gfn(page),
+					GNTMAP_readonly);
 
 	queue->tx_skbs[id].skb = skb;
 	queue->grant_tx_page[id] = page;
@@ -1336,7 +1338,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
 	netif_carrier_off(info->netdev);
 
-	for (i = 0; i < num_queues; ++i) {
+	for (i = 0; i < num_queues && info->queues; ++i) {
 		struct netfront_queue *queue = &info->queues[i];
 
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
@@ -1704,19 +1706,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
 }
 
 static int xennet_create_queues(struct netfront_info *info,
-				unsigned int num_queues)
+				unsigned int *num_queues)
 {
 	unsigned int i;
 	int ret;
 
-	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
 			       GFP_KERNEL);
 	if (!info->queues)
 		return -ENOMEM;
 
 	rtnl_lock();
 
-	for (i = 0; i < num_queues; i++) {
+	for (i = 0; i < *num_queues; i++) {
 		struct netfront_queue *queue = &info->queues[i];
 
 		queue->id = i;
@@ -1726,7 +1728,7 @@ static int xennet_create_queues(struct netfront_info *info,
 		if (ret < 0) {
 			dev_warn(&info->netdev->dev,
 				 "only created %d queues\n", i);
-			num_queues = i;
+			*num_queues = i;
 			break;
 		}
 
@@ -1736,11 +1738,11 @@ static int xennet_create_queues(struct netfront_info *info,
 		napi_enable(&queue->napi);
 	}
 
-	netif_set_real_num_tx_queues(info->netdev, num_queues);
+	netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
 	rtnl_unlock();
 
-	if (num_queues == 0) {
+	if (*num_queues == 0) {
 		dev_err(&info->netdev->dev, "no queues\n");
 		return -EINVAL;
 	}
@@ -1786,7 +1788,7 @@ static int talk_to_netback(struct xenbus_device *dev,
 	if (info->queues)
 		xennet_destroy_queues(info);
 
-	err = xennet_create_queues(info, num_queues);
+	err = xennet_create_queues(info, &num_queues);
 	if (err < 0)
 		goto destroy_ring;
 
@@ -2102,7 +2104,8 @@ static int xennet_remove(struct xenbus_device *dev)
 
 	unregister_netdev(info->netdev);
 
-	xennet_destroy_queues(info);
+	if (info->queues)
+		xennet_destroy_queues(info);
 	xennet_free_netdev(info->netdev);
 
 	return 0;
@@ -2131,8 +2134,11 @@ static int __init netif_init(void)
 
 	pr_info("Initialising Xen virtual ethernet driver\n");
 
-	/* Allow as many queues as there are CPUs, by default */
-	xennet_max_queues = num_online_cpus();
+	/* Allow as many queues as there are CPUs if user has not
+	 * specified a value.
+	 */
+	if (xennet_max_queues == 0)
+		xennet_max_queues = num_online_cpus();
 
 	return xenbus_register_frontend(&netfront_driver);
 }
-- 
cgit v1.2.3
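
[Illustrative aside, not part of the patch: the wlcore rx change above packs
an antenna-diversity flag into the most significant bit of the 8-bit
desc->rssi field and keeps a 7-bit signal level in the remaining bits; since
the level is a negative dBm value, its sign bit is forced back on after
masking. A standalone userspace model of the unpacking, with the sample value
chosen purely for illustration:]

#include <stdint.h>
#include <stdio.h>

#define RSSI_LEVEL_BITMASK	0x7f
#define ANT_DIVERSITY_BITMASK	0x80

static void decode_rssi(uint8_t raw, int8_t *signal, uint8_t *antenna)
{
	/* restore the sign bit: reported levels are always negative dBm */
	*signal = (int8_t)((raw & RSSI_LEVEL_BITMASK) | 0x80);
	/* the antenna indication is the msb of the raw field */
	*antenna = (raw & ANT_DIVERSITY_BITMASK) >> 7;
}

int main(void)
{
	int8_t signal;
	uint8_t ant;

	/* raw 0xc6: antenna bit set, level 0x46 | 0x80 = 0xc6 = -58 dBm */
	decode_rssi(0xc6, &signal, &ant);
	printf("signal %d dBm, antenna %u\n", signal, ant);
	return 0;
}
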