Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/3com/3c59x.c21
-rw-r--r--drivers/net/ethernet/3com/Kconfig18
-rw-r--r--drivers/net/ethernet/8390/Kconfig26
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile4
-rw-r--r--drivers/net/ethernet/adaptec/Kconfig4
-rw-r--r--drivers/net/ethernet/adi/Kconfig2
-rw-r--r--drivers/net/ethernet/agere/Kconfig4
-rw-r--r--drivers/net/ethernet/allwinner/Kconfig3
-rw-r--r--drivers/net/ethernet/alteon/Kconfig4
-rw-r--r--drivers/net/ethernet/amd/Kconfig20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h155
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c17
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c38
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c110
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c350
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c79
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c411
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c1332
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h239
-rw-r--r--drivers/net/ethernet/apm/xgene/Makefile2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c46
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h8
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c264
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h30
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c200
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h49
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c73
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h20
-rw-r--r--drivers/net/ethernet/apple/Kconfig7
-rw-r--r--drivers/net/ethernet/arc/Kconfig4
-rw-r--r--drivers/net/ethernet/atheros/Kconfig4
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig3
-rw-r--r--drivers/net/ethernet/broadcom/b44.h8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c172
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h26
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c55
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c74
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c43
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c57
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c83
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c61
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c9
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/brocade/Kconfig4
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h14
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h16
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h10
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c71
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h23
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c101
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h84
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h30
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h176
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h199
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c101
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h70
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c673
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h19
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c121
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c67
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h62
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c2
-rw-r--r--drivers/net/ethernet/cadence/Kconfig2
-rw-r--r--drivers/net/ethernet/cadence/macb.c182
-rw-r--r--drivers/net/ethernet/cadence/macb.h43
-rw-r--r--drivers/net/ethernet/cavium/Kconfig54
-rw-r--r--drivers/net/ethernet/cavium/Makefile5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/Makefile16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c796
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.h107
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h535
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.c198
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.h33
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h51
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c1217
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c3666
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h673
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_image.h57
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h424
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c723
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c1304
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h649
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c987
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h426
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h319
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h237
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c199
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h75
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h224
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c189
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h227
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c765
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c178
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.h140
-rw-r--r--drivers/net/ethernet/cavium/thunder/Makefile11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h433
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c932
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h213
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c606
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c1364
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c1550
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h389
-rw-r--r--drivers/net/ethernet/cavium/thunder/q_struct.h701
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c969
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h220
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h221
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c389
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c294
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c498
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c382
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2222
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h86
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h184
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h72
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c123
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c36
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig12
-rw-r--r--drivers/net/ethernet/cisco/Kconfig4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c31
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c10
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h91
-rw-r--r--drivers/net/ethernet/dec/Kconfig4
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig10
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/dlink/Kconfig4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c4
-rw-r--r--drivers/net/ethernet/emulex/Kconfig4
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig9
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h55
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h55
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c11
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c693
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/ezchip/Kconfig26
-rw-r--r--drivers/net/ethernet/ezchip/Makefile1
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c658
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.h336
-rw-r--r--drivers/net/ethernet/faraday/Kconfig4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig8
-rw-r--r--drivers/net/ethernet/freescale/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c215
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c11
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c36
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c162
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c350
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig4
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig4
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c1
-rw-r--r--drivers/net/ethernet/hp/Kconfig8
-rw-r--r--drivers/net/ethernet/i825xx/Kconfig4
-rw-r--r--drivers/net/ethernet/ibm/Kconfig4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c10
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c41
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h5
-rw-r--r--drivers/net/ethernet/icplus/ipg.c2
-rw-r--r--drivers/net/ethernet/icplus/ipg.h2
-rw-r--r--drivers/net/ethernet/intel/Kconfig4
-rw-r--r--drivers/net/ethernet/intel/e100.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c19
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c142
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c240
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c38
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c68
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c27
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c18
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c146
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c164
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c110
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c12
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c163
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c91
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h272
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c896
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c9
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c24
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c244
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c404
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c290
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h521
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c679
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c860
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1915
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c252
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c350
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/flow_table.c422
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mad.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c345
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c444
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c360
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.h64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c345
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h171
-rw-r--r--drivers/net/ethernet/micrel/Kconfig4
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c5
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/microchip/Kconfig4
-rw-r--r--drivers/net/ethernet/moxa/Kconfig4
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/myricom/Kconfig4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c6
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig7
-rw-r--r--drivers/net/ethernet/neterion/Kconfig4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c7
-rw-r--r--drivers/net/ethernet/nuvoton/Kconfig4
-rw-r--r--drivers/net/ethernet/nvidia/Kconfig8
-rw-r--r--drivers/net/ethernet/oki-semi/Kconfig4
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig8
-rw-r--r--drivers/net/ethernet/pasemi/Kconfig4
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c77
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c4
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c46
-rw-r--r--drivers/net/ethernet/rdc/Kconfig4
-rw-r--r--drivers/net/ethernet/realtek/Kconfig14
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/renesas/Kconfig29
-rw-r--r--drivers/net/ethernet/renesas/Makefile4
-rw-r--r--drivers/net/ethernet/renesas/ravb.h832
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1826
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c359
-rw-r--r--drivers/net/ethernet/rocker/rocker.c1616
-rw-r--r--drivers/net/ethernet/rocker/rocker.h28
-rw-r--r--drivers/net/ethernet/seeq/Kconfig4
-rw-r--r--drivers/net/ethernet/sfc/Kconfig9
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c1290
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c746
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h75
-rw-r--r--drivers/net/ethernet/sfc/efx.c333
-rw-r--r--drivers/net/ethernet/sfc/efx.h15
-rw-r--r--drivers/net/ethernet/sfc/enum.h2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c7
-rw-r--r--drivers/net/ethernet/sfc/falcon.c33
-rw-r--r--drivers/net/ethernet/sfc/farch.c64
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c228
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h16
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h434
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c13
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h36
-rw-r--r--drivers/net/ethernet/sfc/nic.h251
-rw-r--r--drivers/net/ethernet/sfc/ptp.c40
-rw-r--r--drivers/net/ethernet/sfc/siena.c27
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c156
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.h79
-rw-r--r--drivers/net/ethernet/sfc/sriov.c83
-rw-r--r--drivers/net/ethernet/sfc/sriov.h31
-rw-r--r--drivers/net/ethernet/sfc/tx.c3
-rw-r--r--drivers/net/ethernet/sgi/Kconfig8
-rw-r--r--drivers/net/ethernet/silan/Kconfig4
-rw-r--r--drivers/net/ethernet/sis/Kconfig4
-rw-r--r--drivers/net/ethernet/sis/sis900.h4
-rw-r--r--drivers/net/ethernet/smsc/Kconfig18
-rw-r--r--drivers/net/ethernet/stmicro/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig90
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c365
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c99
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c350
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c127
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h9
-rw-r--r--drivers/net/ethernet/sun/Kconfig4
-rw-r--r--drivers/net/ethernet/sun/cassini.c1
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/tehuti/Kconfig4
-rw-r--r--drivers/net/ethernet/ti/Kconfig8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c43
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c45
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c54
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c67
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c30
-rw-r--r--drivers/net/ethernet/tile/tilepro.c3
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c24
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/ethernet/tundra/Kconfig4
-rw-r--r--drivers/net/ethernet/via/Kconfig10
-rw-r--r--drivers/net/ethernet/via/via-rhine.c250
-rw-r--r--drivers/net/ethernet/wiznet/Kconfig4
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c16
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h108
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c292
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c30
-rw-r--r--drivers/net/ethernet/xircom/Kconfig4
-rw-r--r--drivers/net/ethernet/xscale/Kconfig4
413 files changed, 50841 insertions, 8324 deletions
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 41095ebad..753887d02 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
- int j;
pr_emerg("%s: no memory for rx ring\n", dev->name);
- for (j = 0; j < i; j++) {
- if (vp->rx_skbuff[j]) {
- dev_kfree_skb(vp->rx_skbuff[j]);
- vp->rx_skbuff[j] = NULL;
- }
- }
retval = -ENOMEM;
- goto err_free_irq;
+ goto err_free_skb;
}
/* Wrap the ring. */
vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
if (!retval)
goto out;
-err_free_irq:
+err_free_skb:
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (vp->rx_skbuff[i]) {
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
free_irq(dev->irq, dev);
err:
if (vortex_debug > 1)
@@ -2382,6 +2381,7 @@ boomerang_interrupt(int irq, void *dev_id)
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
+ int handled = 0;
ioaddr = vp->ioaddr;
@@ -2400,6 +2400,7 @@ boomerang_interrupt(int irq, void *dev_id)
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
+ handled = 1;
if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
@@ -2501,7 +2502,7 @@ boomerang_interrupt(int irq, void *dev_id)
handler_exit:
vp->handling_irq = 0;
spin_unlock(&vp->lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
static int vortex_rx(struct net_device *dev)
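Editor's note: the two 3c59x changes above tighten error handling. The open path now frees whatever Rx skbs it managed to allocate through a single err_free_skb label, and boomerang_interrupt() returns IRQ_NONE instead of unconditionally claiming the interrupt when the IntLatch bit is clear. The following is a minimal sketch of that shared-IRQ return-value pattern, not code from the driver; my_priv, my_ack_and_service() and MY_INT_LATCH are placeholder names.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

#define MY_INT_LATCH 0x0001		/* illustrative "interrupt pending" bit */

struct my_priv {
	spinlock_t lock;
	void __iomem *ioaddr;
};

static void my_ack_and_service(struct my_priv *vp, u16 status)
{
	/* Acknowledge the source and do the real Rx/Tx work here. */
	iowrite16(status & MY_INT_LATCH, vp->ioaddr);
}

static irqreturn_t my_shared_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *vp = netdev_priv(dev);
	int handled = 0;
	u16 status;

	spin_lock(&vp->lock);
	status = ioread16(vp->ioaddr);

	if (status & MY_INT_LATCH) {	/* interrupt actually ours? */
		handled = 1;
		my_ack_and_service(vp, status);
	}

	spin_unlock(&vp->lock);

	/* IRQ_HANDLED if we did work, IRQ_NONE for a stray on a shared line. */
	return IRQ_RETVAL(handled);
}

When such a handler is registered with request_irq(..., IRQF_SHARED, ...), returning IRQ_NONE for interrupts that are not ours lets the kernel's spurious-interrupt detection identify and disable a misbehaving line instead of silently spinning on it.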
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index afaab4b23..5b7658bcf 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_3COM
default y
depends on ISA || EISA || PCI || PCMCIA
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -23,8 +21,7 @@ config EL3
depends on (ISA || EISA)
---help---
If you have a network (Ethernet) card belonging to the 3Com
- EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
- from <http://www.tldp.org/docs.html#howto>.
+ EtherLinkIII series, say Y here.
If your card is not working you may need to use the DOS
setup disk to disable Plug & Play mode, and to select the default
@@ -38,8 +35,7 @@ config 3C515
depends on ISA && ISA_DMA_API
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
- network card, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ network card, say Y here.
To compile this driver as a module, choose M here. The module
will be called 3c515.
@@ -78,9 +74,7 @@ config VORTEX
"Tornado" (3c905) PCI
"Hurricane" (3c555/3cSOHO) PCI
- If you have such a card, say Y and read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>. More
- specific information is in
+ If you have such a card, say Y here. More specific information is in
<file:Documentation/networking/vortex.txt> and in the comments at
the beginning of <file:drivers/net/ethernet/3com/3c59x.c>.
@@ -97,9 +91,7 @@ config TYPHOON
3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server,
3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card of this type, say Y here.
To compile this driver as a module, choose M here. The module
will be called typhoon.
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2d89bd00d..edf72258a 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_8390
default y
depends on NET_VENDOR_NATSEMI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -71,9 +69,7 @@ config MAC8390
select CRC32
---help---
If you want to include a driver to support Nubus or LC-PDS
- Ethernet cards using an NS8390 chipset or its equivalent, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ Ethernet cards using an NS8390 chipset or its equivalent, say Y.
config MCF8390
tristate "ColdFire NS8390 based Ethernet support"
@@ -95,10 +91,9 @@ config NE2000
ATARI_ETHERNEC)
select CRC32
---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Many Ethernet cards
- without a specific driver are compatible with NE2000.
+ If you have a network (Ethernet) card of this type, say Y here.
+ Many Ethernet cards without a specific driver are compatible with
+ the NE2000.
If you have a PCI NE2000 card however, say N here and Y to "PCI
NE2000 and clone support" below.
@@ -114,8 +109,7 @@ config NE2K_PCI
This driver is for NE2000 compatible PCI cards. It will not work
with ISA NE2000 cards (they have their own driver, "NE2000/NE1000
support" below). If you have a PCI NE2000 network (Ethernet) card,
- say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ say Y here.
This driver also works for the following NE2000 clone cards:
RealTek RTL-8029 Winbond 89C940 Compex RL2000 KTI ET32P2
@@ -164,9 +158,7 @@ config ULTRA
depends on ISA
select CRC32
---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card of this type, say Y here.
Important: There have been many reports that, with some motherboards
mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or compatible,
@@ -183,9 +175,7 @@ config WD80x3
depends on ISA
select CRC32
---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card of this type, say Y here.
To compile this driver as a module, choose M here. The module
will be called wd.
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index eadcb0538..f3bb17840 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -34,6 +34,7 @@ source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
@@ -66,6 +67,7 @@ config DNET
source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1367afcd0..b52e0f63f 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
@@ -29,6 +30,7 @@ obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
@@ -63,7 +65,7 @@ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
-obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig
index 5c804bbe3..822cffb41 100644
--- a/drivers/net/ethernet/adaptec/Kconfig
+++ b/drivers/net/ethernet/adaptec/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_ADAPTEC
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index c9cd3592a..6b94ba610 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -7,8 +7,6 @@ config NET_BFIN
depends on BF516 || BF518 || BF526 || BF527 || BF536 || BF537
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
- Make sure you know the name of your card. Read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
If unsure, say Y.
diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig
index 63e805de6..b6fe92003 100644
--- a/drivers/net/ethernet/agere/Kconfig
+++ b/drivers/net/ethernet/agere/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_AGERE
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
index d8d95d4cd..47da7e7a5 100644
--- a/drivers/net/ethernet/allwinner/Kconfig
+++ b/drivers/net/ethernet/allwinner/Kconfig
@@ -9,8 +9,7 @@ config NET_VENDOR_ALLWINNER
depends on ARCH_SUNXI
---help---
If you have a network (Ethernet) card belonging to this
- class, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ class, say Y here.
Note that the answer to this question doesn't directly
affect the kernel: saying N will just cause the configurator
diff --git a/drivers/net/ethernet/alteon/Kconfig b/drivers/net/ethernet/alteon/Kconfig
index 799a85282..e06ccab35 100644
--- a/drivers/net/ethernet/alteon/Kconfig
+++ b/drivers/net/ethernet/alteon/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_ALTEON
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 426916036..afc62ea80 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -37,8 +37,7 @@ config AMD8111_ETH
select MII
---help---
If you have an AMD 8111-based PCI LANCE ethernet card,
- answer Y here and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ answer Y here.
To compile this driver as a module, choose M here. The module
will be called amd8111e.
@@ -47,10 +46,8 @@ config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
depends on ISA && ISA_DMA_API && !ARM
---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Some LinkSys cards are
- of this type.
+ If you have a network (Ethernet) card of this type, say Y here.
+ Some LinkSys cards are of this type.
To compile this driver as a module, choose M here: the module
will be called lance. This is recommended.
@@ -62,8 +59,7 @@ config PCNET32
select MII
---help---
If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
- answer Y here and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ answer Y here.
To compile this driver as a module, choose M here. The module
will be called pcnet32.
@@ -144,9 +140,7 @@ config NI65
tristate "NI6510 support"
depends on ISA && ISA_DMA_API && !ARM
---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card of this type, say Y here.
To compile this driver as a module, choose M here. The module
will be called ni65.
@@ -179,10 +173,8 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+ depends on ((OF_NET && OF_ADDRESS) || ACPI) && HAS_IOMEM && HAS_DMA
depends on ARM64 || COMPILE_TEST
- select PHYLIB
- select AMD_XGBE_PHY
select BITREVERSE
select CRC32
select PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 34c28aac7..b6fa89102 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -857,6 +857,48 @@
*/
#define PCS_MMD_SELECT 0xff
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1 0x002c
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX 11
+#define SIR0_KR_RT_1_RESET_WIDTH 1
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
+#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
/* Descriptor/Packet entry bit positions and sizes */
#define RX_PACKET_ERRORS_CRC_INDEX 2
#define RX_PACKET_ERRORS_CRC_WIDTH 1
@@ -973,10 +1015,47 @@
#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL 0x00ab
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
#ifndef MDIO_AN_COMP_STAT
#define MDIO_AN_COMP_STAT 0x0030
#endif
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+/* MDIO mask values */
+#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
+#define XGBE_XNP_ACK_PROCESSED BIT(12)
+#define XGBE_XNP_MP_FORMATTED BIT(13)
+#define XGBE_XNP_NP_EXCHANGE BIT(15)
+
+#define XGBE_KR_TRAINING_START BIT(0)
+#define XGBE_KR_TRAINING_ENABLE BIT(1)
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -1119,6 +1198,82 @@ do { \
ioread32((_pdata)->xpcs_regs + (_off))
/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
* using MDIO. Different from above because of the use of standardized
* Linux include values. No shifting is performed with the bit
* operations, everything works on mask values.
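Editor's note: the new XSIR0/XSIR1/XRXTX macros above all follow the driver's INDEX/WIDTH convention: each field is described by a bit position and a width, and every write is a read-modify-write of a 16-bit MMIO register. As a rough functional sketch (not the driver's actual GET_BITS/SET_BITS macros, which operate on lvalues), the expansion looks like this; the helper names are illustrative and the register constants are the ones defined in the hunk above.

#include <linux/io.h>
#include <linux/types.h>

#define SIR1_SPEED			0x0000	/* values from xgbe-common.h above */
#define SIR1_SPEED_DATARATE_INDEX	4
#define SIR1_SPEED_DATARATE_WIDTH	2

/* Simplified, value-returning equivalents of the INDEX/WIDTH helpers. */
static inline u16 field_get(u16 reg, unsigned int index, unsigned int width)
{
	return (reg >> index) & ((1U << width) - 1);
}

static inline u16 field_set(u16 reg, unsigned int index, unsigned int width,
			    u16 val)
{
	u16 mask = ((1U << width) - 1) << index;

	return (reg & ~mask) | ((val << index) & mask);
}

/* Roughly what XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, rate) boils
 * down to: read the 16-bit register, replace the 2-bit DATARATE field at
 * bit 4, write it back.  Returns the previous field value for illustration.
 */
static u16 sir1_set_datarate(void __iomem *sir1_regs, u16 rate)
{
	u16 reg = ioread16(sir1_regs + SIR1_SPEED);
	u16 old = field_get(reg, SIR1_SPEED_DATARATE_INDEX,
			    SIR1_SPEED_DATARATE_WIDTH);

	reg = field_set(reg, SIR1_SPEED_DATARATE_INDEX,
			SIR1_SPEED_DATARATE_WIDTH, rate);
	iowrite16(reg, sir1_regs + SIR1_SPEED);

	return old;
}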
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 8a50b01c2..a6b9899e2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
tc_ets = 0;
tc_ets_weight = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
- ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
- DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+ netif_dbg(pdata, drv, netdev,
+ "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+ ets->tc_tx_bw[i], ets->tc_rx_bw[i],
+ ets->tc_tsa[i]);
+ netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
+ ets->prio_tc[i]);
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
(i >= pdata->hw_feat.tc_cnt))
@@ -214,8 +217,9 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
- pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+ netif_dbg(pdata, drv, netdev,
+ "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
if (!pdata->pfc) {
pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
@@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
u8 support = xgbe_dcb_getdcbx(netdev);
- DBGPR(" DCBX=%#hhx\n", dcbx);
+ netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
if (dcbx & ~support)
return 1;
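Editor's note: most of the xgbe hunks in this patch replace the compile-time DBGPR() macro with netif_dbg(), netif_info() and friends, which are gated at run time by a per-device NETIF_MSG_* bitmap (settable with ethtool -s <dev> msglvl ...). A minimal sketch of the pattern follows; the private structure and helper names are illustrative, while msg_enable, netif_msg_init() and netif_dbg() are the real kernel API.

#include <linux/module.h>
#include <linux/netdevice.h>

static int debug = -1;			/* -1 = use the default mask below */
module_param(debug, int, 0644);

struct my_priv {
	struct net_device *netdev;
	u32 msg_enable;			/* NETIF_MSG_* bitmap */
};

static void my_setup(struct my_priv *pdata)
{
	/* Resolve the module parameter into an initial message mask. */
	pdata->msg_enable = netif_msg_init(debug,
					   NETIF_MSG_DRV | NETIF_MSG_LINK);
}

static void my_report_tc(struct my_priv *pdata, unsigned int tc, u8 tx_bw)
{
	/* Emitted only when NETIF_MSG_DRV is set in pdata->msg_enable
	 * (and, with CONFIG_DYNAMIC_DEBUG, the debug site is enabled).
	 */
	netif_dbg(pdata, drv, pdata->netdev, "TC%u: tx_bw=%hhu\n", tc, tx_bw);
}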
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 5c92fb71b..b3bc87fe3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
if (!ring->rdata)
return -ENOMEM;
- DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
- ring->rdesc, ring->rdesc_dma, ring->rdata);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
+ ring->rdesc, &ring->rdesc_dma, ring->rdata);
DBGPR("<--xgbe_init_ring\n");
@@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- DBGPR(" %s - tx_ring:\n", channel->name);
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+ channel->name);
+
ret = xgbe_init_ring(pdata, channel->tx_ring,
pdata->tx_desc_count);
if (ret) {
@@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
goto err_ring;
}
- DBGPR(" %s - rx_ring:\n", channel->name);
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+ channel->name);
+
ret = xgbe_init_ring(pdata, channel->rx_ring,
pdata->rx_desc_count);
if (ret) {
netdev_alert(pdata->netdev,
- "error initializing Tx ring\n");
+ "error initializing Rx ring\n");
goto err_ring;
}
}
@@ -298,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
get_page(pa->pages);
bd->pa = *pa;
- bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_base = pa->pages_dma;
+ bd->dma_off = pa->pages_offset;
bd->dma_len = len;
pa->pages_offset += len;
@@ -476,8 +482,6 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
if (rdata->state_saved) {
rdata->state_saved = 0;
- rdata->state.incomplete = 0;
- rdata->state.context_next = 0;
rdata->state.skb = NULL;
rdata->state.len = 0;
rdata->state.error = 0;
@@ -518,8 +522,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
if (tso) {
- DBGPR(" TSO packet\n");
-
/* Map the TSO header */
skb_dma = dma_map_single(pdata->dev, skb->data,
packet->header_len, DMA_TO_DEVICE);
@@ -529,6 +531,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = packet->header_len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb header: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, packet->header_len);
offset = packet->header_len;
@@ -550,8 +555,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = len;
- DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
- cur_index, skb_dma, len);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb data: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
datalen -= len;
offset += len;
@@ -563,7 +569,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- DBGPR(" mapping frag %u\n", i);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "mapping frag %u\n", i);
frag = &skb_shinfo(skb)->frags[i];
offset = 0;
@@ -582,8 +589,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = len;
rdata->mapped_as_page = 1;
- DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
- cur_index, skb_dma, len);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb frag: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
datalen -= len;
offset += len;
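Editor's note: the DMA-address prints in these hunks switch from casting to 0x%llx to the %pad printk specifier, which takes a pointer to a dma_addr_t and formats it correctly whether that type is 32 or 64 bits on the target platform. A small illustrative helper, not part of the patch, is shown below.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

static void my_log_mapping(struct net_device *netdev, dma_addr_t dma,
			   unsigned int len)
{
	/* %pad dereferences its argument, so the address is passed by
	 * reference; no cast is needed regardless of sizeof(dma_addr_t).
	 */
	netdev_dbg(netdev, "skb data: dma=%pad, len=%u\n", &dma, len);
}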
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 21d949751..a4473d8ff 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
return 0;
- DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+ netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+ enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
return 0;
- DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+ netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+ enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
mac_addr[0] = ha->addr[4];
mac_addr[1] = ha->addr[5];
- DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
- *mac_reg);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "adding mac address %pM at %#x\n",
+ ha->addr, *mac_reg);
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
}
@@ -907,23 +910,6 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
- /* If the PCS is changing modes, match the MAC speed to it */
- if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
- ((mmd_address & 0xffff) == MDIO_CTRL2)) {
- struct phy_device *phydev = pdata->phydev;
-
- if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
- /* KX mode */
- if (phydev->supported & SUPPORTED_1000baseKX_Full)
- xgbe_set_gmii_speed(pdata);
- else
- xgbe_set_gmii_2500_speed(pdata);
- } else {
- /* KR mode */
- xgbe_set_xgmii_speed(pdata);
- }
- }
-
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1124,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
unsigned int rx_usecs = pdata->rx_usecs;
unsigned int rx_frames = pdata->rx_frames;
unsigned int inte;
+ dma_addr_t hdr_dma, buf_dma;
if (!rx_usecs && !rx_frames) {
/* No coalescing, interrupt for every descriptor */
@@ -1143,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
* Set buffer 2 (hi) address to buffer dma address (hi) and
* set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
- rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
- rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+ hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+ buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
@@ -1322,7 +1311,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
- DBGPR(" TC%u using SP\n", i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using SP\n", i);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_SP);
break;
@@ -1330,7 +1320,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
weight = total_weight * ets->tc_tx_bw[i] / 100;
weight = clamp(weight, min_weight, total_weight);
- DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using DWRR (weight %u)\n", i, weight);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_ETS);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1350,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
}
mask &= 0xff;
- DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+ tc, mask);
reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -1457,8 +1449,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
/* Create a context descriptor if this is a TSO packet */
if (tso_context || vlan_context) {
if (tso_context) {
- DBGPR(" TSO context descriptor, mss=%u\n",
- packet->mss);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "TSO context descriptor, mss=%u\n",
+ packet->mss);
/* Set the MSS size */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1469,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
}
if (vlan_context) {
- DBGPR(" VLAN context descriptor, ctag=%u\n",
- packet->vlan_ctag);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
/* Mark it as a CONTEXT descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1533,6 +1527,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
packet->tcp_payload_len);
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
packet->tcp_header_len / 4);
+
+ pdata->ext_stats.tx_tso_packets++;
} else {
/* Enable CRC and Pad Insertion */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
@@ -1594,9 +1590,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
rdesc = rdata->rdesc;
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
- xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+ if (netif_msg_tx_queued(pdata))
+ xgbe_dump_tx_desc(pdata, ring, start_index,
+ packet->rdesc_count, 1);
/* Make sure ownership is written to the descriptor */
dma_wmb();
@@ -1618,11 +1614,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
static int xgbe_dev_read(struct xgbe_channel *channel)
{
+ struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
- struct net_device *netdev = channel->pdata->netdev;
+ struct net_device *netdev = pdata->netdev;
unsigned int err, etlt, l34t;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1637,9 +1634,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
/* Make sure descriptor fields are read after reading the OWN bit */
dma_rmb();
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
- xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+ if (netif_msg_rx_status(pdata))
+ xgbe_dump_rx_desc(pdata, ring, ring->cur);
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
/* Timestamp Context Descriptor */
@@ -1661,9 +1657,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
CONTEXT_NEXT, 1);
/* Get the header length */
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
+ if (rdata->rx.hdr_len)
+ pdata->ext_stats.rx_split_header_packets++;
+ }
/* Get the RSS hash */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
@@ -1700,14 +1699,14 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
INCOMPLETE, 0);
/* Set checksum done indicator as appropriate */
- if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+ if (netdev->features & NETIF_F_RXCSUM)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CSUM_DONE, 1);
/* Check for errors (only valid in last descriptor) */
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
- DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+ netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
if (!err || !etlt) {
/* No error if err is 0 or etlt is 0 */
@@ -1718,7 +1717,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
RX_NORMAL_DESC0,
OVT);
- DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+ netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+ packet->vlan_ctag);
}
} else {
if ((etlt == 0x05) || (etlt == 0x06))
@@ -2026,9 +2026,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
- netdev_notice(pdata->netdev,
- "%d Tx hardware queues, %d byte fifo per queue\n",
- pdata->tx_q_count, ((fifo_size + 1) * 256));
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2042,9 +2042,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
- netdev_notice(pdata->netdev,
- "%d Rx hardware queues, %d byte fifo per queue\n",
- pdata->rx_q_count, ((fifo_size + 1) * 256));
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2063,14 +2063,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
for (j = 0; j < qptc; j++) {
- DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
}
if (i < qptc_extra) {
- DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
@@ -2088,13 +2090,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, prio = 0; i < prio_queues;) {
mask = 0;
for (j = 0; j < ppq; j++) {
- DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
if (i < ppq_extra) {
- DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 9fd6c69a8..aae9d5ecd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -183,9 +183,10 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
channel->rx_ring = rx_ring++;
}
- DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
- channel->name, channel->queue_index, channel->dma_regs,
- channel->dma_irq, channel->tx_ring, channel->rx_ring);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->dma_regs, channel->dma_irq,
+ channel->tx_ring, channel->rx_ring);
}
pdata->channel = channel_mem;
@@ -235,7 +236,8 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
struct xgbe_prv_data *pdata = channel->pdata;
if (count > xgbe_tx_avail_desc(ring)) {
- DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_info(pdata, drv, pdata->netdev,
+ "Tx queue stopped, not enough descriptors available\n");
netif_stop_subqueue(pdata->netdev, channel->queue_index);
ring->tx.queue_stopped = 1;
@@ -330,7 +332,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!dma_isr)
goto isr_done;
- DBGPR(" DMA_ISR = %08x\n", dma_isr);
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
for (i = 0; i < pdata->channel_count; i++) {
if (!(dma_isr & (1 << i)))
@@ -339,7 +341,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
channel = pdata->channel + i;
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
- DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+ i, dma_ch_isr);
/* The TI or RI interrupt bits may still be set even if using
* per channel DMA interrupts. Check to be sure those are not
@@ -386,8 +389,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
}
}
- DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
-
isr_done:
return IRQ_HANDLED;
}
@@ -436,43 +437,61 @@ static void xgbe_tx_timer(unsigned long data)
DBGPR("<--xgbe_tx_timer\n");
}
-static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_service(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ service_work);
+
+ pdata->phy_if.phy_status(pdata);
+}
+
+static void xgbe_service_timer(unsigned long data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ schedule_work(&pdata->service_work);
+
+ mod_timer(&pdata->service_timer, jiffies + HZ);
+}
+
+static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
unsigned int i;
- DBGPR("-->xgbe_init_tx_timers\n");
+ setup_timer(&pdata->service_timer, xgbe_service_timer,
+ (unsigned long)pdata);
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
break;
- DBGPR(" %s adding tx timer\n", channel->name);
setup_timer(&channel->tx_timer, xgbe_tx_timer,
(unsigned long)channel);
}
+}
- DBGPR("<--xgbe_init_tx_timers\n");
+static void xgbe_start_timers(struct xgbe_prv_data *pdata)
+{
+ mod_timer(&pdata->service_timer, jiffies + HZ);
}
-static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
unsigned int i;
- DBGPR("-->xgbe_stop_tx_timers\n");
+ del_timer_sync(&pdata->service_timer);
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
break;
- DBGPR(" %s deleting tx timer\n", channel->name);
del_timer_sync(&channel->tx_timer);
}
-
- DBGPR("<--xgbe_stop_tx_timers\n");
}
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
@@ -512,6 +531,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
RXFIFOSIZE);
hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
TXFIFOSIZE);
+ hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
@@ -759,112 +779,12 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_rx_data\n");
}
-static void xgbe_adjust_link(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct phy_device *phydev = pdata->phydev;
- int new_state = 0;
-
- if (!phydev)
- return;
-
- if (phydev->link) {
- /* Flow control support */
- if (pdata->pause_autoneg) {
- if (phydev->pause || phydev->asym_pause) {
- pdata->tx_pause = 1;
- pdata->rx_pause = 1;
- } else {
- pdata->tx_pause = 0;
- pdata->rx_pause = 0;
- }
- }
-
- if (pdata->tx_pause != pdata->phy_tx_pause) {
- hw_if->config_tx_flow_control(pdata);
- pdata->phy_tx_pause = pdata->tx_pause;
- }
-
- if (pdata->rx_pause != pdata->phy_rx_pause) {
- hw_if->config_rx_flow_control(pdata);
- pdata->phy_rx_pause = pdata->rx_pause;
- }
-
- /* Speed support */
- if (phydev->speed != pdata->phy_speed) {
- new_state = 1;
-
- switch (phydev->speed) {
- case SPEED_10000:
- hw_if->set_xgmii_speed(pdata);
- break;
-
- case SPEED_2500:
- hw_if->set_gmii_2500_speed(pdata);
- break;
-
- case SPEED_1000:
- hw_if->set_gmii_speed(pdata);
- break;
- }
- pdata->phy_speed = phydev->speed;
- }
-
- if (phydev->link != pdata->phy_link) {
- new_state = 1;
- pdata->phy_link = 1;
- }
- } else if (pdata->phy_link) {
- new_state = 1;
- pdata->phy_link = 0;
- pdata->phy_speed = SPEED_UNKNOWN;
- }
-
- if (new_state)
- phy_print_status(phydev);
-}
-
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
- struct phy_device *phydev = pdata->phydev;
- int ret;
-
pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
- pdata->phy_tx_pause = pdata->tx_pause;
- pdata->phy_rx_pause = pdata->rx_pause;
-
- ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
- pdata->phy_mode);
- if (ret) {
- netdev_err(netdev, "phy_connect_direct failed\n");
- return ret;
- }
- if (!phydev->drv || (phydev->drv->phy_id == 0)) {
- netdev_err(netdev, "phy_id not valid\n");
- ret = -ENODEV;
- goto err_phy_connect;
- }
- DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
- dev_name(&phydev->dev), phydev->link);
-
- return 0;
-
-err_phy_connect:
- phy_disconnect(phydev);
-
- return ret;
-}
-
-static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
-{
- if (!pdata->phydev)
- return;
-
- phy_disconnect(pdata->phydev);
+ return pdata->phy_if.phy_reset(pdata);
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
@@ -889,13 +809,14 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
netif_tx_stop_all_queues(netdev);
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+
hw_if->powerdown_tx(pdata);
hw_if->powerdown_rx(pdata);
xgbe_napi_disable(pdata, 0);
- phy_stop(pdata->phydev);
-
pdata->power_down = 1;
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -924,8 +845,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
pdata->power_down = 0;
- phy_start(pdata->phydev);
-
xgbe_napi_enable(pdata, 0);
hw_if->powerup_tx(pdata);
@@ -936,6 +855,8 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
netif_tx_start_all_queues(netdev);
+ xgbe_start_timers(pdata);
+
spin_unlock_irqrestore(&pdata->lock, flags);
DBGPR("<--xgbe_powerup\n");
@@ -946,6 +867,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
struct net_device *netdev = pdata->netdev;
int ret;
@@ -953,7 +875,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->init(pdata);
- phy_start(pdata->phydev);
+ ret = phy_if->phy_start(pdata);
+ if (ret)
+ goto err_phy;
xgbe_napi_enable(pdata, 1);
@@ -964,10 +888,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
- xgbe_init_tx_timers(pdata);
-
netif_tx_start_all_queues(netdev);
+ xgbe_start_timers(pdata);
+ schedule_work(&pdata->service_work);
+
DBGPR("<--xgbe_start\n");
return 0;
@@ -975,8 +900,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
err_napi:
xgbe_napi_disable(pdata, 1);
- phy_stop(pdata->phydev);
+ phy_if->phy_stop(pdata);
+err_phy:
hw_if->exit(pdata);
return ret;
@@ -985,6 +911,7 @@ err_napi:
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
struct xgbe_channel *channel;
struct net_device *netdev = pdata->netdev;
struct netdev_queue *txq;
@@ -994,7 +921,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
netif_tx_stop_all_queues(netdev);
- xgbe_stop_tx_timers(pdata);
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
@@ -1003,7 +931,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
xgbe_napi_disable(pdata, 1);
- phy_stop(pdata->phydev);
+ phy_if->phy_stop(pdata);
hw_if->exit(pdata);
@@ -1374,7 +1302,7 @@ static int xgbe_open(struct net_device *netdev)
ret = clk_prepare_enable(pdata->sysclk);
if (ret) {
netdev_alert(netdev, "dma clk_prepare_enable failed\n");
- goto err_phy_init;
+ return ret;
}
ret = clk_prepare_enable(pdata->ptpclk);
@@ -1399,14 +1327,17 @@ static int xgbe_open(struct net_device *netdev)
if (ret)
goto err_channels;
- /* Initialize the device restart and Tx timestamp work struct */
+ INIT_WORK(&pdata->service_work, xgbe_service);
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ xgbe_init_timers(pdata);
ret = xgbe_start(pdata);
if (ret)
goto err_rings;
+ clear_bit(XGBE_DOWN, &pdata->dev_state);
+
DBGPR("<--xgbe_open\n");
return 0;
@@ -1423,9 +1354,6 @@ err_ptpclk:
err_sysclk:
clk_disable_unprepare(pdata->sysclk);
-err_phy_init:
- xgbe_phy_exit(pdata);
-
return ret;
}
@@ -1449,8 +1377,7 @@ static int xgbe_close(struct net_device *netdev)
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
- /* Release the phy */
- xgbe_phy_exit(pdata);
+ set_bit(XGBE_DOWN, &pdata->dev_state);
DBGPR("<--xgbe_close\n");
@@ -1478,7 +1405,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
if (skb->len == 0) {
- netdev_err(netdev, "empty skb received from stack\n");
+ netif_err(pdata, tx_err, netdev,
+ "empty skb received from stack\n");
dev_kfree_skb_any(skb);
goto tx_netdev_return;
}
@@ -1494,7 +1422,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = xgbe_prep_tso(skb, packet);
if (ret) {
- netdev_err(netdev, "error processing TSO packet\n");
+ netif_err(pdata, tx_err, netdev,
+ "error processing TSO packet\n");
dev_kfree_skb_any(skb);
goto tx_netdev_return;
}
@@ -1513,9 +1442,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Configure required descriptor fields for transmission */
hw_if->dev_xmit(channel);
-#ifdef XGMAC_ENABLE_TX_PKT_DUMP
- xgbe_print_pkt(netdev, skb, true);
-#endif
+ if (netif_msg_pktdata(pdata))
+ xgbe_print_pkt(netdev, skb, true);
/* Stop the queue in advance if there may not be enough descriptors */
xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
@@ -1710,7 +1638,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
(pdata->q2tc_map[queue] == i))
queue++;
- DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1);
+ netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
+ i, offset, queue - 1);
netdev_set_tc_queue(netdev, i, queue - offset, offset);
offset = queue;
}
@@ -1820,9 +1749,10 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
lower_32_bits(rdata->rdesc_dma));
}
-static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+ struct napi_struct *napi,
struct xgbe_ring_data *rdata,
- unsigned int *len)
+ unsigned int len)
{
struct sk_buff *skb;
u8 *packet;
@@ -1832,14 +1762,35 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
if (!skb)
return NULL;
+ /* Start with the header buffer which may contain just the header
+ * or the header plus data
+ */
+ dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
+ rdata->rx.hdr.dma_off,
+ rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+ copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
copy_len = min(rdata->rx.hdr.dma_len, copy_len);
skb_copy_to_linear_data(skb, packet, copy_len);
skb_put(skb, copy_len);
- *len -= copy_len;
+ len -= copy_len;
+ if (len) {
+ /* Add the remaining data as a frag */
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx.buf.pa.pages,
+ rdata->rx.buf.pa.pages_offset,
+ len, rdata->rx.buf.dma_len);
+ rdata->rx.buf.pa.pages = NULL;
+ }
return skb;
}
@@ -1877,9 +1828,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
* bit */
dma_rmb();
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
- xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
-#endif
+ if (netif_msg_tx_done(pdata))
+ xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
if (hw_if->is_last_desc(rdesc)) {
tx_packets += rdata->tx.packets;
@@ -1922,7 +1872,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context;
- unsigned int len, put_len, max_len;
+ unsigned int len, rdesc_len, max_len;
unsigned int received = 0;
int packet_count = 0;
@@ -1932,6 +1882,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
+ incomplete = 0;
+ context_next = 0;
+
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1941,15 +1894,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
/* First time in loop see if we need to restore state */
if (!received && rdata->state_saved) {
- incomplete = rdata->state.incomplete;
- context_next = rdata->state.context_next;
skb = rdata->state.skb;
error = rdata->state.error;
len = rdata->state.len;
} else {
memset(packet, 0, sizeof(*packet));
- incomplete = 0;
- context_next = 0;
skb = NULL;
error = 0;
len = 0;
@@ -1983,43 +1932,38 @@ read_again:
if (error || packet->errors) {
if (packet->errors)
- DBGPR("Error in received packet\n");
+ netif_err(pdata, rx_err, netdev,
+ "error in received packet\n");
dev_kfree_skb(skb);
goto next_packet;
}
if (!context) {
- put_len = rdata->rx.len - len;
- len += put_len;
-
- if (!skb) {
- dma_sync_single_for_cpu(pdata->dev,
- rdata->rx.hdr.dma,
- rdata->rx.hdr.dma_len,
- DMA_FROM_DEVICE);
-
- skb = xgbe_create_skb(napi, rdata, &put_len);
- if (!skb) {
+ /* Length is cumulative, get this descriptor's length */
+ rdesc_len = rdata->rx.len - len;
+ len += rdesc_len;
+
+ if (rdesc_len && !skb) {
+ skb = xgbe_create_skb(pdata, napi, rdata,
+ rdesc_len);
+ if (!skb)
error = 1;
- goto skip_data;
- }
- }
-
- if (put_len) {
- dma_sync_single_for_cpu(pdata->dev,
- rdata->rx.buf.dma,
+ } else if (rdesc_len) {
+ dma_sync_single_range_for_cpu(pdata->dev,
+ rdata->rx.buf.dma_base,
+ rdata->rx.buf.dma_off,
rdata->rx.buf.dma_len,
DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset,
- put_len, rdata->rx.buf.dma_len);
+ rdesc_len,
+ rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL;
}
}
-skip_data:
if (incomplete || context_next)
goto read_again;
@@ -2033,14 +1977,14 @@ skip_data:
max_len += VLAN_HLEN;
if (skb->len > max_len) {
- DBGPR("packet length exceeds configured MTU\n");
+ netif_err(pdata, rx_err, netdev,
+ "packet length exceeds configured MTU\n");
dev_kfree_skb(skb);
goto next_packet;
}
-#ifdef XGMAC_ENABLE_RX_PKT_DUMP
- xgbe_print_pkt(netdev, skb, false);
-#endif
+ if (netif_msg_pktdata(pdata))
+ xgbe_print_pkt(netdev, skb, false);
skb_checksum_none_assert(skb);
if (XGMAC_GET_BITS(packet->attributes,
@@ -2072,7 +2016,6 @@ skip_data:
skb_record_rx_queue(skb, channel->queue_index);
skb_mark_napi_id(skb, napi);
- netdev->last_rx = jiffies;
napi_gro_receive(napi, skb);
next_packet:
@@ -2083,8 +2026,6 @@ next_packet:
if (received && (incomplete || context_next)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdata->state_saved = 1;
- rdata->state.incomplete = incomplete;
- rdata->state.context_next = context_next;
rdata->state.skb = skb;
rdata->state.len = len;
rdata->state.error = error;
@@ -2165,8 +2106,8 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
return processed;
}
-void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
- unsigned int count, unsigned int flag)
+void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
+ unsigned int idx, unsigned int count, unsigned int flag)
{
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
@@ -2174,20 +2115,29 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
while (count--) {
rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
- pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
- (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
- le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ netdev_dbg(pdata->netdev,
+ "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0),
+ le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2),
+ le32_to_cpu(rdesc->desc3));
idx++;
}
}
-void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
unsigned int idx)
{
- pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
- le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
- le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ rdata = XGBE_GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ netdev_dbg(pdata->netdev,
+ "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+ idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
@@ -2197,21 +2147,21 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
unsigned char buffer[128];
unsigned int i, j;
- netdev_alert(netdev, "\n************** SKB dump ****************\n");
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
- netdev_alert(netdev, "%s packet of %d bytes\n",
- (tx_rx ? "TX" : "RX"), skb->len);
+ netdev_dbg(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
- netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
- netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+ netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
for (i = 0, j = 0; i < skb->len;) {
j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
buf[i++]);
if ((i % 32) == 0) {
- netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
+ netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
j = 0;
} else if ((i % 16) == 0) {
buffer[j++] = ' ';
@@ -2221,7 +2171,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
}
}
if (i % 32)
- netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
+ netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
- netdev_alert(netdev, "\n************** SKB dump ****************\n");
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
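The hunks above replace the compile-time XGMAC_ENABLE_*_DUMP blocks and DBGPR() calls with runtime checks against pdata->msg_enable, which xgbe-main.c below seeds from a "debug" module parameter through netif_msg_init(). A minimal sketch of that standard netdev message-level pattern follows; the example_priv structure and function names are illustrative only, while netif_msg_init(), netif_msg_tx_queued(), netif_dbg() and the NETIF_MSG_* flags are the real helpers from <linux/netdevice.h>.

#include <linux/netdevice.h>
#include <linux/module.h>

static int debug = -1;                          /* -1 keeps the driver defaults */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message level bitmap (see NETIF_MSG_*)");

struct example_priv {                           /* hypothetical private data */
        struct net_device *netdev;
        u32 msg_enable;                         /* consulted by netif_msg_*() */
};

static void example_init_msg_level(struct example_priv *priv)
{
        /* Turn the module parameter into a NETIF_MSG_* bitmask */
        priv->msg_enable = netif_msg_init(debug, NETIF_MSG_LINK |
                                          NETIF_MSG_IFDOWN | NETIF_MSG_IFUP);
}

static void example_tx_path(struct example_priv *priv)
{
        /* Runtime test replaces the old compile-time #ifdef descriptor dump */
        if (netif_msg_tx_queued(priv))
                netif_dbg(priv, tx_queued, priv->netdev,
                          "Tx descriptor queued\n");
}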
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 5f149e8ee..59e090e95 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -133,6 +133,12 @@ struct xgbe_stats {
offsetof(struct xgbe_prv_data, mmc_stats._var), \
}
+#define XGMAC_EXT_STAT(_string, _var) \
+ { _string, \
+ FIELD_SIZEOF(struct xgbe_ext_stats, _var), \
+ offsetof(struct xgbe_prv_data, ext_stats._var), \
+ }
+
static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
XGMAC_MMC_STAT("tx_packets", txframecount_gb),
@@ -140,6 +146,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
@@ -171,6 +178,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+ XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
};
#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
@@ -239,9 +247,9 @@ static void xgbe_get_pauseparam(struct net_device *netdev,
DBGPR("-->xgbe_get_pauseparam\n");
- pause->autoneg = pdata->pause_autoneg;
- pause->tx_pause = pdata->tx_pause;
- pause->rx_pause = pdata->rx_pause;
+ pause->autoneg = pdata->phy.pause_autoneg;
+ pause->tx_pause = pdata->phy.tx_pause;
+ pause->rx_pause = pdata->phy.rx_pause;
DBGPR("<--xgbe_get_pauseparam\n");
}
@@ -250,7 +258,6 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct phy_device *phydev = pdata->phydev;
int ret = 0;
DBGPR("-->xgbe_set_pauseparam\n");
@@ -258,21 +265,26 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
pause->autoneg, pause->tx_pause, pause->rx_pause);
- pdata->pause_autoneg = pause->autoneg;
- if (pause->autoneg) {
- phydev->advertising |= ADVERTISED_Pause;
- phydev->advertising |= ADVERTISED_Asym_Pause;
+ if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+ return -EINVAL;
+
+ pdata->phy.pause_autoneg = pause->autoneg;
+ pdata->phy.tx_pause = pause->tx_pause;
+ pdata->phy.rx_pause = pause->rx_pause;
- } else {
- phydev->advertising &= ~ADVERTISED_Pause;
- phydev->advertising &= ~ADVERTISED_Asym_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
- pdata->tx_pause = pause->tx_pause;
- pdata->rx_pause = pause->rx_pause;
+ if (pause->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
}
+ if (pause->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
if (netif_running(netdev))
- ret = phy_start_aneg(phydev);
+ ret = pdata->phy_if.phy_config_aneg(pdata);
DBGPR("<--xgbe_set_pauseparam\n");
@@ -283,36 +295,39 @@ static int xgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret;
DBGPR("-->xgbe_get_settings\n");
- if (!pdata->phydev)
- return -ENODEV;
+ cmd->phy_address = pdata->phy.address;
+
+ cmd->supported = pdata->phy.supported;
+ cmd->advertising = pdata->phy.advertising;
+ cmd->lp_advertising = pdata->phy.lp_advertising;
+
+ cmd->autoneg = pdata->phy.autoneg;
+ ethtool_cmd_speed_set(cmd, pdata->phy.speed);
+ cmd->duplex = pdata->phy.duplex;
- ret = phy_ethtool_gset(pdata->phydev, cmd);
+ cmd->port = PORT_NONE;
+ cmd->transceiver = XCVR_INTERNAL;
DBGPR("<--xgbe_get_settings\n");
- return ret;
+ return 0;
}
static int xgbe_set_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct phy_device *phydev = pdata->phydev;
u32 speed;
int ret;
DBGPR("-->xgbe_set_settings\n");
- if (!pdata->phydev)
- return -ENODEV;
-
speed = ethtool_cmd_speed(cmd);
- if (cmd->phy_address != phydev->addr)
+ if (cmd->phy_address != pdata->phy.address)
return -EINVAL;
if ((cmd->autoneg != AUTONEG_ENABLE) &&
@@ -333,23 +348,23 @@ static int xgbe_set_settings(struct net_device *netdev,
return -EINVAL;
}
- cmd->advertising &= phydev->supported;
+ cmd->advertising &= pdata->phy.supported;
if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
return -EINVAL;
ret = 0;
- phydev->autoneg = cmd->autoneg;
- phydev->speed = speed;
- phydev->duplex = cmd->duplex;
- phydev->advertising = cmd->advertising;
+ pdata->phy.autoneg = cmd->autoneg;
+ pdata->phy.speed = speed;
+ pdata->phy.duplex = cmd->duplex;
+ pdata->phy.advertising = cmd->advertising;
if (cmd->autoneg == AUTONEG_ENABLE)
- phydev->advertising |= ADVERTISED_Autoneg;
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
if (netif_running(netdev))
- ret = phy_start_aneg(phydev);
+ ret = pdata->phy_if.phy_config_aneg(pdata);
DBGPR("<--xgbe_set_settings\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 714905384..e83bd76ab 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -124,9 +124,11 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
+#include <linux/mdio.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -136,6 +138,49 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
+static int debug = -1;
+module_param(debug, int, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP);
+
+static const u32 xgbe_serdes_blwc[] = {
+ XGBE_SPEED_1000_BLWC,
+ XGBE_SPEED_2500_BLWC,
+ XGBE_SPEED_10000_BLWC,
+};
+
+static const u32 xgbe_serdes_cdr_rate[] = {
+ XGBE_SPEED_1000_CDR,
+ XGBE_SPEED_2500_CDR,
+ XGBE_SPEED_10000_CDR,
+};
+
+static const u32 xgbe_serdes_pq_skew[] = {
+ XGBE_SPEED_1000_PQ,
+ XGBE_SPEED_2500_PQ,
+ XGBE_SPEED_10000_PQ,
+};
+
+static const u32 xgbe_serdes_tx_amp[] = {
+ XGBE_SPEED_1000_TXAMP,
+ XGBE_SPEED_2500_TXAMP,
+ XGBE_SPEED_10000_TXAMP,
+};
+
+static const u32 xgbe_serdes_dfe_tap_cfg[] = {
+ XGBE_SPEED_1000_DFE_TAP_CONFIG,
+ XGBE_SPEED_2500_DFE_TAP_CONFIG,
+ XGBE_SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 xgbe_serdes_dfe_tap_ena[] = {
+ XGBE_SPEED_1000_DFE_TAP_ENABLE,
+ XGBE_SPEED_2500_DFE_TAP_ENABLE,
+ XGBE_SPEED_10000_DFE_TAP_ENABLE,
+};
+
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@@ -153,8 +198,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
pdata->rx_pause = 1;
pdata->phy_speed = SPEED_UNKNOWN;
pdata->power_down = 0;
- pdata->default_autoneg = AUTONEG_ENABLE;
- pdata->default_speed = SPEED_10000;
DBGPR("<--xgbe_default_config\n");
}
@@ -162,19 +205,15 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_phy(&pdata->phy_if);
xgbe_init_function_ptrs_desc(&pdata->desc_if);
}
#ifdef CONFIG_ACPI
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
- struct acpi_device *adev = pdata->adev;
struct device *dev = pdata->dev;
u32 property;
- acpi_handle handle;
- acpi_status status;
- unsigned long long data;
- int cca;
int ret;
/* Obtain the system clock setting */
@@ -195,24 +234,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
}
pdata->ptpclk_rate = property;
- /* Retrieve the device cache coherency value */
- handle = adev->handle;
- do {
- status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
- if (!ACPI_FAILURE(status)) {
- cca = data;
- break;
- }
-
- status = acpi_get_parent(handle, &handle);
- } while (!ACPI_FAILURE(status));
-
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "error obtaining acpi coherency value\n");
- return -EINVAL;
- }
- pdata->coherent = !!cca;
-
return 0;
}
#else /* CONFIG_ACPI */
@@ -243,28 +264,84 @@ static int xgbe_of_support(struct xgbe_prv_data *pdata)
}
pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
- /* Retrieve the device cache coherency value */
- pdata->coherent = of_dma_is_coherent(dev->of_node);
-
return 0;
}
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct device_node *phy_node;
+ struct platform_device *phy_pdev;
+
+ phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (phy_node) {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_pdev = of_find_device_by_node(phy_node);
+ of_node_put(phy_node);
+ } else {
+ /* New style device tree:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ get_device(dev);
+ phy_pdev = pdata->pdev;
+ }
+
+ return phy_pdev;
+}
#else /* CONFIG_OF */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
-#endif /*CONFIG_OF */
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static unsigned int xgbe_resource_count(struct platform_device *pdev,
+ unsigned int type)
+{
+ unsigned int count;
+ int i;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *res = &pdev->resource[i];
+
+ if (type == resource_type(res))
+ count++;
+ }
+
+ return count;
+}
+
+static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct platform_device *phy_pdev;
+
+ if (pdata->use_acpi) {
+ get_device(pdata->dev);
+ phy_pdev = pdata->pdev;
+ } else {
+ phy_pdev = xgbe_of_get_phy_pdev(pdata);
+ }
+
+ return phy_pdev;
+}
static int xgbe_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
- struct xgbe_hw_if *hw_if;
- struct xgbe_desc_if *desc_if;
struct net_device *netdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = &pdev->dev, *phy_dev;
+ struct platform_device *phy_pdev;
struct resource *res;
const char *phy_mode;
- unsigned int i;
+ unsigned int i, phy_memnum, phy_irqnum;
int ret;
DBGPR("--> xgbe_probe\n");
@@ -289,9 +366,36 @@ static int xgbe_probe(struct platform_device *pdev)
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ set_bit(XGBE_DOWN, &pdata->dev_state);
+
/* Check if we should use ACPI or DT */
pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+ phy_pdev = xgbe_get_phy_pdev(pdata);
+ if (!phy_pdev) {
+ dev_err(dev, "unable to obtain phy device\n");
+ ret = -EINVAL;
+ goto err_phydev;
+ }
+ phy_dev = &phy_pdev->dev;
+
+ if (pdev == phy_pdev) {
+ /* New style device tree or ACPI:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
+ phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ } else {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_memnum = 0;
+ phy_irqnum = 0;
+ }
+
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -318,7 +422,8 @@ static int xgbe_probe(struct platform_device *pdev)
ret = PTR_ERR(pdata->xgmac_regs);
goto err_io;
}
- DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pdata->xpcs_regs = devm_ioremap_resource(dev, res);
@@ -327,7 +432,38 @@ static int xgbe_probe(struct platform_device *pdev)
ret = PTR_ERR(pdata->xpcs_regs);
goto err_io;
}
- DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(pdata->rxtx_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->sir0_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->sir0_regs)) {
+ dev_err(dev, "sir0 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir0_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->sir1_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->sir1_regs)) {
+ dev_err(dev, "sir1 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir1_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
/* Retrieve the MAC address */
ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
@@ -355,6 +491,115 @@ static int xgbe_probe(struct platform_device *pdev)
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
pdata->per_channel_irq = 1;
+ /* Retrieve the PHY speedset */
+ ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
+ &pdata->speed_set);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+ goto err_io;
+ }
+
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ case XGBE_SPEEDSET_2500_10000:
+ break;
+ default:
+ dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Retrieve the PHY configuration properties */
+ if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_BLWC_PROPERTY,
+ pdata->serdes_blwc,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_BLWC_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
+ sizeof(pdata->serdes_blwc));
+ }
+
+ if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_CDR_RATE_PROPERTY,
+ pdata->serdes_cdr_rate,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_CDR_RATE_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
+ sizeof(pdata->serdes_cdr_rate));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PQ_SKEW_PROPERTY,
+ pdata->serdes_pq_skew,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PQ_SKEW_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
+ sizeof(pdata->serdes_pq_skew));
+ }
+
+ if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_TX_AMP_PROPERTY,
+ pdata->serdes_tx_amp,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_TX_AMP_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
+ sizeof(pdata->serdes_tx_amp));
+ }
+
+ if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_DFE_CFG_PROPERTY,
+ pdata->serdes_dfe_tap_cfg,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_DFE_CFG_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
+ sizeof(pdata->serdes_dfe_tap_cfg));
+ }
+
+ if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_DFE_ENA_PROPERTY,
+ pdata->serdes_dfe_tap_ena,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_DFE_ENA_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
+ sizeof(pdata->serdes_dfe_tap_ena));
+ }
+
/* Obtain device settings unique to ACPI/OF */
if (pdata->use_acpi)
ret = xgbe_acpi_support(pdata);
@@ -364,6 +609,7 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_io;
/* Set the DMA coherency values */
+ pdata->coherent = device_dma_is_coherent(pdata->dev);
if (pdata->coherent) {
pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
pdata->arcache = XGBE_DMA_OS_ARCACHE;
@@ -382,17 +628,23 @@ static int xgbe_probe(struct platform_device *pdev)
}
pdata->dev_irq = ret;
+ /* Get the auto-negotiation interrupt */
+ ret = platform_get_irq(phy_pdev, phy_irqnum++);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq phy 0 failed\n");
+ goto err_io;
+ }
+ pdata->an_irq = ret;
+
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
- hw_if = &pdata->hw_if;
- desc_if = &pdata->desc_if;
/* Issue software reset to device */
- hw_if->exit(pdata);
+ pdata->hw_if.exit(pdata);
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
@@ -401,8 +653,6 @@ static int xgbe_probe(struct platform_device *pdev)
xgbe_default_config(pdata);
/* Set the DMA mask */
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
ret = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(pdata->hw_feat.dma_width));
if (ret) {
@@ -447,16 +697,8 @@ static int xgbe_probe(struct platform_device *pdev)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
- /* Prepare to regsiter with MDIO */
- pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
- if (!pdata->mii_bus_id) {
- dev_err(dev, "failed to allocate mii bus id\n");
- ret = -ENOMEM;
- goto err_io;
- }
- ret = xgbe_mdio_register(pdata);
- if (ret)
- goto err_bus_id;
+ /* Call MDIO/PHY initialization routine */
+ pdata->phy_if.phy_init(pdata);
/* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -501,26 +743,52 @@ static int xgbe_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "net device registration failed\n");
- goto err_reg_netdev;
+ goto err_io;
+ }
+
+ /* Create the PHY/ANEG name based on netdev name */
+ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+ netdev_name(netdev));
+
+ /* Create workqueues */
+ pdata->dev_workqueue =
+ create_singlethread_workqueue(netdev_name(netdev));
+ if (!pdata->dev_workqueue) {
+ netdev_err(netdev, "device workqueue creation failed\n");
+ ret = -ENOMEM;
+ goto err_netdev;
+ }
+
+ pdata->an_workqueue =
+ create_singlethread_workqueue(pdata->an_name);
+ if (!pdata->an_workqueue) {
+ netdev_err(netdev, "phy workqueue creation failed\n");
+ ret = -ENOMEM;
+ goto err_wq;
}
xgbe_ptp_register(pdata);
xgbe_debugfs_init(pdata);
+ platform_device_put(phy_pdev);
+
netdev_notice(netdev, "net device enabled\n");
DBGPR("<-- xgbe_probe\n");
return 0;
-err_reg_netdev:
- xgbe_mdio_unregister(pdata);
+err_wq:
+ destroy_workqueue(pdata->dev_workqueue);
-err_bus_id:
- kfree(pdata->mii_bus_id);
+err_netdev:
+ unregister_netdev(netdev);
err_io:
+ platform_device_put(phy_pdev);
+
+err_phydev:
free_netdev(netdev);
err_alloc:
@@ -540,11 +808,13 @@ static int xgbe_remove(struct platform_device *pdev)
xgbe_ptp_unregister(pdata);
- unregister_netdev(netdev);
+ flush_workqueue(pdata->an_workqueue);
+ destroy_workqueue(pdata->an_workqueue);
- xgbe_mdio_unregister(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+ destroy_workqueue(pdata->dev_workqueue);
- kfree(pdata->mii_bus_id);
+ unregister_netdev(netdev);
free_netdev(netdev);
@@ -557,16 +827,17 @@ static int xgbe_remove(struct platform_device *pdev)
static int xgbe_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
- int ret;
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret = 0;
DBGPR("-->xgbe_suspend\n");
- if (!netif_running(netdev)) {
- DBGPR("<--xgbe_dev_suspend\n");
- return -EINVAL;
- }
+ if (netif_running(netdev))
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
- ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
DBGPR("<--xgbe_suspend\n");
@@ -576,16 +847,16 @@ static int xgbe_suspend(struct device *dev)
static int xgbe_resume(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
- int ret;
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret = 0;
DBGPR("-->xgbe_resume\n");
- if (!netif_running(netdev)) {
- DBGPR("<--xgbe_dev_resume\n");
- return -EINVAL;
- }
+ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
- ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+ if (netif_running(netdev))
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
DBGPR("<--xgbe_resume\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 59e267f3f..9088c3a35 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -119,194 +119,1246 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = mii->priv;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- int mmd_data;
+ unsigned int reg;
- DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
- prtad, mmd_reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
- DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- return mmd_data;
+ reg &= ~XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
}
-static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
- u16 mmd_val)
+static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = mii->priv;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- int mmd_data = mmd_val;
+ unsigned int reg;
- DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
- prtad, mmd_reg, mmd_data);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+ reg |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
- DBGPR_MDIO("<--xgbe_mdio_write\n");
+ usleep_range(75, 100);
- return 0;
+ reg &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
}
-void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
{
- struct device *dev = pdata->dev;
- struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
- int i;
-
- dev_alert(dev, "\n************* PHY Reg dump **********************\n");
-
- dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
- dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
- dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
- dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
- dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
- dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
-
- dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
- dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
- dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
- dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
- dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 2,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
- dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
- MDIO_AN_COMP_STAT,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
-
- dev_alert(dev, "MMD Device Mask = %#x\n",
- phydev->c45_ids.devices_in_package);
- for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
- dev_alert(dev, " MMD %d: ID = %#08x\n", i,
- phydev->c45_ids.device_ids[i]);
-
- dev_alert(dev, "\n*************************************************\n");
-}
-
-int xgbe_mdio_register(struct xgbe_prv_data *pdata)
-{
- struct mii_bus *mii;
- struct phy_device *phydev;
- int ret = 0;
-
- DBGPR("-->xgbe_mdio_register\n");
-
- mii = mdiobus_alloc();
- if (!mii) {
- dev_err(pdata->dev, "mdiobus_alloc failed\n");
- return -ENOMEM;
+ /* Assert Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+}
+
+static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait;
+ u16 status;
+
+ /* Release Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+
+ /* Wait for Rx and Tx ready */
+ wait = XGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ usleep_range(50, 75);
+
+ status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+ if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+ XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+ goto rx_reset;
}
- /* Register on the MDIO bus (don't probe any PHYs) */
- mii->name = XGBE_PHY_NAME;
- mii->read = xgbe_mdio_read;
- mii->write = xgbe_mdio_write;
- snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
- mii->priv = pdata;
- mii->phy_mask = ~0;
- mii->parent = pdata->dev;
- ret = mdiobus_register(mii);
- if (ret) {
- dev_err(pdata->dev, "mdiobus_register failed\n");
- goto err_mdiobus_alloc;
+ netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
+ status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+}
+
+static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Enable KR training */
+ xgbe_an_enable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_xgmii_speed(pdata);
+
+ /* Set PCS to KR/10G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBR;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED10G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 10G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Disable KR training */
+ xgbe_an_disable_kr_training(pdata);
+
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_gmii_2500_speed(pdata);
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 2.5G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Disable KR training */
+ xgbe_an_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_gmii_speed(pdata);
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 1G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode *mode)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+ *mode = XGBE_MODE_KR;
+ else
+ *mode = XGBE_MODE_KX;
+}
+
+static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
+
+ xgbe_cur_mode(pdata, &mode);
+
+ return (mode == XGBE_MODE_KR);
+}
+
+static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+{
+ /* If we are in KR switch to KX, and vice-versa */
+ if (xgbe_in_kr_mode(pdata)) {
+ if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
+ xgbe_gmii_mode(pdata);
+ else
+ xgbe_gmii_2500_mode(pdata);
+ } else {
+ xgbe_xgmii_mode(pdata);
}
- DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
-
- /* Probe the PCS using Clause 45 */
- phydev = get_phy_device(mii, XGBE_PRTAD, true);
- if (IS_ERR(phydev) || !phydev ||
- !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
- dev_err(pdata->dev, "get_phy_device failed\n");
- ret = phydev ? PTR_ERR(phydev) : -ENOLINK;
- goto err_mdiobus_register;
+}
+
+static void xgbe_set_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ enum xgbe_mode cur_mode;
+
+ xgbe_cur_mode(pdata, &cur_mode);
+ if (mode != cur_mode)
+ xgbe_switch_mode(pdata);
+}
+
+static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_10000)
+ return true;
}
- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
- MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
- ret = phy_device_register(phydev);
- if (ret) {
- dev_err(pdata->dev, "phy_device_register failed\n");
- goto err_phy_device;
+ return false;
+}
+
+static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_2500)
+ return true;
}
- if (!phydev->dev.driver) {
- dev_err(pdata->dev, "phy driver probe failed\n");
- ret = -EIO;
- goto err_phy_device;
+
+ return false;
+}
+
+static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_1000)
+ return true;
}
- /* Add a reference to the PHY driver so it can't be unloaded */
- pdata->phy_module = phydev->dev.driver->owner;
- if (!try_module_get(pdata->phy_module)) {
- dev_err(pdata->dev, "try_module_get failed\n");
- ret = -EIO;
- goto err_phy_device;
+ return false;
+}
+
+static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ reg |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ reg |= MDIO_AN_CTRL1_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void xgbe_restart_an(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_an(pdata, true, true);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
+}
+
+static void xgbe_disable_an(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_an(pdata, false, false);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
+}
+
+static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg, reg;
+
+ *state = XGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!xgbe_in_kr_mode(pdata))
+ return XGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
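+ /* 0xc000 = FEC ability/FEC requested bits of advertisement register 3;
+ * enable FEC only when both ends advertise the capability
+ */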
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ reg |= pdata->fec_ability;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (reg & XGBE_KR_TRAINING_ENABLE) {
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ reg);
+
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "KR training initiated\n");
}
- pdata->mii = mii;
- pdata->mdio_mmd = MDIO_MMD_PCS;
+ return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ u16 msg;
+
+ *state = XGBE_RX_XNP;
+
+ msg = XGBE_XNP_MCF_NULL_MESSAGE;
+ msg |= XGBE_XNP_MP_FORMATTED;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+ /* Check for a supported mode, otherwise restart in a different one */
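+ /* 0x80 = 10GBASE-KR, 0x20 = 1000BASE-KX in the link partner's clause 73
+ * base page (technology ability field)
+ */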
+ link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+ if (!(reg & link_support))
+ return XGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+ ? xgbe_an_tx_xnp(pdata, state)
+ : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+ ? xgbe_an_tx_xnp(pdata, state)
+ : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_rx *state;
+ unsigned long an_timeout;
+ enum xgbe_an ret;
+
+ if (!pdata->an_start) {
+ pdata->an_start = jiffies;
+ } else {
+ an_timeout = pdata->an_start +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+
+ pdata->an_start = jiffies;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "AN timed out, resetting state\n");
+ }
+ }
- phydev->autoneg = pdata->default_autoneg;
- if (phydev->autoneg == AUTONEG_DISABLE) {
- phydev->speed = pdata->default_speed;
- phydev->duplex = DUPLEX_FULL;
+ state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
+ : &pdata->kx_state;
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ switch (*state) {
+ case XGBE_RX_BPA:
+ ret = xgbe_an_rx_bpa(pdata, state);
+ break;
+
+ case XGBE_RX_XNP:
+ ret = xgbe_an_rx_xnp(pdata, state);
+ break;
+
+ default:
+ ret = XGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
+{
+ /* Be sure we aren't looping trying to negotiate */
+ if (xgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = XGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+ !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != XGBE_RX_BPA)
+ return XGBE_AN_NO_LINK;
+ } else {
+ pdata->kx_state = XGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != XGBE_RX_BPA)
+ return XGBE_AN_NO_LINK;
+ }
+
+ xgbe_disable_an(pdata);
+
+ xgbe_switch_mode(pdata);
+
+ xgbe_restart_an(pdata);
+
+ return XGBE_AN_INCOMPAT_LINK;
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
+
+ /* Interrupt reason must be read and cleared outside of IRQ context */
+ disable_irq_nosync(pdata->an_irq);
+
+ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_an_irq_work(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_irq_work);
+
+ /* Avoid a race between enabling the IRQ and exiting the work by
+ * waiting for the work to finish and then queueing it
+ */
+ flush_work(&pdata->an_work);
+ queue_work(pdata->an_workqueue, &pdata->an_work);
+}
+
+static const char *xgbe_state_as_string(enum xgbe_an state)
+{
+ switch (state) {
+ case XGBE_AN_READY:
+ return "Ready";
+ case XGBE_AN_PAGE_RECEIVED:
+ return "Page-Received";
+ case XGBE_AN_INCOMPAT_LINK:
+ return "Incompatible-Link";
+ case XGBE_AN_COMPLETE:
+ return "Complete";
+ case XGBE_AN_NO_LINK:
+ return "No-Link";
+ case XGBE_AN_ERROR:
+ return "Error";
+ default:
+ return "Undefined";
+ }
+}
+
+static void xgbe_an_state_machine(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_work);
+ enum xgbe_an cur_state = pdata->an_state;
+ unsigned int int_reg, int_mask;
+
+ mutex_lock(&pdata->an_mutex);
+
+ /* Read the interrupt */
+ int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+ if (!int_reg)
+ goto out;
+
+next_int:
+ if (int_reg & XGBE_AN_PG_RCV) {
+ pdata->an_state = XGBE_AN_PAGE_RECEIVED;
+ int_mask = XGBE_AN_PG_RCV;
+ } else if (int_reg & XGBE_AN_INC_LINK) {
+ pdata->an_state = XGBE_AN_INCOMPAT_LINK;
+ int_mask = XGBE_AN_INC_LINK;
+ } else if (int_reg & XGBE_AN_INT_CMPLT) {
+ pdata->an_state = XGBE_AN_COMPLETE;
+ int_mask = XGBE_AN_INT_CMPLT;
+ } else {
+ pdata->an_state = XGBE_AN_ERROR;
+ int_mask = 0;
}
- pdata->phydev = phydev;
+ /* Clear the interrupt to be processed */
+ int_reg &= ~int_mask;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
+
+ pdata->an_result = pdata->an_state;
+
+again:
+ netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
+ xgbe_state_as_string(pdata->an_state));
+
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case XGBE_AN_READY:
+ pdata->an_supported = 0;
+ break;
+
+ case XGBE_AN_PAGE_RECEIVED:
+ pdata->an_state = xgbe_an_page_received(pdata);
+ pdata->an_supported++;
+ break;
+
+ case XGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+ pdata->an_state = xgbe_an_incompat_link(pdata);
+ break;
- DBGPHY_REGS(pdata);
+ case XGBE_AN_COMPLETE:
+ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
+ pdata->an_supported ? "Auto negotiation"
+ : "Parallel detection");
+ break;
- DBGPR("<--xgbe_mdio_register\n");
+ case XGBE_AN_NO_LINK:
+ break;
+
+ default:
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == XGBE_AN_NO_LINK) {
+ int_reg = 0;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ } else if (pdata->an_state == XGBE_AN_ERROR) {
+ netdev_err(pdata->netdev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ int_reg = 0;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ }
+
+ if (pdata->an_state >= XGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = XGBE_AN_READY;
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+ pdata->an_start = 0;
+
+ netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
+ }
+
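+ /* Re-run the state machine until the state stops changing, then service
+ * any remaining interrupt reasons before re-enabling the AN interrupt
+ */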
+ if (cur_state != pdata->an_state)
+ goto again;
+
+ if (int_reg)
+ goto next_int;
+
+out:
+ enable_irq(pdata->an_irq);
+
+ mutex_unlock(&pdata->an_mutex);
+}
+
+static void xgbe_an_init(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
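+ /* The advertisement registers hold the IEEE 802.3 clause 73 base page:
+ * register 1 bits 10/11 = Pause/Asym Pause, register 2 bit 7 = 10GBASE-KR
+ * and bit 5 = 1000BASE-KX, register 3 bits 14/15 = FEC ability/requested
+ */
+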
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+ (pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (pdata->phy.advertising & ADVERTISED_Pause)
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+ if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ reg &= ~XGBE_XNP_NP_EXCHANGE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
+}
+
+static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+{
+ if (pdata->tx_pause && pdata->rx_pause)
+ return "rx/tx";
+ else if (pdata->rx_pause)
+ return "rx";
+ else if (pdata->tx_pause)
+ return "tx";
+ else
+ return "off";
+}
+
+static const char *xgbe_phy_speed_string(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported";
+ }
+}
+
+static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.link)
+ netdev_info(pdata->netdev,
+ "Link is Up - %s/%s - flow control %s\n",
+ xgbe_phy_speed_string(pdata->phy.speed),
+ pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
+ xgbe_phy_fc_string(pdata));
+ else
+ netdev_info(pdata->netdev, "Link is Down\n");
+}
+
+static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+{
+ int new_state = 0;
+
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != pdata->phy.tx_pause) {
+ new_state = 1;
+ pdata->hw_if.config_tx_flow_control(pdata);
+ pdata->tx_pause = pdata->phy.tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy.rx_pause) {
+ new_state = 1;
+ pdata->hw_if.config_rx_flow_control(pdata);
+ pdata->rx_pause = pdata->phy.rx_pause;
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed) {
+ new_state = 1;
+ pdata->phy_speed = pdata->phy.speed;
+ }
+
+ if (pdata->phy_link != pdata->phy.link) {
+ new_state = 1;
+ pdata->phy_link = pdata->phy.link;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state && netif_msg_link(pdata))
+ xgbe_phy_print_status(pdata);
+}
+
+static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+{
+ netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
+
+ /* Disable auto-negotiation */
+ xgbe_disable_an(pdata);
+
+ /* Validate/Set specified speed */
+ switch (pdata->phy.speed) {
+ case SPEED_10000:
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ break;
+
+ case SPEED_2500:
+ case SPEED_1000:
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate duplex mode */
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ set_bit(XGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = jiffies;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE)
+ return xgbe_phy_config_fixed(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
+
+ /* Disable auto-negotiation interrupt */
+ disable_irq(pdata->an_irq);
+
+ /* Start auto-negotiation in a supported mode */
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+ (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ } else {
+ enable_irq(pdata->an_irq);
+ return -EINVAL;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+ xgbe_disable_an(pdata);
+
+ /* Clear any auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ pdata->an_result = XGBE_AN_READY;
+ pdata->an_state = XGBE_AN_READY;
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ enable_irq(pdata->an_irq);
+
+ /* Set up advertisement registers based on current settings */
+ xgbe_an_init(pdata);
+
+ /* Enable and start auto-negotiation */
+ xgbe_restart_an(pdata);
return 0;
+}
+
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ mutex_lock(&pdata->an_mutex);
+
+ ret = __xgbe_phy_config_aneg(pdata);
+ if (ret)
+ set_bit(XGBE_LINK_ERR, &pdata->dev_state);
+ else
+ clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
+
+ mutex_unlock(&pdata->an_mutex);
+
+ return ret;
+}
+
+static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+{
+ return (pdata->an_result == XGBE_AN_COMPLETE);
+}
+
+static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+{
+ unsigned long link_timeout;
+
+ link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+ if (time_after(jiffies, link_timeout)) {
+ netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ xgbe_phy_config_aneg(pdata);
+ }
+}
+
+static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
+{
+ if (xgbe_in_kr_mode(pdata)) {
+ pdata->phy.speed = SPEED_10000;
+ } else {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ }
+ }
+ pdata->phy.duplex = DUPLEX_FULL;
+}
+
+static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+{
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising = 0;
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ return xgbe_phy_status_force(pdata);
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
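+ /* Symmetric pause when both ends advertise Pause (bit 10); otherwise
+ * resolve using the Asym Pause (bit 11) rules
+ */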
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20) {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
+ break;
+ }
+ }
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ pdata->phy.speed = SPEED_10000;
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if (ad_reg & 0x20) {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ }
+
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ } else {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ pdata->phy.duplex = DUPLEX_FULL;
+}
+
+static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg, link_aneg;
+
+ if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+ if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+ netif_carrier_off(pdata->netdev);
+
+ pdata->phy.link = 0;
+ goto adjust_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+ /* Get the link status. Link status is latched low, so read
+ * once to clear and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+ if (pdata->phy.link) {
+ if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
+ xgbe_check_link_timeout(pdata);
+ return;
+ }
+
+ xgbe_phy_status_aneg(pdata);
+
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+ clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
+ if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
+ set_bit(XGBE_LINK, &pdata->dev_state);
+ netif_carrier_on(pdata->netdev);
+ }
+ } else {
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ xgbe_check_link_timeout(pdata);
+
+ if (link_aneg)
+ return;
+ }
+
+ xgbe_phy_status_aneg(pdata);
+
+ if (test_bit(XGBE_LINK, &pdata->dev_state)) {
+ clear_bit(XGBE_LINK, &pdata->dev_state);
+ netif_carrier_off(pdata->netdev);
+ }
+ }
+
+adjust_link:
+ xgbe_phy_adjust_link(pdata);
+}
+
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
+ /* Disable auto-negotiation */
+ xgbe_disable_an(pdata);
+
+ /* Disable auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
-err_phy_device:
- phy_device_free(phydev);
+ pdata->phy.link = 0;
+ if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+ netif_carrier_off(pdata->netdev);
-err_mdiobus_register:
- mdiobus_unregister(mii);
+ xgbe_phy_adjust_link(pdata);
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
+
+ ret = devm_request_irq(pdata->dev, pdata->an_irq,
+ xgbe_an_isr, 0, pdata->an_name,
+ pdata);
+ if (ret) {
+ netdev_err(netdev, "phy irq request failed\n");
+ return ret;
+ }
+
+ /* Set initial mode - call the mode setting routines
+ * directly to ensure we are properly configured
+ */
+ if (xgbe_use_xgmii_mode(pdata)) {
+ xgbe_xgmii_mode(pdata);
+ } else if (xgbe_use_gmii_mode(pdata)) {
+ xgbe_gmii_mode(pdata);
+ } else if (xgbe_use_gmii_2500_mode(pdata)) {
+ xgbe_gmii_2500_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_irq;
+ }
+
+ /* Set up advertisement registers based on current settings */
+ xgbe_an_init(pdata);
+
+ /* Enable auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
-err_mdiobus_alloc:
- mdiobus_free(mii);
+ return xgbe_phy_config_aneg(pdata);
+
+err_irq:
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
return ret;
}
-void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ unsigned int count, reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg |= MDIO_CTRL1_RESET;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
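+ /* Poll for the self-clearing reset bit, up to ~1 second (50 x 20ms) */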
+ count = 50;
+ do {
+ msleep(20);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ } while ((reg & MDIO_CTRL1_RESET) && --count);
+
+ if (reg & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+ /* Disable auto-negotiation for now */
+ xgbe_disable_an(pdata);
+
+ /* Clear auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return 0;
+}
+
+static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
{
- DBGPR("-->xgbe_mdio_unregister\n");
+ struct device *dev = pdata->dev;
+
+ dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+ dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+ dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+ dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+ dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_dbg(dev, "\n*************************************************\n");
+}
+
+static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ mutex_init(&pdata->an_mutex);
+ INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
+ INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ /* Initialize supported features */
+ pdata->phy.supported = SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ break;
+ }
- pdata->phydev = NULL;
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
- module_put(pdata->phy_module);
- pdata->phy_module = NULL;
+ pdata->phy.advertising = pdata->phy.supported;
- mdiobus_unregister(pdata->mii);
- pdata->mii->priv = NULL;
+ pdata->phy.address = 0;
+
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+
+ pdata->phy.link = 0;
+
+ pdata->phy.pause_autoneg = pdata->pause_autoneg;
+ pdata->phy.tx_pause = pdata->tx_pause;
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pdata->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+ }
+
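+ /* Toggling Asym Pause yields the standard encoding: rx only -> Pause |
+ * Asym Pause, tx only -> Asym Pause, rx + tx -> Pause
+ */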
+ if (pdata->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
+ if (netif_msg_drv(pdata))
+ xgbe_dump_phy_registers(pdata);
+}
+
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = xgbe_phy_init;
- mdiobus_free(pdata->mii);
- pdata->mii = NULL;
+ phy_if->phy_reset = xgbe_phy_reset;
+ phy_if->phy_start = xgbe_phy_start;
+ phy_if->phy_stop = xgbe_phy_stop;
- DBGPR("<--xgbe_mdio_unregister\n");
+ phy_if->phy_status = xgbe_phy_status;
+ phy_if->phy_config_aneg = xgbe_phy_config_aneg;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index e62dfa2de..717ce21b6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -129,7 +129,7 @@
#include <net/dcbnl.h>
#define XGBE_DRV_NAME "amd-xgbe"
-#define XGBE_DRV_VERSION "1.0.0-a"
+#define XGBE_DRV_VERSION "1.0.2"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
@@ -178,14 +178,17 @@
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
-/* MDIO bus phy name */
-#define XGBE_PHY_NAME "amd_xgbe_phy"
-#define XGBE_PRTAD 0
-
/* Common property names */
#define XGBE_MAC_ADDR_PROPERTY "mac-address"
#define XGBE_PHY_MODE_PROPERTY "phy-mode"
#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
+#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
+#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
+#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
@@ -241,6 +244,49 @@
#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
#define XGBE_RSS_HASH_KEY_TYPE 1
+/* Auto-negotiation */
+#define XGBE_AN_MS_TIMEOUT 500
+#define XGBE_LINK_TIMEOUT 10
+
+#define XGBE_AN_INT_CMPLT 0x01
+#define XGBE_AN_INC_LINK 0x02
+#define XGBE_AN_PG_RCV 0x04
+#define XGBE_AN_INT_MASK 0x07
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT 500
+
+/* Default SerDes settings */
+#define XGBE_SPEED_10000_BLWC 0
+#define XGBE_SPEED_10000_CDR 0x7
+#define XGBE_SPEED_10000_PLL 0x1
+#define XGBE_SPEED_10000_PQ 0x12
+#define XGBE_SPEED_10000_RATE 0x0
+#define XGBE_SPEED_10000_TXAMP 0xa
+#define XGBE_SPEED_10000_WORD 0x7
+#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
+#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
+
+#define XGBE_SPEED_2500_BLWC 1
+#define XGBE_SPEED_2500_CDR 0x2
+#define XGBE_SPEED_2500_PLL 0x0
+#define XGBE_SPEED_2500_PQ 0xa
+#define XGBE_SPEED_2500_RATE 0x1
+#define XGBE_SPEED_2500_TXAMP 0xf
+#define XGBE_SPEED_2500_WORD 0x1
+#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
+
+#define XGBE_SPEED_1000_BLWC 1
+#define XGBE_SPEED_1000_CDR 0x2
+#define XGBE_SPEED_1000_PLL 0x0
+#define XGBE_SPEED_1000_PQ 0xa
+#define XGBE_SPEED_1000_RATE 0x3
+#define XGBE_SPEED_1000_TXAMP 0xf
+#define XGBE_SPEED_1000_WORD 0x1
+#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -291,7 +337,8 @@ struct xgbe_buffer_data {
struct xgbe_page_alloc pa;
struct xgbe_page_alloc pa_unmap;
- dma_addr_t dma;
+ dma_addr_t dma_base;
+ unsigned long dma_off;
unsigned int dma_len;
};
@@ -334,8 +381,6 @@ struct xgbe_ring_data {
*/
unsigned int state_saved;
struct {
- unsigned int incomplete;
- unsigned int context_next;
struct sk_buff *skb;
unsigned int len;
unsigned int error;
@@ -414,6 +459,13 @@ struct xgbe_channel {
struct xgbe_ring *rx_ring;
} ____cacheline_aligned;
+enum xgbe_state {
+ XGBE_DOWN,
+ XGBE_LINK,
+ XGBE_LINK_INIT,
+ XGBE_LINK_ERR,
+};
+
enum xgbe_int {
XGMAC_INT_DMA_CH_SR_TI,
XGMAC_INT_DMA_CH_SR_TPS,
@@ -445,6 +497,57 @@ enum xgbe_mtl_fifo_size {
XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
};
+enum xgbe_speed {
+ XGBE_SPEED_1000 = 0,
+ XGBE_SPEED_2500,
+ XGBE_SPEED_10000,
+ XGBE_SPEEDS,
+};
+
+enum xgbe_an {
+ XGBE_AN_READY = 0,
+ XGBE_AN_PAGE_RECEIVED,
+ XGBE_AN_INCOMPAT_LINK,
+ XGBE_AN_COMPLETE,
+ XGBE_AN_NO_LINK,
+ XGBE_AN_ERROR,
+};
+
+enum xgbe_rx {
+ XGBE_RX_BPA = 0,
+ XGBE_RX_XNP,
+ XGBE_RX_COMPLETE,
+ XGBE_RX_ERROR,
+};
+
+enum xgbe_mode {
+ XGBE_MODE_KR = 0,
+ XGBE_MODE_KX,
+};
+
+enum xgbe_speedset {
+ XGBE_SPEEDSET_1000_10000 = 0,
+ XGBE_SPEEDSET_2500_10000,
+};
+
+struct xgbe_phy {
+ u32 supported;
+ u32 advertising;
+ u32 lp_advertising;
+
+ int address;
+
+ int autoneg;
+ int speed;
+ int duplex;
+
+ int link;
+
+ int pause_autoneg;
+ int tx_pause;
+ int rx_pause;
+};
+
struct xgbe_mmc_stats {
/* Tx Stats */
u64 txoctetcount_gb;
@@ -492,6 +595,11 @@ struct xgbe_mmc_stats {
u64 rxwatchdogerror;
};
+struct xgbe_ext_stats {
+ u64 tx_tso_packets;
+ u64 rx_split_header_packets;
+};
+
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
@@ -591,6 +699,20 @@ struct xgbe_hw_if {
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
+struct xgbe_phy_if {
+ /* For initial PHY setup */
+ void (*phy_init)(struct xgbe_prv_data *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct xgbe_prv_data *);
+ int (*phy_start)(struct xgbe_prv_data *);
+ void (*phy_stop)(struct xgbe_prv_data *);
+
+ /* For PHY support while device is up */
+ void (*phy_status)(struct xgbe_prv_data *);
+ int (*phy_config_aneg)(struct xgbe_prv_data *);
+};
+
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
@@ -660,6 +782,9 @@ struct xgbe_prv_data {
/* XGMAC/XPCS related mmio registers */
void __iomem *xgmac_regs; /* XGMAC CSRs */
void __iomem *xpcs_regs; /* XPCS MMD registers */
+ void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
+ void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
+ void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
/* Overall device lock */
spinlock_t lock;
@@ -670,10 +795,14 @@ struct xgbe_prv_data {
/* RSS addressing mutex */
struct mutex rss_mutex;
+ /* Flags representing xgbe_state */
+ unsigned long dev_state;
+
int dev_irq;
unsigned int per_channel_irq;
struct xgbe_hw_if hw_if;
+ struct xgbe_phy_if phy_if;
struct xgbe_desc_if desc_if;
/* AXI DMA settings */
@@ -682,6 +811,11 @@ struct xgbe_prv_data {
unsigned int arcache;
unsigned int awcache;
+ /* Service routine support */
+ struct workqueue_struct *dev_workqueue;
+ struct work_struct service_work;
+ struct timer_list service_timer;
+
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
unsigned int channel_count;
@@ -729,27 +863,12 @@ struct xgbe_prv_data {
u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
u32 rss_options;
- /* MDIO settings */
- struct module *phy_module;
- char *mii_bus_id;
- struct mii_bus *mii;
- int mdio_mmd;
- struct phy_device *phydev;
- int default_autoneg;
- int default_speed;
-
- /* Current PHY settings */
- phy_interface_t phy_mode;
- int phy_link;
- int phy_speed;
- unsigned int phy_tx_pause;
- unsigned int phy_rx_pause;
-
/* Netdev related settings */
unsigned char mac_addr[ETH_ALEN];
netdev_features_t netdev_features;
struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
+ struct xgbe_ext_stats ext_stats;
/* Filtering support */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -787,6 +906,54 @@ struct xgbe_prv_data {
/* Keeps track of power mode */
unsigned int power_down;
+ /* Network interface message level setting */
+ u32 msg_enable;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+
+ /* MDIO/PHY related settings */
+ struct xgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+
+ char an_name[IFNAMSIZ + 32];
+ struct workqueue_struct *an_workqueue;
+
+ int an_irq;
+ struct work_struct an_irq_work;
+
+ unsigned int speed_set;
+
+ /* SerDes UEFI configurable settings.
+ * Switching between modes/speeds requires new values for some
+ * SerDes settings. The values can be supplied as device
+ * properties in array format. The first array entry is for
+ * 1GbE, second for 2.5GbE and third for 10GbE
+ */
+ u32 serdes_blwc[XGBE_SPEEDS];
+ u32 serdes_cdr_rate[XGBE_SPEEDS];
+ u32 serdes_pq_skew[XGBE_SPEEDS];
+ u32 serdes_tx_amp[XGBE_SPEEDS];
+ u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
+ u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
+
+ /* Auto-negotiation state machine support */
+ struct mutex an_mutex;
+ enum xgbe_an an_result;
+ enum xgbe_an an_state;
+ enum xgbe_rx kr_state;
+ enum xgbe_rx kx_state;
+ struct work_struct an_work;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
+
+ unsigned int lpm_ctrl; /* CTRL1 for resume */
+
#ifdef CONFIG_DEBUG_FS
struct dentry *xgbe_debugfs;
@@ -800,6 +967,7 @@ struct xgbe_prv_data {
/* Function prototypes*/
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
struct net_device_ops *xgbe_get_netdev_ops(void);
struct ethtool_ops *xgbe_get_ethtool_ops(void);
@@ -807,14 +975,11 @@ struct ethtool_ops *xgbe_get_ethtool_ops(void);
const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
#endif
-int xgbe_mdio_register(struct xgbe_prv_data *);
-void xgbe_mdio_unregister(struct xgbe_prv_data *);
-void xgbe_dump_phy_registers(struct xgbe_prv_data *);
void xgbe_ptp_register(struct xgbe_prv_data *);
void xgbe_ptp_unregister(struct xgbe_prv_data *);
-void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
- unsigned int);
-void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
+ unsigned int, unsigned int, unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
unsigned int);
void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
void xgbe_get_all_hw_features(struct xgbe_prv_data *);
@@ -831,18 +996,6 @@ static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
#endif /* CONFIG_DEBUG_FS */
-/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_DESC_DUMP
-#define XGMAC_ENABLE_RX_DESC_DUMP
-#endif
-
-/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_PKT_DUMP
-#define XGMAC_ENABLE_RX_PKT_DUMP
-#endif
-
/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
#if 0
#define YDEBUG
@@ -852,10 +1005,8 @@ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
/* For debug prints */
#ifdef YDEBUG
#define DBGPR(x...) pr_alert(x)
-#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
#else
#define DBGPR(x...) do { } while (0)
-#define DBGPHY_REGS(x...) do { } while (0)
#endif
#ifdef YDEBUG_MDIO
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
index 68be56554..700b5abe5 100644
--- a/drivers/net/ethernet/apm/xgene/Makefile
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -3,5 +3,5 @@
#
xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
- xgene_enet_main.o xgene_enet_ethtool.o
+ xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o
obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index b927021c6..cfa37041a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -87,10 +87,11 @@ static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
int i;
xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
- for (i = 0; i < NUM_RING_CONFIG; i++) {
+ for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
ring->state[i]);
}
@@ -98,7 +99,7 @@ static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
- memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
+ memset(ring->state, 0, sizeof(ring->state));
xgene_enet_write_ring_state(ring);
}
@@ -141,8 +142,8 @@ static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
- struct xgene_enet_desc_ring *ring)
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+ struct xgene_enet_desc_ring *ring)
{
u32 size = ring->size;
u32 i, data;
@@ -168,7 +169,7 @@ struct xgene_enet_desc_ring *xgene_enet_setup_ring(
return ring;
}
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
u32 data;
bool is_bufpool;
@@ -186,6 +187,22 @@ out:
xgene_enet_clr_ring_state(ring);
}
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+ iowrite32(count, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state, num_msgs;
+
+ ring_state = ioread32(&cmd_base[1]);
+ num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
+
+ return num_msgs;
+}
+
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
@@ -593,7 +610,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- if (pdata->clk) {
+ if (!IS_ERR(pdata->clk)) {
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
@@ -612,7 +629,8 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
- clk_disable_unprepare(pdata->clk);
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
}
static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -734,7 +752,7 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
if (ret)
return -EINVAL;
- phy = get_phy_device(mdio, phy_id, true);
+ phy = get_phy_device(mdio, phy_id, false);
if (!phy || IS_ERR(phy))
return -EIO;
@@ -783,6 +801,9 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
+ if (pdata->phy_dev)
+ phy_disconnect(pdata->phy_dev);
+
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
pdata->mdio_bus = NULL;
@@ -803,3 +824,12 @@ struct xgene_port_ops xgene_gport_ops = {
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_gport_shutdown,
};
+
+struct xgene_ring_ops xgene_ring1_ops = {
+ .num_ring_config = NUM_RING_CONFIG,
+ .num_ring_id_shift = 6,
+ .setup = xgene_enet_setup_ring,
+ .clear = xgene_enet_clear_ring,
+ .wr_cmd = xgene_enet_wr_cmd,
+ .len = xgene_enet_ring_len,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index d9bc89d69..541bed056 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -26,6 +26,7 @@
struct xgene_enet_pdata;
struct xgene_enet_stats;
+struct xgene_enet_desc_ring;
/* clears and then set bits */
static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
@@ -101,8 +102,8 @@ enum xgene_enet_rm {
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_RING_IF_OFFSET 0x9000
+#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
-
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
@@ -261,6 +262,7 @@ enum xgene_enet_ring_type {
enum xgene_ring_owner {
RING_OWNER_ETH0,
+ RING_OWNER_ETH1,
RING_OWNER_CPU = 15,
RING_OWNER_INVALID
};
@@ -314,9 +316,6 @@ static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
size / WORK_DESC_SIZE;
}
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
- struct xgene_enet_desc_ring *ring);
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring);
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
@@ -327,5 +326,6 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
+extern struct xgene_ring_ops xgene_ring1_ops;
#endif /* __XGENE_ENET_HW_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 40d3530d7..a02ea7f8f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -28,6 +28,9 @@
#define RES_RING_CSR 1
#define RES_RING_CMD 2
+static const struct of_device_id xgene_enet_of_match[];
+static const struct acpi_device_id xgene_enet_acpi_match[];
+
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
@@ -48,6 +51,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
{
struct sk_buff *skb;
struct xgene_enet_raw_desc16 *raw_desc;
+ struct xgene_enet_pdata *pdata;
struct net_device *ndev;
struct device *dev;
dma_addr_t dma_addr;
@@ -58,6 +62,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
ndev = buf_pool->ndev;
dev = ndev_to_dev(buf_pool->ndev);
+ pdata = netdev_priv(ndev);
bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
len = XGENE_ENET_MAX_MTU;
@@ -82,7 +87,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
tail = (tail + 1) & slots;
}
- iowrite32(nbuf, buf_pool->cmd);
+ pdata->ring_ops->wr_cmd(buf_pool, nbuf);
buf_pool->tail = tail;
return 0;
@@ -102,26 +107,16 @@ static u8 xgene_enet_hdr_len(const void *data)
return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
-static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
-{
- u32 __iomem *cmd_base = ring->cmd_base;
- u32 ring_state, num_msgs;
-
- ring_state = ioread32(&cmd_base[1]);
- num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);
-
- return num_msgs >> NUMMSGSINQ_POS;
-}
-
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
struct xgene_enet_raw_desc16 *raw_desc;
u32 slots = buf_pool->slots - 1;
u32 tail = buf_pool->tail;
u32 userinfo;
int i, len;
- len = xgene_enet_ring_len(buf_pool);
+ len = pdata->ring_ops->len(buf_pool);
for (i = 0; i < len; i++) {
tail = (tail - 1) & slots;
raw_desc = &buf_pool->raw_desc16[tail];
@@ -131,7 +126,7 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
}
- iowrite32(-len, buf_pool->cmd);
+ pdata->ring_ops->wr_cmd(buf_pool, -len);
buf_pool->tail = tail;
}
@@ -263,8 +258,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
u32 tx_level, cq_level;
- tx_level = xgene_enet_ring_len(tx_ring);
- cq_level = xgene_enet_ring_len(cp_ring);
+ tx_level = pdata->ring_ops->len(tx_ring);
+ cq_level = pdata->ring_ops->len(cp_ring);
if (unlikely(tx_level > pdata->tx_qcnt_hi ||
cq_level > pdata->cp_qcnt_hi)) {
netif_stop_queue(ndev);
@@ -276,7 +271,7 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- iowrite32(1, tx_ring->cmd);
+ pdata->ring_ops->wr_cmd(tx_ring, 1);
skb_tx_timestamp(skb);
tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
@@ -389,11 +384,11 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
} while (--budget);
if (likely(count)) {
- iowrite32(-count, ring->cmd);
+ pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
if (netif_queue_stopped(ring->ndev)) {
- if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
+ if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
netif_wake_queue(ring->ndev);
}
}
@@ -510,6 +505,7 @@ static int xgene_enet_open(struct net_device *ndev)
else
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
+ netif_carrier_off(ndev);
netif_start_queue(ndev);
return ret;
@@ -545,7 +541,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
pdata = netdev_priv(ring->ndev);
dev = ndev_to_dev(ring->ndev);
- xgene_enet_clear_ring(ring);
+ pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
@@ -598,15 +594,17 @@ static int xgene_enet_get_ring_size(struct device *dev,
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
+ struct xgene_enet_pdata *pdata;
struct device *dev;
if (!ring)
return;
dev = ndev_to_dev(ring->ndev);
+ pdata = netdev_priv(ring->ndev);
if (ring->desc_addr) {
- xgene_enet_clear_ring(ring);
+ pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
devm_kfree(dev, ring);
@@ -637,6 +635,25 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
}
}
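+/* Second generation (XGENE_ENET2) hardware uses a DMA-coherent interrupt
+ * mailbox for CPU-owned rings, so only those rings need one allocated
+ */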
+static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ if ((pdata->enet_id == XGENE_ENET2) &&
+ (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
+ return true;
+ }
+
+ return false;
+}
+
+static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
+
+ return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
+}
+
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
struct net_device *ndev, u32 ring_num,
enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
@@ -668,9 +685,20 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
}
ring->size = size;
- ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
+ if (is_irq_mbox_required(pdata, ring)) {
+ ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
+ &ring->irq_mbox_dma, GFP_KERNEL);
+ if (!ring->irq_mbox_addr) {
+ dma_free_coherent(dev, size, ring->desc_addr,
+ ring->dma);
+ devm_kfree(dev, ring);
+ return NULL;
+ }
+ }
+
+ ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
- ring = xgene_enet_setup_ring(ring);
+ ring = pdata->ring_ops->setup(ring);
netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
ring->num, ring->size, ring->id, ring->slots);
@@ -682,12 +710,34 @@ static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
return (owner << 6) | (bufnum & GENMASK(5, 0));
}
+static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
+{
+ enum xgene_ring_owner owner;
+
+ if (p->enet_id == XGENE_ENET1) {
+ switch (p->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ owner = RING_OWNER_ETH0;
+ break;
+ default:
+ owner = (!p->port_id) ? RING_OWNER_ETH0 :
+ RING_OWNER_ETH1;
+ break;
+ }
+ } else {
+ owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
+ }
+
+ return owner;
+}
+
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
+ enum xgene_ring_owner owner;
u8 cpu_bufnum = pdata->cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
@@ -696,6 +746,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
int ret;
/* allocate rx descriptor ring */
+ owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
@@ -705,7 +756,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
/* allocate buffer pool for receiving packets */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_2KB, ring_id);
if (!buf_pool) {
@@ -734,7 +786,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
pdata->rx_ring = rx_ring;
/* allocate tx descriptor ring */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
if (!tx_ring) {
@@ -818,20 +871,38 @@ static const struct net_device_ops xgene_ndev_ops = {
.ndo_set_mac_address = xgene_enet_set_mac_address,
};
-static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
+#ifdef CONFIG_ACPI
+static int xgene_get_port_id_acpi(struct device *dev,
+ struct xgene_enet_pdata *pdata)
+{
+ acpi_status status;
+ u64 temp;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
+ if (ACPI_FAILURE(status)) {
+ pdata->port_id = 0;
+ } else {
+ pdata->port_id = temp;
+ }
+
+ return 0;
+}
+#endif
+
+static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
u32 id = 0;
int ret;
- ret = device_property_read_u32(dev, "port-id", &id);
- if (!ret && id > 1) {
- dev_err(dev, "Incorrect port-id specified\n");
- return -ENODEV;
+ ret = of_property_read_u32(dev->of_node, "port-id", &id);
+ if (ret) {
+ pdata->port_id = 0;
+ ret = 0;
+ } else {
+ pdata->port_id = id & BIT(0);
}
- pdata->port_id = id;
-
- return 0;
+ return ret;
}
static int xgene_get_mac_address(struct device *dev,
@@ -876,7 +947,8 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
struct device *dev;
struct resource *res;
void __iomem *base_addr;
- int ret;
+ u32 offset;
+ int ret = 0;
pdev = pdata->pdev;
dev = &pdev->dev;
@@ -917,7 +989,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
return -ENOMEM;
}
- ret = xgene_get_port_id(dev, pdata);
+ if (dev->of_node)
+ ret = xgene_get_port_id_dt(dev, pdata);
+#ifdef CONFIG_ACPI
+ else
+ ret = xgene_get_port_id_acpi(dev, pdata);
+#endif
if (ret)
return ret;
@@ -949,27 +1026,35 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
ret = platform_get_irq(pdev, 1);
if (ret <= 0) {
- dev_err(dev, "Unable to get ENET Tx completion IRQ\n");
- ret = ret ? : -ENXIO;
- return ret;
+ pdata->cq_cnt = 0;
+ dev_info(dev, "Unable to get Tx completion IRQ,"
+ "using Rx IRQ instead\n");
+ } else {
+ pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
+ pdata->txc_irq = ret;
}
- pdata->txc_irq = ret;
}
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Firmware may have set up the clock already. */
- pdata->clk = NULL;
+ dev_info(dev, "clocks have been setup already\n");
}
- base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+ base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+ else
+ base_addr = pdata->base_addr;
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
- pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+ offset = (pdata->enet_id == XGENE_ENET1) ?
+ BLOCK_ETH_MAC_CSR_OFFSET :
+ X2_BLOCK_ETH_MAC_CSR_OFFSET;
+ pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
@@ -1024,33 +1109,52 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
pdata->mac_ops = &xgene_sgmac_ops;
pdata->port_ops = &xgene_sgport_ops;
pdata->rm = RM1;
- pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
break;
default:
pdata->mac_ops = &xgene_xgmac_ops;
pdata->port_ops = &xgene_xgport_ops;
pdata->rm = RM0;
- pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
break;
}
- switch (pdata->port_id) {
- case 0:
- pdata->cpu_bufnum = START_CPU_BUFNUM_0;
- pdata->eth_bufnum = START_ETH_BUFNUM_0;
- pdata->bp_bufnum = START_BP_BUFNUM_0;
- pdata->ring_num = START_RING_NUM_0;
- break;
- case 1:
- pdata->cpu_bufnum = START_CPU_BUFNUM_1;
- pdata->eth_bufnum = START_ETH_BUFNUM_1;
- pdata->bp_bufnum = START_BP_BUFNUM_1;
- pdata->ring_num = START_RING_NUM_1;
- break;
- default:
- break;
+ if (pdata->enet_id == XGENE_ENET1) {
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = START_BP_BUFNUM_0;
+ pdata->ring_num = START_RING_NUM_0;
+ break;
+ case 1:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = START_BP_BUFNUM_1;
+ pdata->ring_num = START_RING_NUM_1;
+ break;
+ default:
+ break;
+ }
+ pdata->ring_ops = &xgene_ring1_ops;
+ } else {
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+ pdata->ring_num = X2_START_RING_NUM_0;
+ break;
+ case 1:
+ pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
+ pdata->ring_num = X2_START_RING_NUM_1;
+ break;
+ default:
+ break;
+ }
+ pdata->rm = RM0;
+ pdata->ring_ops = &xgene_ring2_ops;
}
-
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
@@ -1086,6 +1190,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
struct xgene_mac_ops *mac_ops;
+ const struct of_device_id *of_id;
int ret;
ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
@@ -1104,6 +1209,24 @@ static int xgene_enet_probe(struct platform_device *pdev)
NETIF_F_GSO |
NETIF_F_GRO;
+ of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
+ if (of_id) {
+ pdata->enet_id = (enum xgene_enet_id)of_id->data;
+ }
+#ifdef CONFIG_ACPI
+ else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
+ if (acpi_id)
+ pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
+ }
+#endif
+ if (!pdata->enet_id) {
+ free_netdev(ndev);
+ return -ENODEV;
+ }
+
ret = xgene_enet_get_resources(pdata);
if (ret)
goto err;
@@ -1154,9 +1277,10 @@ static int xgene_enet_remove(struct platform_device *pdev)
mac_ops->tx_disable(pdata);
xgene_enet_napi_del(pdata);
- xgene_enet_mdio_remove(pdata);
- xgene_enet_delete_desc_rings(pdata);
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
+ xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
@@ -1165,9 +1289,11 @@ static int xgene_enet_remove(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
- { "APMC0D05", },
- { "APMC0D30", },
- { "APMC0D31", },
+ { "APMC0D05", XGENE_ENET1},
+ { "APMC0D30", XGENE_ENET1},
+ { "APMC0D31", XGENE_ENET1},
+ { "APMC0D26", XGENE_ENET2},
+ { "APMC0D25", XGENE_ENET2},
{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1175,9 +1301,11 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
- {.compatible = "apm,xgene-enet",},
- {.compatible = "apm,xgene1-sgenet",},
- {.compatible = "apm,xgene1-xgenet",},
+ {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+ {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
{},
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 8f3d232b0..1c85fc877 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -35,6 +35,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
@@ -51,12 +52,26 @@
#define START_BP_BUFNUM_1 0x2A
#define START_RING_NUM_1 264
+#define X2_START_CPU_BUFNUM_0 0
+#define X2_START_ETH_BUFNUM_0 0
+#define X2_START_BP_BUFNUM_0 0x20
+#define X2_START_RING_NUM_0 0
+#define X2_START_CPU_BUFNUM_1 0xc
+#define X2_START_ETH_BUFNUM_1 0
+#define X2_START_BP_BUFNUM_1 0x20
+#define X2_START_RING_NUM_1 256
+
#define IRQ_ID_SIZE 16
#define XGENE_MAX_TXC_RINGS 1
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
+enum xgene_enet_id {
+ XGENE_ENET1 = 1,
+ XGENE_ENET2
+};
+
/* software context of a descriptor ring */
struct xgene_enet_desc_ring {
struct net_device *ndev;
@@ -68,10 +83,12 @@ struct xgene_enet_desc_ring {
u16 irq;
char irq_name[IRQ_ID_SIZE];
u32 size;
- u32 state[NUM_RING_CONFIG];
+ u32 state[X2_NUM_RING_CONFIG];
void __iomem *cmd_base;
void __iomem *cmd;
dma_addr_t dma;
+ dma_addr_t irq_mbox_dma;
+ void *irq_mbox_addr;
u16 dst_ring_num;
u8 nbufpool;
struct sk_buff *(*rx_skb);
@@ -105,6 +122,15 @@ struct xgene_port_ops {
void (*shutdown)(struct xgene_enet_pdata *pdata);
};
+struct xgene_ring_ops {
+ u8 num_ring_config;
+ u8 num_ring_id_shift;
+ struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *);
+ void (*clear)(struct xgene_enet_desc_ring *);
+ void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
+ u32 (*len)(struct xgene_enet_desc_ring *);
+};
+
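/* Illustrative sketch (not part of the patch): with the new ops table,
 * ring accesses in the core driver go through pdata->ring_ops instead of
 * calling first-generation helpers directly, e.g.:
 *
 *	pdata->ring_ops->wr_cmd(tx_ring, count);	// kick TX
 *	pending = pdata->ring_ops->len(rx_ring);	// ring occupancy
 *	ring = pdata->ring_ops->setup(ring);		// at init time
 *
 * so the same code paths can drive both the ENET1 and ENET2 ring managers.
 */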
/* ethernet private data */
struct xgene_enet_pdata {
struct net_device *ndev;
@@ -113,6 +139,7 @@ struct xgene_enet_pdata {
int phy_speed;
struct clk *clk;
struct platform_device *pdev;
+ enum xgene_enet_id enet_id;
struct xgene_enet_desc_ring *tx_ring;
struct xgene_enet_desc_ring *rx_ring;
char *dev_name;
@@ -136,6 +163,7 @@ struct xgene_enet_pdata {
struct rtnl_link_stats64 stats;
struct xgene_mac_ops *mac_ops;
struct xgene_port_ops *port_ops;
+ struct xgene_ring_ops *ring_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
new file mode 100644
index 000000000..0b6896bb3
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -0,0 +1,200 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
+
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+ u64 addr = ring->dma;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+ ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
+ ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
+ }
+ ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+
+ addr >>= 8;
+ ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
+
+ addr >>= 27;
+ ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
+ | ACCEPTLERR
+ | SET_VAL(RINGADDRH, addr);
+ ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
+ ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
+}
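/* Illustrative packing (not from the patch), assuming the 27-bit
 * RINGADDRL width implied by the second shift: the ring base must be
 * 256-byte aligned, and for a base of e.g. 0x87ffff00:
 *	addr = 0x87ffff00 >> 8  = 0x87ffff  -> SET_VAL(RINGADDRL, ...)
 *	addr >>= 27             = 0         -> SET_VAL(RINGADDRH, ...)
 * i.e. bits [34:8] of the DMA address land in RINGADDRL and the remaining
 * high bits in RINGADDRH.
 */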
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+ bool is_bufpool;
+ u32 val;
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+ val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+ ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
+ if (is_bufpool)
+ ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+
+ ring_cfg[3] |= RECOMBBUF;
+ ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+ u32 offset, u32 data)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+ iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+ int i;
+
+ xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+ for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
+ xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+ ring->state[i]);
+ }
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ memset(ring->state, 0, sizeof(ring->state));
+ xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ enum xgene_ring_owner owner;
+
+ xgene_enet_ring_set_type(ring);
+
+ owner = xgene_enet_ring_owner(ring->id);
+ if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
+ xgene_enet_ring_set_recombbuf(ring);
+
+ xgene_enet_ring_init(ring);
+ xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+ u32 ring_id_val, ring_id_buf;
+ bool is_bufpool;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
+ return;
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+ ring_id_val = ring->id & GENMASK(9, 0);
+ ring_id_val |= OVERWRITE;
+
+ ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+ ring_id_buf |= PREFETCH_BUF_EN;
+ if (is_bufpool)
+ ring_id_buf |= IS_BUFFER_POOL;
+
+ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+ u32 ring_id;
+
+ ring_id = ring->id | OVERWRITE;
+ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+ struct xgene_enet_desc_ring *ring)
+{
+ bool is_bufpool;
+ u32 addr, i;
+
+ xgene_enet_clr_ring_state(ring);
+ xgene_enet_set_ring_state(ring);
+ xgene_enet_set_ring_id(ring);
+
+ ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+ if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+ return ring;
+
+ addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
+ xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
+
+ for (i = 0; i < ring->slots; i++)
+ xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
+
+ return ring;
+}
+
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+ xgene_enet_clr_desc_ring_id(ring);
+ xgene_enet_clr_ring_state(ring);
+}
+
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+ u32 data = 0;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+ data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
+ INTR_CLEAR;
+ }
+ data |= (count & GENMASK(16, 0));
+
+ iowrite32(data, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state, num_msgs;
+
+ ring_state = ioread32(&cmd_base[1]);
+ num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
+
+ return num_msgs;
+}
+
+struct xgene_ring_ops xgene_ring2_ops = {
+ .num_ring_config = X2_NUM_RING_CONFIG,
+ .num_ring_id_shift = 13,
+ .setup = xgene_enet_setup_ring,
+ .clear = xgene_enet_clear_ring,
+ .wr_cmd = xgene_enet_wr_cmd,
+ .len = xgene_enet_ring_len,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
new file mode 100644
index 000000000..8b235db23
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
@@ -0,0 +1,49 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_RING2_H__
+#define __XGENE_ENET_RING2_H__
+
+#include "xgene_enet_main.h"
+
+#define X2_NUM_RING_CONFIG 6
+
+#define INTR_MBOX_SIZE 1024
+#define CSR_VMID0_INTR_MBOX 0x0270
+#define INTR_CLEAR BIT(23)
+
+#define X2_MSG_AM_POS 10
+#define X2_QBASE_AM_POS 11
+#define X2_INTLINE_POS 24
+#define X2_INTLINE_LEN 5
+#define X2_CFGCRID_POS 29
+#define X2_CFGCRID_LEN 3
+#define X2_SELTHRSH_POS 7
+#define X2_SELTHRSH_LEN 3
+#define X2_RINGTYPE_POS 23
+#define X2_RINGTYPE_LEN 2
+#define X2_DEQINTEN_POS 29
+#define X2_RECOMTIMEOUT_POS 0
+#define X2_RECOMTIMEOUT_LEN 7
+#define X2_NUMMSGSINQ_POS 0
+#define X2_NUMMSGSINQ_LEN 17
+
+extern struct xgene_ring_ops xgene_ring2_ops;
+
+#endif /* __XGENE_ENET_RING2_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f27fb6f2a..05b817e56 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -21,6 +21,7 @@
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
+#include "xgene_enet_xgmac.h"
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
@@ -39,6 +40,14 @@ static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
iowrite32(val, p->eth_diag_csr_addr + offset);
}
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+ iowrite32(val, addr);
+}
+
static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
u32 wr_addr, u32 wr_data)
{
@@ -140,8 +149,9 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
- u32 val = 0xffffffff;
+ u32 val;
+ val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}
@@ -227,6 +237,8 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
u32 data, loop = 10;
u32 offset = p->port_id * 4;
+ u32 enet_spare_cfg_reg, rsif_config_reg;
+ u32 cfg_bypass_reg, rx_dv_gate_reg;
xgene_sgmac_reset(p);
@@ -239,7 +251,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
SGMII_STATUS_ADDR >> 2);
if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
break;
- usleep_range(10, 20);
+ usleep_range(1000, 2000);
}
if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
netdev_err(p->ndev, "Auto-negotiation failed\n");
@@ -249,33 +261,38 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
- data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
+ if (p->enet_id == XGENE_ENET1) {
+ enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
+ rsif_config_reg = RSIF_CONFIG_REG_ADDR;
+ cfg_bypass_reg = CFG_BYPASS_ADDR;
+ rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
+ } else {
+ enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
+ rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
+ cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
+ rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
+ }
+
+ data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
data |= MPA_IDLE_WITH_QMI_EMPTY;
- xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);
+ xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
xgene_sgmac_set_mac_addr(p);
- data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
- data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
- xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);
-
/* Adjust MDC clock frequency */
data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&data, 7);
xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
/* Enable drop if bufpool not available */
- data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
+ data = xgene_enet_rd_csr(p, rsif_config_reg);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
- xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);
-
- /* Rtype should be copied from FP */
- xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
+ xgene_enet_wr_csr(p, rsif_config_reg, data);
/* Bypass traffic gating */
- xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
- xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
- xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
+ xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+ xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+ xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -317,9 +334,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
if (!xgene_ring_mgr_init(p))
return -ENODEV;
- clk_prepare_enable(p->clk);
- clk_disable_unprepare(p->clk);
- clk_prepare_enable(p->clk);
+ if (!IS_ERR(p->clk)) {
+ clk_prepare_enable(p->clk);
+ clk_disable_unprepare(p->clk);
+ clk_prepare_enable(p->clk);
+ }
xgene_enet_ecc_init(p);
xgene_enet_config_ring_if_assoc(p);
@@ -331,19 +350,29 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
u32 dst_ring_num, u16 bufpool_id)
{
u32 data, fpsel;
+ u32 cle_bypass_reg0, cle_bypass_reg1;
u32 offset = p->port_id * MAC_OFFSET;
+ if (p->enet_id == XGENE_ENET1) {
+ cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
+ cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
+ } else {
+ cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
+ cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
+ }
+
data = CFG_CLE_BYPASS_EN0;
- xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
+ xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
- xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
+ xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
- clk_disable_unprepare(p->clk);
+ if (!IS_ERR(p->clk))
+ clk_disable_unprepare(p->clk);
}
static void xgene_enet_link_state(struct work_struct *work)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index a18a9d1f1..05edb847c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -122,7 +122,6 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
-
static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
@@ -257,9 +256,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- clk_prepare_enable(pdata->clk);
- clk_disable_unprepare(pdata->clk);
- clk_prepare_enable(pdata->clk);
+ if (!IS_ERR(pdata->clk)) {
+ clk_prepare_enable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
+ clk_prepare_enable(pdata->clk);
+ }
xgene_enet_ecc_init(pdata);
xgene_enet_config_ring_if_assoc(pdata);
@@ -286,7 +287,8 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
- clk_disable_unprepare(pdata->clk);
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
}
static void xgene_enet_link_state(struct work_struct *work)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 5a5296a6d..bf0a99435 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -21,9 +21,28 @@
#ifndef __XGENE_ENET_XGMAC_H__
#define __XGENE_ENET_XGMAC_H__
+#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
+#define XGENET_CONFIG_REG_ADDR 0x20
+#define XGENET_SRST_ADDR 0x00
+#define XGENET_CLKEN_ADDR 0x08
+
+#define CSR_CLK BIT(0)
+#define XGENET_CLK BIT(1)
+#define PCS_CLK BIT(3)
+#define AN_REF_CLK BIT(4)
+#define AN_CLK BIT(5)
+#define AD_CLK BIT(6)
+
+#define CSR_RST BIT(0)
+#define XGENET_RST BIT(1)
+#define PCS_RST BIT(3)
+#define AN_REF_RST BIT(4)
+#define AN_RST BIT(5)
+#define AD_RST BIT(6)
+
#define AXGMAC_CONFIG_0 0x0000
#define AXGMAC_CONFIG_1 0x0004
#define HSTMACRST BIT(31)
@@ -38,6 +57,7 @@
#define HSTMACADR_MSW_ADDR 0x0014
#define HSTMAXFRAME_LENGTH_ADDR 0x0020
+#define XG_MCX_RX_DV_GATE_REG_0_ADDR 0x0004
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index 1375e2dc9..d19a41b0c 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_APPLE
default y
depends on (PPC_PMAC && PPC32) || MAC
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -59,7 +57,6 @@ config MACMACE
---help---
Support for the onboard AMD 79C940 MACE Ethernet controller used in
the 660AV and 840AV Macintosh. If you have one of these Macintoshes
- say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ say Y here.
endif # NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index dea29ee24..52a6b16f5 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -6,9 +6,7 @@ config NET_VENDOR_ARC
bool "ARC devices"
default y
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 58ad37c73..e05b25675 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_ATHEROS
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index a6f9142b9..8be9eab73 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -26,8 +26,7 @@ config B44
select PHYLIB
---help---
If you have a network (Ethernet) controller of this type, say Y
- or M and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ or M here.
To compile this driver as a module, choose M here. The module
will be called b44.
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 3e9c3fc75..65d88d7c5 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -1,6 +1,8 @@
#ifndef _B44_H
#define _B44_H
+#include <linux/brcmphy.h>
+
/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
#define B44_DEVCTRL 0x0000UL /* Device Control */
#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */
@@ -281,8 +283,10 @@ struct ring_info {
};
#define B44_MCAST_TABLE_SIZE 32
-#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */
-#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */
+/* no local phy regs, e.g: Broadcom switches pseudo-PHY */
+#define B44_PHY_ADDR_NO_LOCAL_PHY BRCM_PSEUDO_PHY_ADDR
+/* no phy present at all */
+#define B44_PHY_ADDR_NO_PHY 31
#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 783543ad1..4566cdf0b 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -456,6 +456,67 @@ static int bcm_sysport_set_wol(struct net_device *dev,
return 0;
}
+static int bcm_sysport_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
+
+ ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
+ ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
+
+ reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+
+ ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
+ ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+
+ return 0;
+}
+
+static int bcm_sysport_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+
+ /* Base system clock is 125 MHz, DMA timeout is this reference clock
+ * divided by 1024, which yields roughly 8.192 us; the maximum value
+ * has to fit in the RING_TIMEOUT_MASK (16 bits).
+ */
+ if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
+ ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
+ ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
+ ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
+ (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
+ reg &= ~(RING_INTR_THRESH_MASK |
+ RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+ reg |= ec->tx_max_coalesced_frames;
+ reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
+ RING_TIMEOUT_SHIFT;
+ tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+ }
+
+ reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+ reg &= ~(RDMA_INTR_THRESH_MASK |
+ RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+ reg |= ec->rx_max_coalesced_frames;
+ reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
+ RDMA_TIMEOUT_SHIFT;
+ rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+
+ return 0;
+}
+
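/* Illustrative numbers (not from the patch): with the 8.192 us tick
 * derived above, a request of 100 us maps to
 *	DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks,
 * and reading it back via get_coalesce yields 13 * 8192 / 1000 = 106 us,
 * so settings are effectively rounded to this ~8 us granularity.
 */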
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
dev_kfree_skb_any(cb->skb);
@@ -463,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
dma_unmap_addr_set(cb, dma_addr, 0);
}
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
- struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
{
struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev;
+ struct sk_buff *skb, *rx_skb;
dma_addr_t mapping;
- int ret;
- cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
- if (!cb->skb) {
+ /* Allocate a new SKB for a new packet */
+ skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!skb) {
+ priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
- return -ENOMEM;
+ return NULL;
}
- mapping = dma_map_single(kdev, cb->skb->data,
+ mapping = dma_map_single(kdev, skb->data,
RX_BUF_LENGTH, DMA_FROM_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
+ if (dma_mapping_error(kdev, mapping)) {
priv->mib.rx_dma_failed++;
- bcm_sysport_free_cb(cb);
+ dev_kfree_skb_any(skb);
netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
- return ret;
+ return NULL;
}
- dma_unmap_addr_set(cb, dma_addr, mapping);
- dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+ /* Grab the current SKB on the ring */
+ rx_skb = cb->skb;
+ if (likely(rx_skb))
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
- priv->rx_bd_assign_index++;
- priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
- priv->rx_bd_assign_ptr = priv->rx_bds +
- (priv->rx_bd_assign_index * DESC_SIZE);
+ /* Put the new SKB on the ring */
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, cb->bd_addr, mapping);
netif_dbg(priv, rx_status, ndev, "RX refill\n");
- return 0;
+ /* Return the current SKB to the caller */
+ return rx_skb;
}
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
struct bcm_sysport_cb *cb;
- int ret = 0;
+ struct sk_buff *skb;
unsigned int i;
for (i = 0; i < priv->num_rx_bds; i++) {
- cb = &priv->rx_cbs[priv->rx_bd_assign_index];
- if (cb->skb)
- continue;
-
- ret = bcm_sysport_rx_refill(priv, cb);
- if (ret)
- break;
+ cb = &priv->rx_cbs[i];
+ skb = bcm_sysport_rx_refill(priv, cb);
+ if (skb)
+ dev_kfree_skb(skb);
+ if (!cb->skb)
+ return -ENOMEM;
}
- return ret;
+ return 0;
}
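/* Shape of the swap-refill scheme above, as an illustrative sketch with
 * hypothetical example_* helpers (not from the patch): a replacement
 * buffer is allocated and mapped *before* the old one is handed to the
 * stack, so an allocation failure leaves the ring entry populated and
 * the packet is simply dropped instead of losing the descriptor.
 *
 *	new = example_alloc_and_map();
 *	if (!new)
 *		return NULL;		// keep the old skb on the ring
 *	old = cb->skb;			// unmap, pass to the stack
 *	cb->skb = new;
 *	return old;
 */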
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int budget)
{
- struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev;
unsigned int processed = 0, to_process;
struct bcm_sysport_cb *cb;
@@ -531,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int p_index;
u16 len, status;
struct bcm_rsb *rsb;
- int ret;
/* Determine how much we should process since last call */
p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -549,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr];
- skb = cb->skb;
+ skb = bcm_sysport_rx_refill(priv, cb);
- processed++;
- priv->rx_read_ptr++;
-
- if (priv->rx_read_ptr == priv->num_rx_bds)
- priv->rx_read_ptr = 0;
/* We do not have a backing SKB, so we do not have a corresponding
* DMA mapping for this incoming packet since
@@ -566,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
netif_err(priv, rx_err, ndev, "out of memory!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- goto refill;
+ goto next;
}
- dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
- RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
/* Extract the Receive Status Block prepended */
rsb = (struct bcm_rsb *)skb->data;
len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -583,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
p_index, priv->rx_c_index, priv->rx_read_ptr,
len, status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, ndev, "oversized packet\n");
+ ndev->stats.rx_length_errors++;
+ ndev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- bcm_sysport_free_cb(cb);
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -597,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_over_errors++;
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- bcm_sysport_free_cb(cb);
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
skb_put(skb, len);
@@ -625,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_bytes += len;
napi_gro_receive(&priv->napi, skb);
-refill:
- ret = bcm_sysport_rx_refill(priv, cb);
- if (ret)
- priv->mib.alloc_rx_buff_failed++;
+next:
+ processed++;
+ priv->rx_read_ptr++;
+
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
}
return processed;
@@ -1269,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
+ struct bcm_sysport_cb *cb;
u32 reg;
int ret;
+ int i;
/* Initialize SW view of the RX ring */
priv->num_rx_bds = NUM_RX_DESC;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
- priv->rx_bd_assign_ptr = priv->rx_bds;
- priv->rx_bd_assign_index = 0;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
@@ -1286,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
return -ENOMEM;
}
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = priv->rx_cbs + i;
+ cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+ }
+
ret = bcm_sysport_alloc_rx_bufs(priv);
if (ret) {
netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
@@ -1641,6 +1711,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
.get_sset_count = bcm_sysport_get_sset_count,
.get_wol = bcm_sysport_get_wol,
.set_wol = bcm_sysport_set_wol,
+ .get_coalesce = bcm_sysport_get_coalesce,
+ .set_coalesce = bcm_sysport_set_coalesce,
};
static const struct net_device_ops bcm_sysport_netdev_ops = {
@@ -1721,7 +1793,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
macaddr = of_get_mac_address(dn);
if (!macaddr || !is_valid_ether_addr(macaddr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
} else {
ether_addr_copy(dev->dev_addr, macaddr);
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index e2c043eab..f28bf545d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -292,7 +292,7 @@ struct bcm_rsb {
#define RDMA_END_ADDR_LO 0x102c
#define RDMA_MBDONE_INTR 0x1030
-#define RDMA_INTR_THRESH_MASK 0xff
+#define RDMA_INTR_THRESH_MASK 0x1ff
#define RDMA_TIMEOUT_SHIFT 16
#define RDMA_TIMEOUT_MASK 0xffff
@@ -663,8 +663,6 @@ struct bcm_sysport_priv {
/* Receive queue */
void __iomem *rx_bds;
- void __iomem *rx_bd_assign_ptr;
- unsigned int rx_bd_assign_index;
struct bcm_sysport_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_read_ptr;
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index db27febbb..4fbb093e0 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -13,6 +13,7 @@
dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
#include <linux/netdevice.h>
#define BGMAC_DEV_CTL 0x000
@@ -349,7 +350,7 @@
#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */
#define BGMAC_DESC_CTL1_LEN 0x00001FFF
-#define BGMAC_PHY_NOREGS 0x1E
+#define BGMAC_PHY_NOREGS BRCM_PSEUDO_PHY_ADDR
#define BGMAC_PHY_MASK 0x1F
#define BGMAC_MAX_TX_RINGS 4
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 1f82a04ce..cd4ae76bb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -357,6 +357,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned int offset;
};
union db_prod {
@@ -381,9 +382,10 @@ union db_prod {
#define PAGES_PER_SGE_SHIFT 0
#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE PAGE_SIZE
-#define SGE_PAGE_SHIFT PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGE_SHIFT 12
+#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
SGE_PAGES), 0xffff)
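/* Illustrative example (not from the patch): with SGE_PAGE_SHIFT = 12,
 * SGE_PAGE_ALIGN(0x1234) = (0x1234 + 0xfff) & ~0xfff = 0x2000, i.e.
 * alignment is now to the fixed 4K SGE page rather than to PAGE_SIZE.
 */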
@@ -526,6 +528,11 @@ enum bnx2x_tpa_mode_t {
TPA_MODE_GRO
};
+struct bnx2x_alloc_pool {
+ struct page *page;
+ unsigned int offset;
+};
+
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
@@ -599,6 +606,8 @@ struct bnx2x_fastpath {
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
+
+ struct bnx2x_alloc_pool page_pool;
};
#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
@@ -2408,10 +2417,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
-#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c31f25cde..5ba60a7cf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
+ dev_kfree_skb_any(skb);
}
- dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -544,30 +544,46 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+ struct bnx2x_alloc_pool *pool = &fp->page_pool;
dma_addr_t mapping;
- if (unlikely(page == NULL)) {
- BNX2X_ERR("Can't alloc sge\n");
- return -ENOMEM;
+ if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
+
+ /* put page reference used by the memory pool, since we
+ * won't be using this page as the mempool anymore.
+ */
+ if (pool->page)
+ put_page(pool->page);
+
+ pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+ if (unlikely(!pool->page)) {
+ BNX2X_ERR("Can't alloc sge\n");
+ return -ENOMEM;
+ }
+
+ pool->offset = 0;
}
- mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGES, DMA_FROM_DEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, pool->page,
+ pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- __free_pages(page, PAGES_PER_SGE_SHIFT);
BNX2X_ERR("Can't map sge\n");
return -ENOMEM;
}
- sw_buf->page = page;
+ get_page(pool->page);
+ sw_buf->page = pool->page;
+ sw_buf->offset = pool->offset;
+
dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+ pool->offset += SGE_PAGE_SIZE;
+
return 0;
}
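/* Reference-count picture for the pool scheme above (illustrative
 * summary, not from the patch):
 *   - the pool keeps the reference returned by alloc_pages();
 *   - every SGE carved from the page takes its own get_page();
 *   - bnx2x_free_rx_sge() drops the per-SGE reference, and the pool's
 *     reference is dropped when the page is exhausted or the pool is
 *     torn down, so the page is freed only after all users are done.
 * On a 64K PAGE_SIZE kernel this lets one page back up to
 * PAGE_SIZE / SGE_PAGE_SIZE = 16 SGEs instead of one.
 */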
@@ -629,20 +645,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return err;
}
- /* Unmap the page as we're going to pass it to the stack */
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ skb_fill_page_desc(skb, j, old_rx_pg.page,
+ old_rx_pg.offset, frag_len);
else { /* GRO */
int rem;
int offset = 0;
for (rem = frag_len; rem > 0; rem -= gro_size) {
int len = rem > gro_size ? gro_size : rem;
skb_fill_page_desc(skb, frag_id++,
- old_rx_pg.page, offset, len);
+ old_rx_pg.page,
+ old_rx_pg.offset + offset,
+ len);
if (offset)
get_page(old_rx_pg.page);
offset += len;
@@ -662,7 +680,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
if (fp->rx_frag_size)
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
@@ -3399,8 +3417,13 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
u32 wnd_sum = 0;
/* Headers length */
- hlen = (int)(skb_transport_header(skb) - skb->data) +
- tcp_hdrlen(skb);
+ if (xmit_type & XMIT_GSO_ENC)
+ hlen = (int)(skb_inner_transport_header(skb) -
+ skb->data) +
+ inner_tcp_hdrlen(skb);
+ else
+ hlen = (int)(skb_transport_header(skb) -
+ skb->data) + tcp_hdrlen(skb);
/* Amount of data (w/o headers) on linear part of SKB*/
first_bd_sz = skb_headlen(skb) - hlen;
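/* Illustrative layout (not from the patch): for an encapsulated GSO
 * packet (XMIT_GSO_ENC), e.g. TCP inside VXLAN, hlen now spans the outer
 * eth/IP/UDP/VXLAN headers plus the inner eth/IP up to and including the
 * inner TCP header, so the linearization check below is applied to the
 * real payload rather than stopping at the outer headers.
 */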
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d7a71758e..03b7404d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -804,9 +804,13 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
+ /* Since many fragments can share the same page, make sure to
+ * only unmap and free the page once.
+ */
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
- __free_pages(page, PAGES_PER_SGE_SHIFT);
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ put_page(page);
sw_buf->page = NULL;
sge->addr_hi = 0;
@@ -964,6 +968,17 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
((u8 *)fw_lo)[1] = mac[4];
}
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+ struct bnx2x_alloc_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ put_page(pool->page);
+
+ pool->page = NULL;
+}
+
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
@@ -974,6 +989,8 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
for (i = 0; i < last; i++)
bnx2x_free_rx_sge(bp, fp, i);
+
+ bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 48ed005ba..5907c821d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -257,14 +257,15 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ u32 media_type;
/* Dual Media boards present all available port types */
cmd->supported = bp->port.supported[cfg_idx] |
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
- if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
- ETH_PHY_SFP_1G_FIBER) {
+ media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
+ if (media_type == ETH_PHY_SFP_1G_FIBER) {
cmd->supported &= ~(SUPPORTED_10000baseT_Full);
cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
}
@@ -312,12 +313,26 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->lp_advertising |= ADVERTISED_100baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
- if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
+ if (media_type == ETH_PHY_KR) {
+ cmd->lp_advertising |=
+ ADVERTISED_1000baseKX_Full;
+ } else {
+ cmd->lp_advertising |=
+ ADVERTISED_1000baseT_Full;
+ }
+ }
if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
- if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
+ if (media_type == ETH_PHY_KR) {
+ cmd->lp_advertising |=
+ ADVERTISED_10000baseKR_Full;
+ } else {
+ cmd->lp_advertising |=
+ ADVERTISED_10000baseT_Full;
+ }
+ }
if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
@@ -564,15 +579,20 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
- if (!(bp->port.supported[cfg_idx] &
- SUPPORTED_1000baseT_Full)) {
+ if (bp->port.supported[cfg_idx] &
+ SUPPORTED_1000baseT_Full) {
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+
+ } else if (bp->port.supported[cfg_idx] &
+ SUPPORTED_1000baseKX_Full) {
+ advertising = ADVERTISED_1000baseKX_Full;
+ } else {
DP(BNX2X_MSG_ETHTOOL,
"1G full not supported\n");
return -EINVAL;
}
- advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_TP);
break;
case SPEED_2500:
@@ -600,17 +620,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
phy_idx = bnx2x_get_cur_phy_idx(bp);
- if (!(bp->port.supported[cfg_idx]
- & SUPPORTED_10000baseT_Full) ||
- (bp->link_params.phy[phy_idx].media_type ==
+ if ((bp->port.supported[cfg_idx] &
+ SUPPORTED_10000baseT_Full) &&
+ (bp->link_params.phy[phy_idx].media_type !=
ETH_PHY_SFP_1G_FIBER)) {
+ advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ } else if (bp->port.supported[cfg_idx] &
+ SUPPORTED_10000baseKR_Full) {
+ advertising = (ADVERTISED_10000baseKR_Full |
+ ADVERTISED_FIBRE);
+ } else {
DP(BNX2X_MSG_ETHTOOL,
"10G full not supported\n");
return -EINVAL;
}
- advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE);
break;
default:
@@ -633,6 +658,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->link_params.multi_phy_config = new_multi_phy_config;
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
bnx2x_link_set(bp);
}
@@ -1204,6 +1230,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot get access to nvram interface\n");
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
return -EBUSY;
}
@@ -1691,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
offset += sizeof(u32);
data_buf += sizeof(u32);
written_so_far += sizeof(u32);
+
+ /* At the end of each 4KB page, release the nvram lock to give the MFW
+ * a chance to take it for its own use.
+ */
+ if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+ (written_so_far < buf_size)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Releasing NVM lock after offset 0x%x\n",
+ (u32)(offset - sizeof(u32)));
+ bnx2x_release_nvram_lock(bp);
+ usleep_range(1000, 2000);
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+ }
+
cmd_flags = 0;
}
@@ -1944,6 +1987,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
bnx2x_link_set(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 21a0d6afc..a0b03c27e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3392,9 +3392,9 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
case BNX2X_FLOW_CTRL_AUTO:
switch (params->req_fc_auto_adv) {
case BNX2X_FLOW_CTRL_BOTH:
+ case BNX2X_FLOW_CTRL_RX:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
break;
- case BNX2X_FLOW_CTRL_RX:
case BNX2X_FLOW_CTRL_TX:
*ieee_fc |=
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
@@ -3488,14 +3488,21 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
}
-static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
-{ /* LD LP */
+static void bnx2x_pause_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 pause_result)
+{
+ struct bnx2x *bp = params->bp;
+ /* LD LP */
switch (pause_result) { /* ASYM P ASYM P */
case 0xb: /* 1 0 1 1 */
+ DP(NETIF_MSG_LINK, "Flow Control: TX only\n");
vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
break;
case 0xe: /* 1 1 1 0 */
+ DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
break;
@@ -3503,10 +3510,22 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
case 0x7: /* 0 1 1 1 */
case 0xd: /* 1 1 0 1 */
case 0xf: /* 1 1 1 1 */
- vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ /* If the user selected to advertise RX ONLY,
+ * although we advertised both, we need to enable
+ * RX only.
+ */
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
+ DP(NETIF_MSG_LINK, "Flow Control: RX & TX\n");
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ } else {
+ DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ }
break;
default:
+ DP(NETIF_MSG_LINK, "Flow Control: None\n");
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
break;
}
if (pause_result & (1<<0))
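/* Illustrative decoding (not from the patch): pause_result packs the
 * local (LD) and link-partner (LP) advertisement bits as
 *	bit3 = LD ASYM, bit2 = LD PAUSE, bit1 = LP ASYM, bit0 = LP PAUSE,
 * so e.g. 0xb (1011b) means LD advertised ASYM only while LP advertised
 * both, which resolves to TX-only flow control as in the first case.
 */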
@@ -3567,7 +3586,7 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
pause_result |= (lp_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result);
- bnx2x_pause_resolve(vars, pause_result);
+ bnx2x_pause_resolve(phy, params, vars, pause_result);
}
@@ -5396,7 +5415,7 @@ static void bnx2x_update_adv_fc(struct bnx2x_phy *phy,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result);
}
- bnx2x_pause_resolve(vars, pause_result);
+ bnx2x_pause_resolve(phy, params, vars, pause_result);
}
@@ -7129,7 +7148,7 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
pause_result |= (lp_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
- bnx2x_pause_resolve(vars, pause_result);
+ bnx2x_pause_resolve(phy, params, vars, pause_result);
DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
pause_result);
}
@@ -11474,7 +11493,9 @@ static const struct bnx2x_phy phy_warpcore = {
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseKX_Full |
SUPPORTED_10000baseT_Full |
+ SUPPORTED_10000baseKR_Full |
SUPPORTED_20000baseKR2_Full |
SUPPORTED_20000baseMLD2_Full |
SUPPORTED_FIBRE |
@@ -11980,8 +12001,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
break;
case PORT_HW_CFG_NET_SERDES_IF_KR:
phy->media_type = ETH_PHY_KR;
- phy->supported &= (SUPPORTED_1000baseT_Full |
- SUPPORTED_10000baseT_Full |
+ phy->supported &= (SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKR_Full |
SUPPORTED_FIBRE |
SUPPORTED_Autoneg |
SUPPORTED_Pause |
@@ -11999,8 +12020,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
phy->media_type = ETH_PHY_KR;
phy->flags |= FLAGS_WC_DUAL_MODE;
phy->supported &= (SUPPORTED_20000baseKR2_Full |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_1000baseKX_Full |
SUPPORTED_Autoneg |
SUPPORTED_FIBRE |
SUPPORTED_Pause |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b1405d20d..bbb733556 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2281,13 +2281,11 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
switch (bp->link_vars.ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
- case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
- bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
- ADVERTISED_Pause);
- break;
-
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
ADVERTISED_Pause);
@@ -2298,8 +2296,6 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp)
break;
default:
- bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
- ADVERTISED_Pause);
break;
}
}
@@ -2345,12 +2341,16 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
if (load_mode == LOAD_DIAG) {
struct link_params *lp = &bp->link_params;
lp->loopback_mode = LOOPBACK_XGXS;
- /* do PHY loopback at 10G speed, if possible */
- if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
+ /* Prefer doing PHY loopback at highest speed */
+ if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
if (lp->speed_cap_mask[cfx_idx] &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
lp->req_line_speed[cfx_idx] =
- SPEED_10000;
+ SPEED_20000;
+ else if (lp->speed_cap_mask[cfx_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ lp->req_line_speed[cfx_idx] =
+ SPEED_10000;
else
lp->req_line_speed[cfx_idx] =
SPEED_1000;
@@ -4861,9 +4861,7 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
- if (print)
- _print_next_block((*par_num)++,
- "MCP SCPAD");
+ (*par_num)++;
/* clear latched SCPAD PARITY from MCP */
REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
1UL << 10);
@@ -4925,6 +4923,7 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
(sig[3] & HW_PRTY_ASSERT_SET_3) ||
(sig[4] & HW_PRTY_ASSERT_SET_4)) {
int par_num = 0;
+
DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
"[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
sig[0] & HW_PRTY_ASSERT_SET_0,
@@ -4932,9 +4931,18 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
sig[2] & HW_PRTY_ASSERT_SET_2,
sig[3] & HW_PRTY_ASSERT_SET_3,
sig[4] & HW_PRTY_ASSERT_SET_4);
- if (print)
- netdev_err(bp->dev,
- "Parity errors detected in blocks: ");
+ if (print) {
+ if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+ (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
+ (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
+ netdev_err(bp->dev,
+ "Parity errors detected in blocks: ");
+ } else {
+ print = false;
+ }
+ }
res |= bnx2x_check_blocks_with_parity0(bp,
sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
res |= bnx2x_check_blocks_with_parity1(bp,
@@ -8425,7 +8433,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
BNX2X_ETH_MAC, &ramrod_flags);
} else { /* vf */
return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
- bp->fp->index, true);
+ bp->fp->index, set);
}
}
@@ -11142,6 +11150,12 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising[idx] |=
(ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
+ } else if (bp->port.supported[idx] &
+ SUPPORTED_1000baseKX_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_1000;
+ bp->port.advertising[idx] |=
+ ADVERTISED_1000baseKX_Full;
} else {
BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
@@ -11174,6 +11188,13 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising[idx] |=
(ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
+ } else if (bp->port.supported[idx] &
+ SUPPORTED_10000baseKR_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseKR_Full |
+ ADVERTISED_FIBRE);
} else {
BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 07cdf9bbf..4ad415ac8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -424,7 +424,7 @@ static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
o->head_exe_request = false;
o->saved_ramrod_flags = 0;
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
- if (rc != 0) {
+ if ((rc != 0) && (rc != 1)) {
BNX2X_ERR("execution of pending commands failed with rc %d\n",
rc);
#ifdef BNX2X_STOP_ON_ERROR
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6043734ea..09ff09f82 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1230,7 +1230,6 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
new_skb = skb_realloc_headroom(skb, sizeof(*status));
dev_kfree_skb(skb);
if (!new_skb) {
- dev->stats.tx_errors++;
dev->stats.tx_dropped++;
return NULL;
}
@@ -1465,7 +1464,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- dev->stats.rx_errors++;
goto next;
}
@@ -1493,7 +1491,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
@@ -1515,7 +1512,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dev->stats.rx_frame_errors++;
if (dma_flag & DMA_RX_LG)
dev->stats.rx_length_errors++;
- dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
@@ -2130,6 +2126,8 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
int ret = 0;
int timeout = 0;
u32 reg;
+ u32 dma_ctrl;
+ int i;
/* Disable TDMA to stop add more frames in TX DMA */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
@@ -2173,6 +2171,20 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
ret = -ETIMEDOUT;
}
+ dma_ctrl = 0;
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ dma_ctrl = 0;
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return ret;
}
@@ -2770,12 +2782,75 @@ static int bcmgenet_close(struct net_device *dev)
return ret;
}
+static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = ring->priv;
+ u32 p_index, c_index, intsts, intmsk;
+ struct netdev_queue *txq;
+ unsigned int free_bds;
+ unsigned long flags;
+ bool txq_stopped;
+
+ if (!netif_msg_tx_err(priv))
+ return;
+
+ txq = netdev_get_tx_queue(priv->dev, ring->queue);
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->index == DESC_INDEX) {
+ intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
+ } else {
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
+ }
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+ p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
+ txq_stopped = netif_tx_queue_stopped(txq);
+ free_bds = ring->free_bds;
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
+ "TX queue status: %s, interrupts: %s\n"
+ "(sw)free_bds: %d (sw)size: %d\n"
+ "(sw)p_index: %d (hw)p_index: %d\n"
+ "(sw)c_index: %d (hw)c_index: %d\n"
+ "(sw)clean_p: %d (sw)write_p: %d\n"
+ "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
+ ring->index, ring->queue,
+ txq_stopped ? "stopped" : "active",
+ intsts & intmsk ? "enabled" : "disabled",
+ free_bds, ring->size,
+ ring->prod_index, p_index & DMA_P_INDEX_MASK,
+ ring->c_index, c_index & DMA_C_INDEX_MASK,
+ ring->clean_ptr, ring->write_ptr,
+ ring->cb_ptr, ring->end_ptr);
+}
+
static void bcmgenet_timeout(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 int0_enable = 0;
+ u32 int1_enable = 0;
+ unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+ for (q = 0; q < priv->hw_params->tx_queues; q++)
+ bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
+ bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
+
+ bcmgenet_tx_reclaim_all(dev);
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++)
+ int1_enable |= (1 << q);
+
+ int0_enable = UMAC_IRQ_TXDMA_DONE;
+
+ /* Re-enable TX interrupts if disabled */
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+
dev->trans_start = jiffies;
dev->stats.tx_errors++;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 6f2887a5e..6159deab8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -594,6 +594,7 @@ struct bcmgenet_priv {
wait_queue_head_t wq;
struct phy_device *phydev;
struct device_node *phy_dn;
+ struct device_node *mdio_dn;
struct mii_bus *mii_bus;
u16 gphy_rev;
struct clk *clk_eee;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 420949cc5..adf23d2ac 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -47,7 +47,12 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
HZ / 100);
ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
- if (ret & MDIO_READ_FAIL)
+ /* Some broken devices are known not to release the line during
+ * turn-around, e.g: Broadcom BCM53125 external switches, so check for
+ * that condition here and ignore the MDIO controller read failure
+ * indication.
+ */
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL))
return -EIO;
return ret & 0xffff;
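The failure masking above relies on the generic phy_ignore_ta_mask field of struct mii_bus. A minimal sketch, assuming a hypothetical bus consumer whose pseudo-PHY sits at address 30 and is known to hold the line during turn-around; the address and function name are illustrative, not taken from this patch:

	#include <linux/phy.h>

	static int example_mdio_register(struct mii_bus *bus)
	{
		/* The GENET read path above then ignores MDIO_READ_FAIL for
		 * transactions addressed to this pseudo-PHY.
		 */
		bus->phy_ignore_ta_mask = 1 << 30;	/* assumed pseudo-PHY address */
		return mdiobus_register(bus);
	}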
@@ -403,6 +408,52 @@ static int bcmgenet_mii_probe(struct net_device *dev)
return 0;
}
+/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
+ * their internal MDIO management controller making them fail to successfully
+ * be read from or written to for the first transaction. We insert a dummy
+ * BMSR read here to make sure that phy_get_device() and get_phy_id() can
+ * correctly read the PHY MII_PHYSID1/2 registers and successfully register a
+ * PHY device for this peripheral.
+ *
+ * Once the PHY driver is registered, we can workaround subsequent reads from
+ * there (e.g: during system-wide power management).
+ *
+ * bus->reset is invoked before mdiobus_scan during mdiobus_register and is
+ * therefore the right location to stick that workaround. Since we do not want
+ * to read from non-existing PHYs, we either use bus->phy_mask or do a manual
+ * Device Tree scan to limit the search area.
+ */
+static int bcmgenet_mii_bus_reset(struct mii_bus *bus)
+{
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *np = priv->mdio_dn;
+ struct device_node *child = NULL;
+ u32 read_mask = 0;
+ int addr = 0;
+
+ if (!np) {
+ read_mask = 1 << priv->phy_addr;
+ } else {
+ for_each_available_child_of_node(np, child) {
+ addr = of_mdio_parse_addr(&dev->dev, child);
+ if (addr < 0)
+ continue;
+
+ read_mask |= 1 << addr;
+ }
+ }
+
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ if (read_mask & 1 << addr) {
+ dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr);
+ mdiobus_read(bus, addr, MII_BMSR);
+ }
+ }
+
+ return 0;
+}
+
static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
{
struct mii_bus *bus;
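The workaround above depends on bus->reset() running before the PHY scan. A simplified view, under a hypothetical name, of the call order inside mdiobus_register() that places the dummy BMSR reads ahead of get_phy_id(); this is a sketch of the assumed ordering, not the MDIO core's actual implementation:

	#include <linux/phy.h>

	static int example_mdiobus_register_order(struct mii_bus *bus)
	{
		int i;

		if (bus->reset)
			bus->reset(bus);	/* dummy MII_BMSR reads issued here */

		for (i = 0; i < PHY_MAX_ADDR; i++)
			if (!(bus->phy_mask & (1 << i)))
				mdiobus_scan(bus, i);	/* get_phy_id() now reads valid IDs */

		return 0;
	}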
@@ -422,6 +473,7 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
bus->parent = &priv->pdev->dev;
bus->read = bcmgenet_mii_read;
bus->write = bcmgenet_mii_write;
+ bus->reset = bcmgenet_mii_bus_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
priv->pdev->name, priv->pdev->id);
@@ -438,7 +490,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
- struct device_node *mdio_dn;
char *compat;
int ret;
@@ -446,14 +497,14 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
if (!compat)
return -ENOMEM;
- mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
kfree(compat);
- if (!mdio_dn) {
+ if (!priv->mdio_dn) {
dev_err(kdev, "unable to find MDIO bus node\n");
return -ENODEV;
}
- ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
+ ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn);
if (ret) {
dev_err(kdev, "failed to register MDIO bus\n");
return ret;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ac27e2426..f557a2aae 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1508,16 +1508,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
__raw_writeq(reg, port);
port = s->sbm_base + R_MAC_ETHERNET_ADDR;
-#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
- /*
- * Pass1 SOCs do not receive packets addressed to the
- * destination address in the R_MAC_ETHERNET_ADDR register.
- * Set the value to zero.
- */
- __raw_writeq(0, port);
-#else
__raw_writeq(reg, port);
-#endif
/*
* Set the receive filter for no packets, and write values
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 6eb005247..25662a960 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6609,7 +6609,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
static void tg3_frag_free(bool is_frag, void *data)
{
if (is_frag)
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig
index 4e8c0b6c5..c4bbe54e2 100644
--- a/drivers/net/ethernet/brocade/Kconfig
+++ b/drivers/net/ethernet/brocade/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_BROCADE
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 6e10b9973..8584abcf5 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -9,5 +9,3 @@ obj-$(CONFIG_BNA) += bna.o
bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
bna-objs += cna_fwimg.o
-
-EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index cf9f3956f..95bc8b644 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -282,7 +282,6 @@ bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
cee->ioc = ioc;
bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
- bfa_q_qe_init(&cee->ioc_notify);
bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index af25d8e8f..1d11d666d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -28,19 +28,6 @@
typedef void (*bfa_sm_t)(void *sm, int event);
-/* oc - object class eg. bfa_ioc
- * st - state, eg. reset
- * otype - object type, eg. struct bfa_ioc
- * etype - object type, eg. enum ioc_event
- */
-#define bfa_sm_state_decl(oc, st, otype, etype) \
- static void oc ## _sm_ ## st(otype * fsm, etype event)
-
-#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
-#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
-#define bfa_sm_get_state(_sm) ((_sm)->sm)
-#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
-
/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
@@ -67,7 +54,6 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
-#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
#define bfa_fsm_cmp_state(_fsm, _state) \
((_fsm)->fsm == (bfa_fsm_t)(_state))
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 3bfd9da92..d152b3fa6 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -24,7 +24,6 @@
#include "bfa_defs_status.h"
#include "bfa_defs_mfg_comm.h"
-#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
/* ---------------------- adapter definitions ------------ */
@@ -55,7 +54,7 @@ struct bfa_adapter_attr {
char optrom_ver[BFA_VERSION_LEN];
char os_type[BFA_ADAPTER_OS_TYPE_LEN];
struct bfa_mfg_vpd vpd;
- struct mac mac;
+ u8 mac[ETH_ALEN];
u8 nports;
u8 max_speed;
@@ -187,8 +186,6 @@ enum {
#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
-#pragma pack(1)
-
/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
@@ -211,7 +208,7 @@ struct bfa_mfg_block {
char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
- mac_t mfg_mac; /* base mac address */
+ u8 mfg_mac[ETH_ALEN]; /* base mac address */
u8 num_mac; /* number of mac addresses */
u8 rsv2;
u32 card_type; /* card type */
@@ -227,9 +224,7 @@ struct bfa_mfg_block {
char initial_mode[8]; /* initial mode: hba/cna/nic */
u8 rsv4[84];
u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
-};
-
-#pragma pack()
+} __packed;
/* ---------------------- pci definitions ------------ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index a37326d44..f048887cb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -109,8 +109,6 @@ union bfa_port_stats_u {
struct bfa_port_eth_stats eth;
};
-#pragma pack(1)
-
#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
#define BFA_CEE_DCBX_MAX_PRIORITY (8)
#define BFA_CEE_DCBX_MAX_PGID (8)
@@ -133,7 +131,7 @@ struct bfa_cee_lldp_str {
u8 len;
u8 rsvd[2];
u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
-};
+} __packed;
/* LLDP parameters */
struct bfa_cee_lldp_cfg {
@@ -145,7 +143,7 @@ struct bfa_cee_lldp_cfg {
struct bfa_cee_lldp_str mgmt_addr;
u16 time_to_live;
u16 enabled_system_cap;
-};
+} __packed;
enum bfa_cee_dcbx_version {
DCBX_PROTOCOL_PRECEE = 1,
@@ -171,7 +169,7 @@ struct bfa_cee_dcbx_cfg {
u8 lls_fcoe; /* FCoE Logical Link Status */
u8 lls_lan; /* LAN Logical Link Status */
u8 rsvd[2];
-};
+} __packed;
/* CEE status */
/* Making this to tri-state for the benefit of port list command */
@@ -188,11 +186,11 @@ struct bfa_cee_attr {
u8 error_reason;
struct bfa_cee_lldp_cfg lldp_remote;
struct bfa_cee_dcbx_cfg dcbx_remote;
- mac_t src_mac;
+ u8 src_mac[ETH_ALEN];
u8 link_speed;
u8 nw_priority;
u8 filler[2];
-};
+} __packed;
/* LLDP/DCBX/CEE Statistics */
struct bfa_cee_stats {
@@ -214,8 +212,6 @@ struct bfa_cee_stats {
u32 cee_status_up; /*!< CEE status up */
u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 7a45cd0b5..7e17451c9 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -59,8 +59,6 @@ enum {
BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
};
-#pragma pack(1)
-
/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
@@ -77,7 +75,7 @@ enum {
CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
- CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
+ CB_GPIO_PROTO = BIT(7) /*!< 8G 2port FC prototypes */
};
#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
@@ -86,7 +84,7 @@ do { \
(prop) |= BFI_ADAPTER_PROTO; \
(gpio) &= ~CB_GPIO_PROTO; \
} \
- switch ((gpio)) { \
+ switch (gpio) { \
case CB_GPIO_TTV: \
(prop) |= BFI_ADAPTER_TTV; \
case CB_GPIO_DFLY: \
@@ -148,8 +146,6 @@ struct bfa_mfg_vpd {
u8 len; /*!< vpd data length excluding header */
u8 rsv;
u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFA_DEFS_MFG_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 68f3c13c9..b7a0f7879 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -23,14 +23,6 @@
/* IOC local definitions */
-#define bfa_ioc_state_disabled(__sm) \
- (((__sm) == BFI_IOC_UNINIT) || \
- ((__sm) == BFI_IOC_INITING) || \
- ((__sm) == BFI_IOC_HWINIT) || \
- ((__sm) == BFI_IOC_DISABLED) || \
- ((__sm) == BFI_IOC_FAIL) || \
- ((__sm) == BFI_IOC_CFG_DISABLED))
-
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
@@ -57,12 +49,6 @@
((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
-#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
- ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
-
-#define bfa_ioc_mbox_cmd_pending(__ioc) \
- (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
- readl((__ioc)->ioc_regs.hfn_mbox_cmd))
static bool bfa_nw_auto_recover = true;
@@ -1105,12 +1091,9 @@ static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
struct bfa_ioc_notify *notify;
- struct list_head *qe;
- list_for_each(qe, &ioc->notify_q) {
- notify = (struct bfa_ioc_notify *)qe;
+ list_for_each_entry(notify, &ioc->notify_q, qe)
notify->cbfn(notify->cbarg, event);
- }
}
static void
@@ -1321,7 +1304,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
i++) {
fwsig[i] =
- swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ swab32(readl(loff + ioc->ioc_regs.smem_page_start));
loff += sizeof(u32);
}
}
@@ -1387,7 +1370,7 @@ static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
- if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+ if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
return BFI_IOC_IMG_VER_INCOMP;
if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
@@ -1398,7 +1381,7 @@ bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
/* GA takes priority over internal builds of the same patch stream.
* At this point major minor maint and patch numbers are same.
*/
- if (fwhdr_is_ga(base_fwhdr) == true)
+ if (fwhdr_is_ga(base_fwhdr))
if (fwhdr_is_ga(fwhdr_to_cmp))
return BFI_IOC_IMG_VER_SAME;
else
@@ -1692,7 +1675,7 @@ bfa_raw_sem_get(void __iomem *bar)
{
int locked;
- locked = readl((bar + FLASH_SEM_LOCK_REG));
+ locked = readl(bar + FLASH_SEM_LOCK_REG);
return !locked;
}
@@ -1912,10 +1895,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
}
void
-bfa_nw_ioc_timeout(void *ioc_arg)
+bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
@@ -1980,10 +1961,9 @@ bfa_ioc_send_getattr(struct bfa_ioc *ioc)
}
void
-bfa_nw_ioc_hb_check(void *cbarg)
+bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = cbarg;
- u32 hb_count;
+ u32 hb_count;
hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
@@ -2069,8 +2049,8 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/**
* write smem
*/
- writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
- ((ioc->ioc_regs.smem_page_start) + (loff)));
+ writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
+ ioc->ioc_regs.smem_page_start + loff);
loff += sizeof(u32);
@@ -2177,7 +2157,8 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
/**
* Enqueue command to firmware.
*/
- bfa_q_deq(&mod->cmd_q, &cmd);
+ cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
+ list_del(&cmd->qe);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
/**
@@ -2198,8 +2179,10 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd *cmd;
- while (!list_empty(&mod->cmd_q))
- bfa_q_deq(&mod->cmd_q, &cmd);
+ while (!list_empty(&mod->cmd_q)) {
+ cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
+ list_del(&cmd->qe);
+ }
}
/**
@@ -2223,14 +2206,14 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
/*
* Hold semaphore to serialize pll init and fwtrc.
*/
- if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+ if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
return 1;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
len = sz/sizeof(u32);
for (i = 0; i < len; i++) {
- r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
buf[i] = be32_to_cpu(r32);
loff += sizeof(u32);
@@ -2278,7 +2261,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
int tlen;
if (ioc->dbg_fwsave_once) {
- ioc->dbg_fwsave_once = 0;
+ ioc->dbg_fwsave_once = false;
if (ioc->dbg_fwsave_len) {
tlen = ioc->dbg_fwsave_len;
bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
@@ -2796,7 +2779,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
ad_attr->prototype = 0;
ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
- ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
+ bfa_nw_ioc_get_mac(ioc, ad_attr->mac);
ad_attr->pcie_gen = ioc_attr->pcie_gen;
ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
@@ -2942,10 +2925,10 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
return ioc->attr->pwwn;
}
-mac_t
-bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+void
+bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
{
- return ioc->attr->mac;
+ ether_addr_copy(mac, ioc->attr->mac);
}
/* Firmware failure detected. Start recovery actions. */
@@ -2997,9 +2980,8 @@ bfa_iocpf_stop(struct bfa_ioc *ioc)
}
void
-bfa_nw_iocpf_timeout(void *ioc_arg)
+bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
enum bfa_iocpf_state iocpf_st;
iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
@@ -3011,10 +2993,8 @@ bfa_nw_iocpf_timeout(void *ioc_arg)
}
void
-bfa_nw_iocpf_sem_timeout(void *ioc_arg)
+bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
bfa_ioc_hw_sem_get(ioc);
}
@@ -3245,7 +3225,6 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
flash->op_busy = 0;
bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
- bfa_q_qe_init(&flash->ioc_notify);
bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index effb7156e..2c0b4c076 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -232,12 +232,6 @@ struct bfa_ioc_hwif {
#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
#define bfa_ioc_is_default(__ioc) \
(bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
-#define bfa_ioc_fetch_stats(__ioc, __stats) \
- (((__stats)->drv_stats) = (__ioc)->stats)
-#define bfa_ioc_clr_stats(__ioc) \
- memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
-#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
-#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc) \
BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
#define bfa_ioc_get_nports(__ioc) \
@@ -268,13 +262,6 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
(__ioc)->asic_mode))
-#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
- if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
- ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
-} while (0)
-#define bfa_ioc_ownership_reset(__ioc) \
- ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
-
#define bfa_ioc_lpu_read_stat(__ioc) do { \
if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
@@ -309,7 +296,7 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
-mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac);
void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
@@ -317,10 +304,10 @@ int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
* Timeout APIs
*/
-void bfa_nw_ioc_timeout(void *ioc);
-void bfa_nw_ioc_hb_check(void *ioc);
-void bfa_nw_iocpf_timeout(void *ioc);
-void bfa_nw_iocpf_sem_timeout(void *ioc);
+void bfa_nw_ioc_timeout(struct bfa_ioc *ioc);
+void bfa_nw_ioc_hb_check(struct bfa_ioc *ioc);
+void bfa_nw_iocpf_timeout(struct bfa_ioc *ioc);
+void bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc);
/*
* F/W Image Size & Chunk
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 2e72445db..74e5ed55a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -23,8 +23,7 @@
#include "bfi_reg.h"
#include "bfa_defs.h"
-#define bfa_ioc_ct_sync_pos(__ioc) \
- ((u32) (1 << bfa_ioc_pcifn(__ioc)))
+#define bfa_ioc_ct_sync_pos(__ioc) BIT(bfa_ioc_pcifn(__ioc))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
@@ -536,7 +535,7 @@ bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
- writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
+ writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
}
static bool
@@ -667,7 +666,7 @@ bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
}
- r32 = readl((rb + PSS_CTL_REG));
+ r32 = readl(rb + PSS_CTL_REG);
+ r32 = readl(rb + PSS_CTL_REG);
r32 &= ~__PSS_LMEM_RESET;
writel(r32, (rb + PSS_CTL_REG));
udelay(1000);
@@ -678,7 +677,7 @@ bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
udelay(1000);
- r32 = readl((rb + MBIST_STAT_REG));
+ r32 = readl(rb + MBIST_STAT_REG);
writel(0, (rb + MBIST_CTL_REG));
return BFA_STATUS_OK;
}
@@ -691,7 +690,7 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb)
/*
* put s_clk PLL and PLL FSM in reset
*/
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
__APP_PLL_SCLK_LOGIC_SOFT_RESET);
@@ -701,28 +700,28 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb)
* Ignore mode and program for the max clock (which is FC16)
* Firmware/NFC will do the PLL init appropriately
*/
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
/*
* while doing PLL init dont clock gate ethernet subsystem
*/
- r32 = readl((rb + CT2_CHIP_MISC_PRG));
- writel((r32 | __ETH_CLK_ENABLE_PORT0),
- (rb + CT2_CHIP_MISC_PRG));
+ r32 = readl(rb + CT2_CHIP_MISC_PRG);
+ writel(r32 | __ETH_CLK_ENABLE_PORT0,
+ rb + CT2_CHIP_MISC_PRG);
- r32 = readl((rb + CT2_PCIE_MISC_REG));
- writel((r32 | __ETH_CLK_ENABLE_PORT1),
- (rb + CT2_PCIE_MISC_REG));
+ r32 = readl(rb + CT2_PCIE_MISC_REG);
+ writel(r32 | __ETH_CLK_ENABLE_PORT1,
+ rb + CT2_PCIE_MISC_REG);
/*
* set sclk value
*/
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
__APP_PLL_SCLK_CLK_DIV2);
- writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);
/*
* poll for s_clk lock or delay 1ms
@@ -743,28 +742,28 @@ bfa_ioc_ct2_lclk_init(void __iomem *rb)
/*
* put l_clk PLL and PLL FSM in reset
*/
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
__APP_PLL_LCLK_LOGIC_SOFT_RESET);
- writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
/*
* set LPU speed (set for FC16 which will work for other modes)
*/
- r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ r32 = readl(rb + CT2_CHIP_MISC_PRG);
writel(r32, (rb + CT2_CHIP_MISC_PRG));
/*
* set LPU half speed (set for FC16 which will work for other modes)
*/
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
- writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
/*
* set lclk for mode (set for FC16)
*/
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
r32 |= 0x20c1731b;
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
@@ -780,14 +779,14 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
{
u32 r32;
- r32 = readl((rb + PSS_CTL_REG));
+ r32 = readl(rb + PSS_CTL_REG);
r32 &= ~__PSS_LMEM_RESET;
- writel(r32, (rb + PSS_CTL_REG));
+ writel(r32, rb + PSS_CTL_REG);
udelay(1000);
- writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+ writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
udelay(1000);
- writel(0, (rb + CT2_MBIST_CTL_REG));
+ writel(0, rb + CT2_MBIST_CTL_REG);
}
static void
@@ -801,22 +800,22 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
/*
* release soft reset on s_clk & l_clk
*/
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
- writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
- (rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
+ writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + CT2_APP_PLL_SCLK_CTL_REG);
/*
* release soft reset on s_clk & l_clk
*/
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
- writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
- (rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + CT2_APP_PLL_LCLK_CTL_REG);
/* put port0, port1 MAC & AHB in reset */
- writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
- (rb + CT2_CSI_MAC_CONTROL_REG(0)));
- writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
- (rb + CT2_CSI_MAC_CONTROL_REG(1)));
+ writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
+ rb + CT2_CSI_MAC_CONTROL_REG(0));
+ writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
+ rb + CT2_CSI_MAC_CONTROL_REG(1));
}
#define CT2_NFC_MAX_DELAY 1000
@@ -861,8 +860,8 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
- if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
- (nfc_ver >= CT2_NFC_VER_VALID)) {
+ if (wgn == (__A2T_AHB_LOAD | __WGN_READY) &&
+ nfc_ver >= CT2_NFC_VER_VALID) {
if (bfa_ioc_ct2_nfc_halted(rb))
bfa_ioc_ct2_nfc_resume(rb);
writel(__RESET_AND_START_SCLK_LCLK_PLLS,
@@ -899,19 +898,19 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
bfa_ioc_ct2_lclk_init(rb);
/* release soft reset on s_clk & l_clk */
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
rb + CT2_APP_PLL_SCLK_CTL_REG);
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
rb + CT2_APP_PLL_LCLK_CTL_REG);
}
/* Announce flash device presence, if flash was corrupted. */
if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
- r32 = readl((rb + PSS_GPIO_OUT_REG));
+ r32 = readl(rb + PSS_GPIO_OUT_REG);
writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
- r32 = readl((rb + PSS_GPIO_OE_REG));
+ r32 = readl(rb + PSS_GPIO_OE_REG);
writel(r32 | 1, rb + PSS_GPIO_OE_REG);
}
@@ -919,27 +918,27 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
* Mask the interrupts and clear any
* pending interrupts left by BIOS/EFI
*/
- writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
- writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+ writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
+ writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);
/* For first time initialization, no need to clear interrupts */
r32 = readl(rb + HOST_SEM5_REG);
if (r32 & 0x1) {
- r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
if (r32 == 1) {
- writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
- readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
+ readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
}
- r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
if (r32 == 1) {
- writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
- readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
+ readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
}
}
bfa_ioc_ct2_mem_init(rb);
- writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
- writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+ writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
+ writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
return BFA_STATUS_OK;
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index c07d5b937..9c5bb24e8 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -66,8 +66,9 @@ cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
cmdq->offset = 0;
cmdq->bytes_to_copy = 0;
while (!list_empty(&cmdq->pending_q)) {
- bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
- bfa_q_qe_init(&cmdq_ent->qe);
+ cmdq_ent = list_first_entry(&cmdq->pending_q,
+ struct bfa_msgq_cmd_entry, qe);
+ list_del(&cmdq_ent->qe);
call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
}
}
@@ -242,8 +243,8 @@ bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
/* Walk through pending list to see if the command can be posted */
while (!list_empty(&cmdq->pending_q)) {
- cmd =
- (struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
+ cmd = list_first_entry(&cmdq->pending_q,
+ struct bfa_msgq_cmd_entry, qe);
if (ntohs(cmd->msg_hdr->num_entries) <=
BFA_MSGQ_FREE_CNT(cmdq)) {
list_del(&cmd->qe);
@@ -615,7 +616,6 @@ bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
bfa_msgq_rspq_attach(&msgq->rspq, msgq);
bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
- bfa_q_qe_init(&msgq->ioc_notify);
bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 2bcde4042..81e59ea8b 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -21,8 +21,6 @@
#include "bfa_defs.h"
-#pragma pack(1)
-
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
@@ -36,10 +34,10 @@ struct bfi_mhdr {
struct {
u8 qid;
u8 fn_lpu; /*!< msg destination */
- } h2i;
+ } __packed h2i;
u16 i2htok; /*!< token in msgs to host */
- } mtag;
-};
+ } __packed mtag;
+} __packed;
#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
@@ -75,14 +73,14 @@ union bfi_addr_u {
struct {
u32 addr_lo;
u32 addr_hi;
- } a32;
-};
+ } __packed a32;
+} __packed;
/* Generic DMA addr-len pair. */
struct bfi_alen {
union bfi_addr_u al_addr; /* DMA addr of buffer */
u32 al_len; /* length of buffer */
-};
+} __packed;
/*
* Large Message structure - 128 Bytes size Msgs
@@ -96,7 +94,7 @@ struct bfi_alen {
struct bfi_mbmsg {
struct bfi_mhdr mh;
u32 pl[BFI_MBMSG_SZ];
-};
+} __packed;
/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
@@ -184,19 +182,19 @@ enum bfi_ioc_i2h_msgs {
struct bfi_ioc_getattr_req {
struct bfi_mhdr mh;
union bfi_addr_u attr_addr;
-};
+} __packed;
struct bfi_ioc_attr {
u64 mfg_pwwn; /*!< Mfg port wwn */
u64 mfg_nwwn; /*!< Mfg node wwn */
- mac_t mfg_mac; /*!< Mfg mac */
+ u8 mfg_mac[ETH_ALEN]; /*!< Mfg mac */
u8 port_mode; /* enum bfi_port_mode */
u8 rsvd_a;
u64 pwwn;
u64 nwwn;
- mac_t mac; /*!< PBC or Mfg mac */
+ u8 mac[ETH_ALEN]; /*!< PBC or Mfg mac */
u16 rsvd_b;
- mac_t fcoe_mac;
+ u8 fcoe_mac[ETH_ALEN];
u16 rsvd_c;
char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
u8 pcie_gen;
@@ -211,14 +209,14 @@ struct bfi_ioc_attr {
char optrom_version[BFA_VERSION_LEN];
struct bfa_mfg_vpd vpd;
u32 card_type; /*!< card type */
-};
+} __packed;
/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< cfg reply status */
u8 rsvd[3];
-};
+} __packed;
/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB (0x40)
@@ -256,7 +254,7 @@ struct bfi_ioc_fwver {
u8 build;
u8 rsvd[2];
#endif
-};
+} __packed;
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
@@ -269,7 +267,7 @@ struct bfi_ioc_image_hdr {
u32 rsvd_b[2];
struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
-};
+} __packed;
enum bfi_ioc_img_ver_cmp {
BFI_IOC_IMG_VER_INCOMP,
@@ -301,7 +299,7 @@ enum bfi_port_mode {
struct bfi_ioc_hbeat {
struct bfi_mhdr mh; /*!< common msg header */
u32 hb_count; /*!< current heart beat count */
-};
+} __packed;
/* IOC hardware/firmware state */
enum bfi_ioc_state {
@@ -317,8 +315,6 @@ enum bfi_ioc_state {
BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
};
-#define BFI_IOC_ENDIAN_SIG 0x12345678
-
enum {
BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
@@ -337,12 +333,6 @@ enum {
BFI_ADAPTER_ ## __prop ## _SH)
#define BFI_ADAPTER_SETP(__prop, __val) \
((__val) << BFI_ADAPTER_ ## __prop ## _SH)
-#define BFI_ADAPTER_IS_PROTO(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_PROTO)
-#define BFI_ADAPTER_IS_TTV(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_TTV)
-#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_UNSUPP)
#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
@@ -353,7 +343,7 @@ struct bfi_ioc_ctrl_req {
u16 clscode;
u16 rsvd;
u32 tv_sec;
-};
+} __packed;
/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
@@ -362,7 +352,7 @@ struct bfi_ioc_ctrl_reply {
u8 port_mode; /*!< enum bfa_mode */
u8 cap_bm; /*!< capability bit mask */
u8 rsvd;
-};
+} __packed;
#define BFI_IOC_MSGSZ 8
/* H2I Messages */
@@ -372,14 +362,14 @@ union bfi_ioc_h2i_msg_u {
struct bfi_ioc_ctrl_req disable_req;
struct bfi_ioc_getattr_req getattr_req;
u32 mboxmsg[BFI_IOC_MSGSZ];
-};
+} __packed;
/* I2H Messages */
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_reply fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
-};
+} __packed;
/*----------------------------------------------------------------------
* MSGQ
@@ -408,7 +398,7 @@ struct bfi_msgq_mhdr {
u16 num_entries;
u8 enet_id;
u8 rsvd[1];
-};
+} __packed;
#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
(_mh).msg_class = (_mc); \
@@ -430,21 +420,21 @@ struct bfi_msgq {
union bfi_addr_u addr;
u16 q_depth; /* Total num of entries in the queue */
u8 rsvd[2];
-};
+} __packed;
/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
struct bfi_msgq_cfg_req {
struct bfi_mhdr mh;
struct bfi_msgq cmdq;
struct bfi_msgq rspq;
-};
+} __packed;
/* BFI_ENET_MSGQ_CFG_RSP */
struct bfi_msgq_cfg_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/* BFI_MSGQ_H2I_DOORBELL */
struct bfi_msgq_h2i_db {
@@ -452,8 +442,8 @@ struct bfi_msgq_h2i_db {
union {
u16 cmdq_pi;
u16 rspq_ci;
- } idx;
-};
+ } __packed idx;
+} __packed;
/* BFI_MSGQ_I2H_DOORBELL */
struct bfi_msgq_i2h_db {
@@ -461,8 +451,8 @@ struct bfi_msgq_i2h_db {
union {
u16 rspq_pi;
u16 cmdq_ci;
- } idx;
-};
+ } __packed idx;
+} __packed;
#define BFI_CMD_COPY_SZ 28
@@ -470,14 +460,14 @@ struct bfi_msgq_i2h_db {
struct bfi_msgq_h2i_cmdq_copy_rsp {
struct bfi_mhdr mh;
u8 data[BFI_CMD_COPY_SZ];
-};
+} __packed;
/* BFI_MSGQ_I2H_CMD_COPY_REQ */
struct bfi_msgq_i2h_cmdq_copy_req {
struct bfi_mhdr mh;
u16 offset;
u16 len;
-};
+} __packed;
/*
* FLASH module specific
@@ -505,7 +495,7 @@ enum bfi_flash_i2h_msgs {
struct bfi_flash_query_req {
struct bfi_mhdr mh; /* Common msg header */
struct bfi_alen alen;
-};
+} __packed;
/*
* Flash write request
@@ -519,7 +509,7 @@ struct bfi_flash_write_req {
u8 rsv[2];
u32 offset;
u32 length;
-};
+} __packed;
/*
* Flash read request
@@ -532,7 +522,7 @@ struct bfi_flash_read_req {
u32 offset;
u32 length;
struct bfi_alen alen;
-};
+} __packed;
/*
* Flash query response
@@ -540,7 +530,7 @@ struct bfi_flash_read_req {
struct bfi_flash_query_rsp {
struct bfi_mhdr mh; /* Common msg header */
u32 status;
-};
+} __packed;
/*
* Flash read response
@@ -552,7 +542,7 @@ struct bfi_flash_read_rsp {
u8 rsv[3];
u32 status;
u32 length;
-};
+} __packed;
/*
* Flash write response
@@ -564,8 +554,6 @@ struct bfi_flash_write_rsp {
u8 rsv[3];
u32 status;
u32 length;
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index bd605bee7..fad651101 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -22,8 +22,6 @@
#include "bfi.h"
#include "bfa_defs_cna.h"
-#pragma pack(1)
-
enum bfi_port_h2i {
BFI_PORT_H2I_ENABLE_REQ = (1),
BFI_PORT_H2I_DISABLE_REQ = (2),
@@ -43,7 +41,7 @@ struct bfi_port_generic_req {
struct bfi_mhdr mh; /*!< msg header */
u32 msgtag; /*!< msgtag for reply */
u32 rsvd;
-};
+} __packed;
/* Generic RSP type */
struct bfi_port_generic_rsp {
@@ -51,13 +49,13 @@ struct bfi_port_generic_rsp {
u8 status; /*!< port enable status */
u8 rsvd[3];
u32 msgtag; /*!< msgtag for reply */
-};
+} __packed;
/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
struct bfi_mhdr mh; /*!< common msg header */
union bfi_addr_u dma_addr;
-};
+} __packed;
union bfi_port_h2i_msg_u {
struct bfi_mhdr mh;
@@ -65,7 +63,7 @@ union bfi_port_h2i_msg_u {
struct bfi_port_generic_req disable_req;
struct bfi_port_get_stats_req getstats_req;
struct bfi_port_generic_req clearstats_req;
-};
+} __packed;
union bfi_port_i2h_msg_u {
struct bfi_mhdr mh;
@@ -73,7 +71,7 @@ union bfi_port_i2h_msg_u {
struct bfi_port_generic_rsp disable_rsp;
struct bfi_port_generic_rsp getstats_rsp;
struct bfi_port_generic_rsp clearstats_rsp;
-};
+} __packed;
/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
enum bfi_cee_h2i_msgs {
@@ -97,7 +95,7 @@ enum bfi_cee_i2h_msgs {
*/
struct bfi_lldp_reset_stats {
struct bfi_mhdr mh;
-};
+} __packed;
/*
* @brief H2I command structure for resetting the stats.
@@ -105,7 +103,7 @@ struct bfi_lldp_reset_stats {
*/
struct bfi_cee_reset_stats {
struct bfi_mhdr mh;
-};
+} __packed;
/*
* @brief get configuration command from host
@@ -114,7 +112,7 @@ struct bfi_cee_reset_stats {
struct bfi_cee_get_req {
struct bfi_mhdr mh;
union bfi_addr_u dma_addr;
-};
+} __packed;
/*
* @brief reply message from firmware
@@ -124,7 +122,7 @@ struct bfi_cee_get_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/*
* @brief get configuration command from host
@@ -133,7 +131,7 @@ struct bfi_cee_get_rsp {
struct bfi_cee_stats_req {
struct bfi_mhdr mh;
union bfi_addr_u dma_addr;
-};
+} __packed;
/*
* @brief reply message from firmware
@@ -143,22 +141,20 @@ struct bfi_cee_stats_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/* @brief mailbox command structures from host to firmware */
union bfi_cee_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_cee_get_req get_req;
struct bfi_cee_stats_req stats_req;
-};
+} __packed;
/* @brief mailbox message structures from firmware to host */
union bfi_cee_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_cee_get_rsp get_rsp;
struct bfi_cee_stats_rsp stats_rsp;
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index bccca3bba..d7be7ea8c 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -36,8 +36,6 @@
#include "bfa_defs.h"
#include "bfi.h"
-#pragma pack(1)
-
#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */
#define BFI_ENET_TXQ_PRIO_MAX 8
@@ -59,8 +57,8 @@ union bfi_addr_be_u {
struct {
u32 addr_hi; /* Most Significant 32-bits */
u32 addr_lo; /* Least Significant 32-Bits */
- } a32;
-};
+ } __packed a32;
+} __packed;
/* T X Q U E U E D E F I N E S */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
@@ -70,13 +68,13 @@ union bfi_addr_be_u {
#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
-#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0)
+#define BFI_ENET_TXQ_WI_CF_FCOE_CRC BIT(8)
+#define BFI_ENET_TXQ_WI_CF_IPID_MODE BIT(5)
+#define BFI_ENET_TXQ_WI_CF_INS_PRIO BIT(4)
+#define BFI_ENET_TXQ_WI_CF_INS_VLAN BIT(3)
+#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM BIT(2)
+#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM BIT(1)
+#define BFI_ENET_TXQ_WI_CF_IP_CKSUM BIT(0)
struct bfi_enet_txq_wi_base {
u8 reserved;
@@ -88,28 +86,28 @@ struct bfi_enet_txq_wi_base {
u16 vlan_tag;
u16 lso_mss; /* Only 14 LSB are valid */
u32 frame_length; /* Only 24 LSB are valid */
-};
+} __packed;
struct bfi_enet_txq_wi_ext {
u16 reserved;
u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */
u32 reserved2[3];
-};
+} __packed;
struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
u16 reserved;
u16 length; /* Only 14 LSB are valid */
union bfi_addr_be_u addr;
-};
+} __packed;
/* TxQ Entry Structure */
struct bfi_enet_txq_entry {
union {
struct bfi_enet_txq_wi_base base;
struct bfi_enet_txq_wi_ext ext;
- } wi;
+ } __packed wi;
struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
-};
+} __packed;
#define wi_hdr wi.base
#define wi_ext_hdr wi.ext
@@ -120,36 +118,36 @@ struct bfi_enet_txq_entry {
/* R X Q U E U E D E F I N E S */
struct bfi_enet_rxq_entry {
union bfi_addr_be_u rx_buffer;
-};
+} __packed;
/* R X C O M P L E T I O N Q U E U E D E F I N E S */
/* CQ Entry Flags */
-#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
-#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
-#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2)
-#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3)
+#define BFI_ENET_CQ_EF_MAC_ERROR BIT(0)
+#define BFI_ENET_CQ_EF_FCS_ERROR BIT(1)
+#define BFI_ENET_CQ_EF_TOO_LONG BIT(2)
+#define BFI_ENET_CQ_EF_FC_CRC_OK BIT(3)
-#define BFI_ENET_CQ_EF_RSVD1 (1 << 4)
-#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5)
-#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6)
-#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7)
+#define BFI_ENET_CQ_EF_RSVD1 BIT(4)
+#define BFI_ENET_CQ_EF_L4_CKSUM_OK BIT(5)
+#define BFI_ENET_CQ_EF_L3_CKSUM_OK BIT(6)
+#define BFI_ENET_CQ_EF_HDS_HEADER BIT(7)
-#define BFI_ENET_CQ_EF_UDP (1 << 8)
-#define BFI_ENET_CQ_EF_TCP (1 << 9)
-#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10)
-#define BFI_ENET_CQ_EF_IPV6 (1 << 11)
+#define BFI_ENET_CQ_EF_UDP BIT(8)
+#define BFI_ENET_CQ_EF_TCP BIT(9)
+#define BFI_ENET_CQ_EF_IP_OPTIONS BIT(10)
+#define BFI_ENET_CQ_EF_IPV6 BIT(11)
-#define BFI_ENET_CQ_EF_IPV4 (1 << 12)
-#define BFI_ENET_CQ_EF_VLAN (1 << 13)
-#define BFI_ENET_CQ_EF_RSS (1 << 14)
-#define BFI_ENET_CQ_EF_RSVD2 (1 << 15)
+#define BFI_ENET_CQ_EF_IPV4 BIT(12)
+#define BFI_ENET_CQ_EF_VLAN BIT(13)
+#define BFI_ENET_CQ_EF_RSS BIT(14)
+#define BFI_ENET_CQ_EF_RSVD2 BIT(15)
-#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16)
-#define BFI_ENET_CQ_EF_MCAST (1 << 17)
-#define BFI_ENET_CQ_EF_BCAST (1 << 18)
-#define BFI_ENET_CQ_EF_REMOTE (1 << 19)
+#define BFI_ENET_CQ_EF_MCAST_MATCH BIT(16)
+#define BFI_ENET_CQ_EF_MCAST BIT(17)
+#define BFI_ENET_CQ_EF_BCAST BIT(18)
+#define BFI_ENET_CQ_EF_REMOTE BIT(19)
-#define BFI_ENET_CQ_EF_LOCAL (1 << 20)
+#define BFI_ENET_CQ_EF_LOCAL BIT(20)
/* CQ Entry Structure */
struct bfi_enet_cq_entry {
@@ -161,7 +159,7 @@ struct bfi_enet_cq_entry {
u8 reserved1;
u8 reserved2;
u8 rxq_id;
-};
+} __packed;
/* E N E T C O N T R O L P A T H C O M M A N D S */
struct bfi_enet_q {
@@ -169,23 +167,23 @@ struct bfi_enet_q {
union bfi_addr_u first_entry;
u16 pages; /* # of pages */
u16 page_sz;
-};
+} __packed;
struct bfi_enet_txq {
struct bfi_enet_q q;
u8 priority;
u8 rsvd[3];
-};
+} __packed;
struct bfi_enet_rxq {
struct bfi_enet_q q;
u16 rx_buffer_size;
u16 rsvd;
-};
+} __packed;
struct bfi_enet_cq {
struct bfi_enet_q q;
-};
+} __packed;
struct bfi_enet_ib_cfg {
u8 int_pkt_dma;
@@ -198,16 +196,16 @@ struct bfi_enet_ib_cfg {
u32 inter_pkt_timeout;
u8 inter_pkt_count;
u8 rsvd1[3];
-};
+} __packed;
struct bfi_enet_ib {
union bfi_addr_u index_addr;
union {
u16 msix_index;
u16 intx_bitmask;
- } intr;
+ } __packed intr;
u16 rsvd;
-};
+} __packed;
/* ENET command messages */
enum bfi_enet_h2i_msgs {
@@ -355,7 +353,7 @@ enum bfi_enet_err {
*/
struct bfi_enet_req {
struct bfi_msgq_mhdr mh;
-};
+} __packed;
/* Enable/Disable Request
*
@@ -370,7 +368,7 @@ struct bfi_enet_enable_req {
struct bfi_msgq_mhdr mh;
u8 enable; /* 1 = enable; 0 = disable */
u8 rsvd[3];
-};
+} __packed;
/* Generic Response */
struct bfi_enet_rsp {
@@ -378,7 +376,7 @@ struct bfi_enet_rsp {
u8 error; /*!< if error see cmd_offset */
u8 rsvd;
u16 cmd_offset; /*!< offset to invalid parameter */
-};
+} __packed;
/* GLOBAL CONFIGURATION */
@@ -387,7 +385,7 @@ struct bfi_enet_rsp {
*/
struct bfi_enet_attr_req {
struct bfi_msgq_mhdr mh;
-};
+} __packed;
/* bfi_enet_attr_rsp is used by:
* BFI_ENET_I2H_GET_ATTR_RSP
@@ -400,7 +398,7 @@ struct bfi_enet_attr_rsp {
u32 max_cfg;
u32 max_ucmac;
u32 rit_size;
-};
+} __packed;
/* Tx Configuration
*
@@ -421,7 +419,7 @@ struct bfi_enet_tx_cfg {
u8 apply_vlan_filter;
u8 add_to_vswitch;
u8 rsvd1[1];
-};
+} __packed;
struct bfi_enet_tx_cfg_req {
struct bfi_msgq_mhdr mh;
@@ -431,7 +429,7 @@ struct bfi_enet_tx_cfg_req {
struct {
struct bfi_enet_txq q;
struct bfi_enet_ib ib;
- } q_cfg[BFI_ENET_TXQ_PRIO_MAX];
+ } __packed q_cfg[BFI_ENET_TXQ_PRIO_MAX];
struct bfi_enet_ib_cfg ib_cfg;
@@ -448,7 +446,7 @@ struct bfi_enet_tx_cfg_rsp {
u32 i_dbell; /* PCI base address offset */
u8 hw_qid; /* For debugging */
u8 rsvd[3];
- } q_handles[BFI_ENET_TXQ_PRIO_MAX];
+ } __packed q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
/* Rx Configuration
@@ -481,13 +479,13 @@ struct bfi_enet_rx_cfg {
u8 force_offset;
u8 type;
u8 rsvd1;
- } hds;
+ } __packed hds;
u8 multi_buffer;
u8 strip_vlan;
u8 drop_untagged;
u8 rsvd2;
-};
+} __packed;
/*
* Multicast frames are received on the ql of q-set index zero.
@@ -504,12 +502,12 @@ struct bfi_enet_rx_cfg_req {
struct bfi_enet_rxq qs; /* small/header buffers */
struct bfi_enet_cq cq;
struct bfi_enet_ib ib;
- } q_cfg[BFI_ENET_RX_QSET_MAX];
+ } __packed q_cfg[BFI_ENET_RX_QSET_MAX];
struct bfi_enet_ib_cfg ib_cfg;
struct bfi_enet_rx_cfg rx_cfg;
-};
+} __packed;
struct bfi_enet_rx_cfg_rsp {
struct bfi_msgq_mhdr mh;
@@ -524,8 +522,8 @@ struct bfi_enet_rx_cfg_rsp {
u8 hw_sqid; /* For debugging */
u8 hw_cqid; /* For debugging */
u8 rsvd;
- } q_handles[BFI_ENET_RX_QSET_MAX];
-};
+ } __packed q_handles[BFI_ENET_RX_QSET_MAX];
+} __packed;
/* RIT
*
@@ -537,7 +535,7 @@ struct bfi_enet_rit_req {
u16 size; /* number of table-entries used */
u8 rsvd[2];
u8 table[BFI_ENET_RSS_RIT_MAX];
-};
+} __packed;
/* RSS
*
@@ -556,12 +554,12 @@ struct bfi_enet_rss_cfg {
u8 mask;
u8 rsvd[2];
u32 key[BFI_ENET_RSS_KEY_LEN];
-};
+} __packed;
struct bfi_enet_rss_cfg_req {
struct bfi_msgq_mhdr mh;
struct bfi_enet_rss_cfg cfg;
-};
+} __packed;
/* MAC Unicast
*
@@ -573,16 +571,16 @@ struct bfi_enet_rss_cfg_req {
*/
struct bfi_enet_ucast_req {
struct bfi_msgq_mhdr mh;
- mac_t mac_addr;
+ u8 mac_addr[ETH_ALEN];
u8 rsvd[2];
-};
+} __packed;
/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
- mac_t mac_addr;
-};
+ u8 mac_addr[ETH_ALEN];
+} __packed;
/* MAC Multicast
*
@@ -591,9 +589,9 @@ struct bfi_enet_mac_n_vlan_req {
*/
struct bfi_enet_mcast_add_req {
struct bfi_msgq_mhdr mh;
- mac_t mac_addr;
+ u8 mac_addr[ETH_ALEN];
u8 rsvd[2];
-};
+} __packed;
/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
@@ -605,7 +603,7 @@ struct bfi_enet_mcast_add_rsp {
u16 cmd_offset;
u16 handle;
u8 rsvd1[2];
-};
+} __packed;
/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
@@ -614,7 +612,7 @@ struct bfi_enet_mcast_del_req {
struct bfi_msgq_mhdr mh;
u16 handle;
u8 rsvd[2];
-};
+} __packed;
/* VLAN
*
@@ -626,7 +624,7 @@ struct bfi_enet_rx_vlan_req {
u8 block_idx;
u8 rsvd[3];
u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
-};
+} __packed;
/* PAUSE
*
@@ -638,7 +636,7 @@ struct bfi_enet_set_pause_req {
u8 rsvd[2];
u8 tx_pause; /* 1 = enable; 0 = disable */
u8 rx_pause; /* 1 = enable; 0 = disable */
-};
+} __packed;
/* DIAGNOSTICS
*
@@ -650,7 +648,7 @@ struct bfi_enet_diag_lb_req {
u8 rsvd[2];
u8 mode; /* cable or Serdes */
u8 enable; /* 1 = enable; 0 = disable */
-};
+} __packed;
/* enum for Loopback opmodes */
enum {
@@ -671,14 +669,14 @@ struct bfi_enet_stats_req {
u32 rx_enet_mask;
u32 tx_enet_mask;
union bfi_addr_u host_buffer;
-};
+} __packed;
/* defines for "stats_mask" above. */
-#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
-#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
-#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
-#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
-#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
+#define BFI_ENET_STATS_MAC BIT(0) /* !< MAC Statistics */
+#define BFI_ENET_STATS_BPC BIT(1) /* !< Pause Stats from BPC */
+#define BFI_ENET_STATS_RAD BIT(2) /* !< Rx Admission Statistics */
+#define BFI_ENET_STATS_RX_FC BIT(3) /* !< Rx FC Stats from RxA */
+#define BFI_ENET_STATS_TX_FC BIT(4) /* !< Tx FC Stats from TxA */
#define BFI_ENET_STATS_ALL 0x1f
@@ -699,7 +697,7 @@ struct bfi_enet_stats_txf {
u64 errors;
u64 filter_vlan; /* frames filtered due to VLAN */
u64 filter_mac_sa; /* frames filtered due to SA check */
-};
+} __packed;
/* RxF Frame Statistics */
struct bfi_enet_stats_rxf {
@@ -715,7 +713,7 @@ struct bfi_enet_stats_rxf {
u64 bcast;
u64 bcast_vlan;
u64 frame_drops;
-};
+} __packed;
/* FC Tx Frame Statistics */
struct bfi_enet_stats_fc_tx {
@@ -734,7 +732,7 @@ struct bfi_enet_stats_fc_tx {
u64 txf_parity_errors;
u64 txf_timeout;
u64 txf_fid_parity_errors;
-};
+} __packed;
/* FC Rx Frame Statistics */
struct bfi_enet_stats_fc_rx {
@@ -749,7 +747,7 @@ struct bfi_enet_stats_fc_rx {
u64 rxf_bcast_octets;
u64 rxf_bcast;
u64 rxf_bcast_vlan;
-};
+} __packed;
/* RAD Frame Statistics */
struct bfi_enet_stats_rad {
@@ -770,7 +768,7 @@ struct bfi_enet_stats_rad {
u64 rx_bcast_vlan;
u64 rx_drops;
-};
+} __packed;
/* BPC Tx Registers */
struct bfi_enet_stats_bpc {
@@ -785,7 +783,7 @@ struct bfi_enet_stats_bpc {
u64 rx_zero_pause[8]; /*!< Pause cancellation */
/*!<Pause initiation rather than retention */
u64 rx_first_pause[8];
-};
+} __packed;
/* MAC Rx Statistics */
struct bfi_enet_stats_mac {
@@ -838,7 +836,7 @@ struct bfi_enet_stats_mac {
u64 tx_oversize;
u64 tx_undersize;
u64 tx_fragments;
-};
+} __packed;
/* Complete statistics, DMAed from fw to host followed by
* BFI_ENET_I2H_STATS_GET_RSP
@@ -852,8 +850,6 @@ struct bfi_enet_stats {
struct bfi_enet_stats_fc_tx fc_tx_stats;
struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX];
struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX];
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_ENET_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 8ba72b1f3..006dcad9a 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -28,36 +28,8 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
/* Macros and constants */
-#define BNA_IOC_TIMER_FREQ 200
-
-/* Log string size */
-#define BNA_MESSAGE_SIZE 256
-
#define bna_is_small_rxq(_id) ((_id) & 0x1)
-#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
- (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
-
-#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
-
-#define BNA_TO_POWER_OF_2(x) \
-do { \
- int _shift = 0; \
- while ((x) && (x) != 1) { \
- (x) >>= 1; \
- _shift++; \
- } \
- (x) <<= _shift; \
-} while (0)
-
-#define BNA_TO_POWER_OF_2_HIGH(x) \
-do { \
- int n = 1; \
- while (n < (x)) \
- n <<= 1; \
- (x) = n; \
-} while (0)
-
/*
* input : _addr-> os dma addr in host endian format,
* output : _bna_dma_addr-> pointer to hw dma addr
@@ -80,62 +52,8 @@ do { \
| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
} while (0)
-#define containing_rec(addr, type, field) \
- ((type *)((unsigned char *)(addr) - \
- (unsigned char *)(&((type *)0)->field)))
-
#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
-/* TxQ element is 64 bytes */
-#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
-#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
-
-#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
- (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
-}
-
-/* RxQ element is 8 bytes */
-#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
-#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
-
-#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> \
- BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
- (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
-}
-
-/* CQ element is 16 bytes */
-#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
-#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
-
-#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- \
- page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> \
- BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
- (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
-}
-
-#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
- (&((_cast *)(_q_base))[(_qe_idx)])
-
-#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
-
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
@@ -147,31 +65,10 @@ do { \
#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
((_q_depth) - 1))
-
#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
(_q_depth - 1))
-#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
-
-#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
-
-#define BNA_Q_PI_ADD(_q_ptr, _num) \
- (_q_ptr)->q.producer_index = \
- (((_q_ptr)->q.producer_index + (_num)) & \
- ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_CI_ADD(_q_ptr, _num) \
- (_q_ptr)->q.consumer_index = \
- (((_q_ptr)->q.consumer_index + (_num)) \
- & ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_FREE_COUNT(_q_ptr) \
- (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
-
-#define BNA_Q_IN_USE_COUNT(_q_ptr) \
- (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
-
#define BNA_LARGE_PKT_SIZE 1000
#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
@@ -222,21 +119,6 @@ do { \
} \
} while (0)
-#define call_rxf_pause_cbfn(rxf) \
-do { \
- if ((rxf)->oper_state_cbfn) { \
- void (*cbfn)(struct bnad *, struct bna_rx *); \
- struct bnad *cbarg; \
- cbfn = (rxf)->oper_state_cbfn; \
- cbarg = (rxf)->oper_state_cbarg; \
- (rxf)->oper_state_cbfn = NULL; \
- (rxf)->oper_state_cbarg = NULL; \
- cbfn(cbarg, rxf->rx); \
- } \
-} while (0)
-
-#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
-
#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
@@ -326,28 +208,24 @@ do { \
#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
#define bna_tx_from_rid(_bna, _rid, _tx) \
-do { \
- struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
- struct bna_tx *__tx; \
- struct list_head *qe; \
- _tx = NULL; \
- list_for_each(qe, &__tx_mod->tx_active_q) { \
- __tx = (struct bna_tx *)qe; \
- if (__tx->rid == (_rid)) { \
- (_tx) = __tx; \
- break; \
- } \
- } \
+do { \
+ struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
+ struct bna_tx *__tx; \
+ _tx = NULL; \
+ list_for_each_entry(__tx, &__tx_mod->tx_active_q, qe) { \
+ if (__tx->rid == (_rid)) { \
+ (_tx) = __tx; \
+ break; \
+ } \
+ } \
} while (0)
#define bna_rx_from_rid(_bna, _rid, _rx) \
do { \
struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
struct bna_rx *__rx; \
- struct list_head *qe; \
_rx = NULL; \
- list_for_each(qe, &__rx_mod->rx_active_q) { \
- __rx = (struct bna_rx *)qe; \
+ list_for_each_entry(__rx, &__rx_mod->rx_active_q, qe) { \
if (__rx->rid == (_rid)) { \
(_rx) = __rx; \
break; \
@@ -365,17 +243,14 @@ do { \
/* Inline functions */
-static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
+static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
{
- struct bna_mac *mac = NULL;
- struct list_head *qe;
- list_for_each(qe, q) {
- if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
- mac = (struct bna_mac *)qe;
- break;
- }
- }
- return mac;
+ struct bna_mac *mac;
+
+ list_for_each_entry(mac, q, qe)
+ if (ether_addr_equal(mac->addr, addr))
+ return mac;
+ return NULL;
}
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
@@ -401,7 +276,6 @@ void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
-void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
@@ -488,31 +362,19 @@ void bna_rx_cleanup_complete(struct bna_rx *rx);
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
-enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac);
+enum bna_cb_status bna_rx_ucast_listset(struct bna_rx *rx, int count,
+ const u8 *uclist);
+enum bna_cb_status bna_rx_mcast_add(struct bna_rx *rx, const u8 *mcmac,
+ void (*cbfn)(struct bnad *,
+ struct bna_rx *));
+enum bna_cb_status bna_rx_mcast_listset(struct bna_rx *rx, int count,
+ const u8 *mcmac);
void
-bna_rx_mcast_delall(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+bna_rx_mcast_delall(struct bna_rx *rx);
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
- enum bna_rxmode bitmask,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+ enum bna_rxmode bitmask);
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
@@ -532,11 +394,10 @@ void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
- struct bna_pause_config *pause_config,
- void (*cbfn)(struct bnad *));
+ struct bna_pause_config *pause_config);
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
-void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
+void bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac);
/* IOCETH */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index deb8da6ab..4e5c3874a 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -207,7 +207,7 @@ bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
- if (rx_enet_mask & ((u32)(1 << i))) {
+ if (rx_enet_mask & BIT(i)) {
int k;
count = sizeof(struct bfi_enet_stats_rxf) /
sizeof(u64);
@@ -222,7 +222,7 @@ bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
- if (tx_enet_mask & ((u32)(1 << i))) {
+ if (tx_enet_mask & BIT(i)) {
int k;
count = sizeof(struct bfi_enet_stats_txf) /
sizeof(u64);
@@ -884,16 +884,6 @@ do { \
} \
} while (0)
-#define call_enet_pause_cbfn(enet) \
-do { \
- if ((enet)->pause_cbfn) { \
- void (*cbfn)(struct bnad *); \
- cbfn = (enet)->pause_cbfn; \
- (enet)->pause_cbfn = NULL; \
- cbfn((enet)->bna->bnad); \
- } \
-} while (0)
-
#define call_enet_mtu_cbfn(enet) \
do { \
if ((enet)->mtu_cbfn) { \
@@ -925,7 +915,6 @@ bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
- call_enet_pause_cbfn(enet);
call_enet_mtu_cbfn(enet);
call_enet_stop_cbfn(enet);
}
@@ -947,7 +936,6 @@ bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
break;
case ENET_E_PAUSE_CFG:
- call_enet_pause_cbfn(enet);
break;
case ENET_E_MTU_CFG:
@@ -1039,7 +1027,6 @@ bna_enet_sm_started_entry(struct bna_enet *enet)
* NOTE: Do not call bna_enet_chld_start() here, since it will be
* inadvertently called during cfg_wait->started transition as well
*/
- call_enet_pause_cbfn(enet);
call_enet_mtu_cbfn(enet);
}
@@ -1211,8 +1198,6 @@ bna_enet_init(struct bna_enet *enet, struct bna *bna)
enet->stop_cbfn = NULL;
enet->stop_cbarg = NULL;
- enet->pause_cbfn = NULL;
-
enet->mtu_cbfn = NULL;
bfa_fsm_set_state(enet, bna_enet_sm_stopped);
@@ -1308,13 +1293,10 @@ bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void
bna_enet_pause_config(struct bna_enet *enet,
- struct bna_pause_config *pause_config,
- void (*cbfn)(struct bnad *))
+ struct bna_pause_config *pause_config)
{
enet->pause_config = *pause_config;
- enet->pause_cbfn = cbfn;
-
bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
@@ -1330,9 +1312,9 @@ bna_enet_mtu_set(struct bna_enet *enet, int mtu,
}
void
-bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
+bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
- *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
+ bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}
/* IOCETH */
@@ -1810,17 +1792,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&ucam_mod->free_q);
- for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
- bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
- }
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&ucam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
- bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
- }
ucam_mod->bna = bna;
}
@@ -1828,17 +1806,6 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &ucam_mod->free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &ucam_mod->del_q)
- i++;
-
ucam_mod->bna = NULL;
}
@@ -1852,27 +1819,21 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&mcam_mod->free_q);
- for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
- bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
- }
mcam_mod->mchandle = (struct bna_mcam_handle *)
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&mcam_mod->free_handle_q);
- for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
- bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
list_add_tail(&mcam_mod->mchandle[i].qe,
- &mcam_mod->free_handle_q);
- }
+ &mcam_mod->free_handle_q);
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&mcam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
- bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
- }
mcam_mod->bna = bna;
}
@@ -1880,18 +1841,6 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &mcam_mod->free_q) i++;
-
- i = 0;
- list_for_each(qe, &mcam_mod->del_q) i++;
-
- i = 0;
- list_for_each(qe, &mcam_mod->free_handle_q) i++;
-
mcam_mod->bna = NULL;
}
@@ -2108,32 +2057,26 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
- struct list_head *qe;
-
- if (list_empty(head))
- return NULL;
+ struct bna_mac *mac;
- bfa_q_deq(head, &qe);
- return (struct bna_mac *)qe;
-}
+ mac = list_first_entry_or_null(head, struct bna_mac, qe);
+ if (mac)
+ list_del(&mac->qe);
-void
-bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, tail);
+ return mac;
}
struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
- struct list_head *qe;
-
- if (list_empty(&mcam_mod->free_handle_q))
- return NULL;
+ struct bna_mcam_handle *handle;
- bfa_q_deq(&mcam_mod->free_handle_q, &qe);
+ handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
+ struct bna_mcam_handle, qe);
+ if (handle)
+ list_del(&handle->qe);
- return (struct bna_mcam_handle *)qe;
+ return handle;
}
void
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 174af0e9d..52b45c993 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -213,7 +213,7 @@ do { \
* 15 bits (32K) should be large enough to accumulate, anyways, and the max.
* acked events to h/w can be (32K + max poll weight) (currently 64).
*/
-#define BNA_IB_MAX_ACK_EVENTS (1 << 15)
+#define BNA_IB_MAX_ACK_EVENTS BIT(15)
/* These macros build the data portion of the TxQ/RxQ doorbell */
#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
@@ -282,13 +282,13 @@ do { \
#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
-#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+#define BNA_TXQ_WI_CF_FCOE_CRC BIT(8)
+#define BNA_TXQ_WI_CF_IPID_MODE BIT(5)
+#define BNA_TXQ_WI_CF_INS_PRIO BIT(4)
+#define BNA_TXQ_WI_CF_INS_VLAN BIT(3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM BIT(2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM BIT(1)
+#define BNA_TXQ_WI_CF_IP_CKSUM BIT(0)
#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
@@ -297,36 +297,36 @@ do { \
* Completion Q defines
*/
/* CQ Entry Flags */
-#define BNA_CQ_EF_MAC_ERROR (1 << 0)
-#define BNA_CQ_EF_FCS_ERROR (1 << 1)
-#define BNA_CQ_EF_TOO_LONG (1 << 2)
-#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
-
-#define BNA_CQ_EF_RSVD1 (1 << 4)
-#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
-#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
-#define BNA_CQ_EF_HDS_HEADER (1 << 7)
-
-#define BNA_CQ_EF_UDP (1 << 8)
-#define BNA_CQ_EF_TCP (1 << 9)
-#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
-#define BNA_CQ_EF_IPV6 (1 << 11)
-
-#define BNA_CQ_EF_IPV4 (1 << 12)
-#define BNA_CQ_EF_VLAN (1 << 13)
-#define BNA_CQ_EF_RSS (1 << 14)
-#define BNA_CQ_EF_RSVD2 (1 << 15)
-
-#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
-#define BNA_CQ_EF_MCAST (1 << 17)
-#define BNA_CQ_EF_BCAST (1 << 18)
-#define BNA_CQ_EF_REMOTE (1 << 19)
-
-#define BNA_CQ_EF_LOCAL (1 << 20)
+#define BNA_CQ_EF_MAC_ERROR BIT(0)
+#define BNA_CQ_EF_FCS_ERROR BIT(1)
+#define BNA_CQ_EF_TOO_LONG BIT(2)
+#define BNA_CQ_EF_FC_CRC_OK BIT(3)
+
+#define BNA_CQ_EF_RSVD1 BIT(4)
+#define BNA_CQ_EF_L4_CKSUM_OK BIT(5)
+#define BNA_CQ_EF_L3_CKSUM_OK BIT(6)
+#define BNA_CQ_EF_HDS_HEADER BIT(7)
+
+#define BNA_CQ_EF_UDP BIT(8)
+#define BNA_CQ_EF_TCP BIT(9)
+#define BNA_CQ_EF_IP_OPTIONS BIT(10)
+#define BNA_CQ_EF_IPV6 BIT(11)
+
+#define BNA_CQ_EF_IPV4 BIT(12)
+#define BNA_CQ_EF_VLAN BIT(13)
+#define BNA_CQ_EF_RSS BIT(14)
+#define BNA_CQ_EF_RSVD2 BIT(15)
+
+#define BNA_CQ_EF_MCAST_MATCH BIT(16)
+#define BNA_CQ_EF_MCAST BIT(17)
+#define BNA_CQ_EF_BCAST BIT(18)
+#define BNA_CQ_EF_REMOTE BIT(19)
+
+#define BNA_CQ_EF_LOCAL BIT(20)
/* CAT2 ASIC does not use bit 21 as per the SPEC.
* Bit 31 is set in every end of frame completion
*/
-#define BNA_CQ_EF_EOP (1 << 31)
+#define BNA_CQ_EF_EOP BIT(31)
/* Data structures */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 8ab3a5f62..5d0753cc7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -46,7 +46,6 @@ do { \
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
-static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
@@ -60,14 +59,10 @@ static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
- enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
- enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
enum bna_rxf_event);
@@ -82,11 +77,7 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_START:
- if (rxf->flags & BNA_RXF_F_PAUSED) {
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- call_rxf_start_cbfn(rxf);
- } else
- bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
case RXF_E_STOP:
@@ -101,45 +92,6 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
call_rxf_cam_fltr_cbfn(rxf);
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- call_rxf_pause_cbfn(rxf);
- break;
-
- case RXF_E_RESUME:
- rxf->flags &= ~BNA_RXF_F_PAUSED;
- call_rxf_resume_cbfn(rxf);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
-{
- call_rxf_pause_cbfn(rxf);
-}
-
-static void
-bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_STOP:
- case RXF_E_FAIL:
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_CONFIG:
- call_rxf_cam_fltr_cbfn(rxf);
- break;
-
- case RXF_E_RESUME:
- rxf->flags &= ~BNA_RXF_F_PAUSED;
- bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
- break;
-
default:
bfa_sm_fault(event);
}
@@ -166,7 +118,6 @@ bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
bna_rxf_cfg_reset(rxf);
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
- call_rxf_resume_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
@@ -174,12 +125,6 @@ bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
/* No-op */
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- call_rxf_start_cbfn(rxf);
- bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
- break;
-
case RXF_E_FW_RESP:
if (!bna_rxf_cfg_apply(rxf)) {
/* No more pending config updates */
@@ -197,7 +142,6 @@ bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
- call_rxf_resume_cbfn(rxf);
}
static void
@@ -214,41 +158,6 @@ bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- if (!bna_rxf_fltr_clear(rxf))
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- else
- bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
-{
-}
-
-static void
-bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_FAIL:
- bna_rxf_cfg_reset(rxf);
- call_rxf_pause_cbfn(rxf);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_FW_RESP:
- if (!bna_rxf_fltr_clear(rxf)) {
- /* No more pending CAM entries to clear */
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- }
- break;
-
default:
bfa_sm_fault(event);
}
@@ -283,7 +192,7 @@ bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
- memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ ether_addr_copy(req->mac_addr, mac->addr);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_ucast_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
@@ -299,7 +208,7 @@ bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
- memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ ether_addr_copy(req->mac_addr, mac->addr);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_mcast_add_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
@@ -444,22 +353,17 @@ bna_bfi_rss_enable(struct bna_rxf *rxf)
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
-bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
+bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr)
{
struct bna_mac *mac;
- struct list_head *qe;
- list_for_each(qe, &rxf->mcast_active_q) {
- mac = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ list_for_each_entry(mac, &rxf->mcast_active_q, qe)
+ if (ether_addr_equal(mac->addr, mac_addr))
return mac;
- }
- list_for_each(qe, &rxf->mcast_pending_del_q) {
- mac = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe)
+ if (ether_addr_equal(mac->addr, mac_addr))
return mac;
- }
return NULL;
}
@@ -468,13 +372,10 @@ static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
struct bna_mcam_handle *mchandle;
- struct list_head *qe;
- list_for_each(qe, &rxf->mcast_handle_q) {
- mchandle = (struct bna_mcam_handle *)qe;
+ list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe)
if (mchandle->handle == handle)
return mchandle;
- }
return NULL;
}
@@ -515,7 +416,6 @@ bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
ret = 1;
}
list_del(&mchandle->qe);
- bfa_q_qe_init(&mchandle->qe);
bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
}
mac->handle = NULL;
@@ -527,26 +427,23 @@ static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
- struct list_head *qe;
int ret;
/* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_pending_del_q,
+ struct bna_mac, qe);
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
- bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
if (ret)
return ret;
}
/* Add multicast entries */
if (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_active_q);
bna_bfi_mcast_add_req(rxf, mac);
return 1;
}
@@ -566,7 +463,7 @@ bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
block_idx++;
vlan_pending_bitmask >>= 1;
}
- rxf->vlan_pending_bitmask &= ~(1 << block_idx);
+ rxf->vlan_pending_bitmask &= ~BIT(block_idx);
bna_bfi_rx_vlan_filter_set(rxf, block_idx);
return 1;
}
@@ -577,27 +474,24 @@ bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
- struct list_head *qe;
struct bna_mac *mac;
int ret;
/* Throw away delete pending mcast entries */
while (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_pending_del_q,
+ struct bna_mac, qe);
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
- bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
if (ret)
return ret;
}
/* Move active mcast entries to pending_add_q */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- bfa_q_qe_init(qe);
- list_add_tail(qe, &rxf->mcast_pending_add_q);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
if (bna_rxf_mcast_del(rxf, mac, cleanup))
return 1;
}
@@ -658,25 +552,6 @@ bna_rxf_cfg_apply(struct bna_rxf *rxf)
return 0;
}
-/* Only software reset */
-static int
-bna_rxf_fltr_clear(struct bna_rxf *rxf)
-{
- if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- return 0;
-}
-
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
@@ -693,16 +568,13 @@ bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
struct bna_rx *rx = rxf->rx;
struct bna_rxp *rxp;
- struct list_head *qe;
int offset = 0;
rxf->rit_size = rit_size;
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
+ list_for_each_entry(rxp, &rx->rxp_q, qe) {
rxf->rit[offset] = rxp->cq.ccb->id;
offset++;
}
-
}
void
@@ -760,9 +632,6 @@ bna_rxf_init(struct bna_rxf *rxf,
INIT_LIST_HEAD(&rxf->mcast_active_q);
INIT_LIST_HEAD(&rxf->mcast_handle_q);
- if (q_config->paused)
- rxf->flags |= BNA_RXF_F_PAUSED;
-
rxf->rit = (u8 *)
res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
bna_rit_init(rxf, q_config->num_paths);
@@ -795,22 +664,21 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->ucast_active_set = 0;
while (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
}
if (rxf->ucast_pending_mac) {
- bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
- bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
- rxf->ucast_pending_mac);
+ list_add_tail(&rxf->ucast_pending_mac->qe,
+ bna_ucam_mod_free_q(rxf->rx->bna));
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
}
rxf->rxmode_pending = 0;
@@ -823,8 +691,6 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->rss_pending = 0;
rxf->vlan_strip_pending = false;
- rxf->flags = 0;
-
rxf->rx = NULL;
}
@@ -863,8 +729,7 @@ bna_rxf_fail(struct bna_rxf *rxf)
}
enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac)
{
struct bna_rxf *rxf = &rx->rxf;
@@ -873,12 +738,11 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
- bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
}
- memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
+ ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
rxf->ucast_pending_set = 1;
- rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbfn = NULL;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
@@ -887,7 +751,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
}
enum bna_cb_status
-bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+bna_rx_mcast_add(struct bna_rx *rx, const u8 *addr,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
struct bna_rxf *rxf = &rx->rxf;
@@ -904,8 +768,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, addr, ETH_ALEN);
+ ether_addr_copy(mac->addr, addr);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
rxf->cam_fltr_cbfn = cbfn;
@@ -917,35 +780,31 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
-bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_ucast_listset(struct bna_rx *rx, int count, const u8 *uclist)
{
struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
- struct list_head *qe;
- u8 *mcaddr;
+ const u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->ucast_active_q)) {
- bfa_q_deq(&rxf->ucast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->ucast_active_q,
+ struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
- memcpy(del_mac, mac, sizeof(*del_mac));
+ ether_addr_copy(del_mac->addr, mac->addr);
+ del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
/* Allocate nodes */
@@ -954,69 +813,57 @@ bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, mcaddr, ETH_ALEN);
+ ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
}
/* Add the new entries */
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
}
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
return BNA_CB_UCAST_CAM_FULL;
}
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_mcast_listset(struct bna_rx *rx, int count, const u8 *mclist)
{
struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
- struct list_head *qe;
- u8 *mcaddr;
+ const u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
-
- memcpy(del_mac, mac, sizeof(*del_mac));
+ ether_addr_copy(del_mac->addr, mac->addr);
+ del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
/* Allocate nodes */
@@ -1025,8 +872,7 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
if (mac == NULL)
goto err_return;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, mcaddr, ETH_ALEN);
+ ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
@@ -1034,70 +880,52 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
/* Add the new entries */
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
}
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
-bna_rx_mcast_delall(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_mcast_delall(struct bna_rx *rx)
{
struct bna_rxf *rxf = &rx->rxf;
- struct list_head *qe;
struct bna_mac *mac, *del_mac;
int need_hw_config = 0;
/* Purge all entries from pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
}
/* Schedule all entries in active_q for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
+ list_del(&mac->qe);
del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
-
memcpy(del_mac, mac, sizeof(*del_mac));
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
need_hw_config = 1;
}
- if (need_hw_config) {
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
+ if (need_hw_config)
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
- return;
- }
-
- if (cbfn)
- (*cbfn)(rx->bna->bnad, rx);
}
void
@@ -1105,12 +933,12 @@ bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
- int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] |= bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
- rxf->vlan_pending_bitmask |= (1 << group_id);
+ rxf->vlan_pending_bitmask |= BIT(group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
@@ -1120,12 +948,12 @@ bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
- int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] &= ~bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
- rxf->vlan_pending_bitmask |= (1 << group_id);
+ rxf->vlan_pending_bitmask |= BIT(group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
@@ -1134,23 +962,21 @@ static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
- struct list_head *qe;
/* Delete MAC addresses previously added */
if (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_del_q,
+ struct bna_mac, qe);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
return 1;
}
/* Set default unicast MAC */
if (rxf->ucast_pending_set) {
rxf->ucast_pending_set = 0;
- memcpy(rxf->ucast_active_mac.addr,
- rxf->ucast_pending_mac->addr, ETH_ALEN);
+ ether_addr_copy(rxf->ucast_active_mac.addr,
+ rxf->ucast_pending_mac->addr);
rxf->ucast_active_set = 1;
bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
BFI_ENET_H2I_MAC_UCAST_SET_REQ);
@@ -1159,9 +985,8 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
/* Add additional MAC entries */
if (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
list_add_tail(&mac->qe, &rxf->ucast_active_q);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
return 1;
@@ -1173,33 +998,30 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
- struct list_head *qe;
struct bna_mac *mac;
/* Throw away delete pending ucast entries */
while (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_del_q,
+ struct bna_mac, qe);
if (cleanup == BNA_SOFT_CLEANUP)
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
- mac);
+ list_move_tail(&mac->qe,
+ bna_ucam_mod_del_q(rxf->rx->bna));
else {
bna_bfi_ucast_req(rxf, mac,
- BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
- mac);
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ list_move_tail(&mac->qe,
+ bna_ucam_mod_del_q(rxf->rx->bna));
return 1;
}
}
/* Move active ucast entries to pending_add_q */
while (!list_empty(&rxf->ucast_active_q)) {
- bfa_q_deq(&rxf->ucast_active_q, &qe);
- bfa_q_qe_init(qe);
- list_add_tail(qe, &rxf->ucast_pending_add_q);
+ mac = list_first_entry(&rxf->ucast_active_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
if (cleanup == BNA_HARD_CLEANUP) {
- mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
return 1;
@@ -1654,14 +1476,11 @@ static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
- struct list_head *qe_rxp;
int is_regular = (rx->type == BNA_RX_T_REGULAR);
/* Start IB */
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
+ list_for_each_entry(rxp, &rx->rxp_q, qe)
bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
- }
bna_ethport_cb_rx_started(&rx->bna->ethport);
}
@@ -1804,7 +1623,6 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
- struct list_head *rxp_qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
@@ -1814,11 +1632,9 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
- for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
- i < rx->num_paths;
- i++, rxp_qe = bfa_q_next(rxp_qe)) {
- rxp = (struct bna_rxp *)rxp_qe;
-
+ for (i = 0; i < rx->num_paths; i++) {
+ rxp = rxp ? list_next_entry(rxp, qe)
+ : list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
GET_RXQS(rxp, q0, q1);
switch (rxp->type) {
case BNA_RXP_SLR:
@@ -1921,13 +1737,10 @@ static void
bna_rx_enet_stop(struct bna_rx *rx)
{
struct bna_rxp *rxp;
- struct list_head *qe_rxp;
/* Stop IB */
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
+ list_for_each_entry(rxp, &rx->rxp_q, qe)
bna_ib_stop(rx->bna, &rxp->cq.ib);
- }
bna_bfi_rx_enet_stop(rx);
}
@@ -1957,12 +1770,10 @@ static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
struct bna_rxq *rxq = NULL;
- struct list_head *qe = NULL;
- bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+ rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
+ list_del(&rxq->qe);
rx_mod->rxq_free_count--;
- rxq = (struct bna_rxq *)qe;
- bfa_q_qe_init(&rxq->qe);
return rxq;
}
@@ -1970,7 +1781,6 @@ bna_rxq_get(struct bna_rx_mod *rx_mod)
static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
- bfa_q_qe_init(&rxq->qe);
list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
@@ -1978,13 +1788,11 @@ bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
- struct list_head *qe = NULL;
struct bna_rxp *rxp = NULL;
- bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+ rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
+ list_del(&rxp->qe);
rx_mod->rxp_free_count--;
- rxp = (struct bna_rxp *)qe;
- bfa_q_qe_init(&rxp->qe);
return rxp;
}
@@ -1992,7 +1800,6 @@ bna_rxp_get(struct bna_rx_mod *rx_mod)
static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
- bfa_q_qe_init(&rxp->qe);
list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
@@ -2000,18 +1807,16 @@ bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
- struct list_head *qe = NULL;
struct bna_rx *rx = NULL;
- if (type == BNA_RX_T_REGULAR) {
- bfa_q_deq(&rx_mod->rx_free_q, &qe);
- } else
- bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
+ BUG_ON(list_empty(&rx_mod->rx_free_q));
+ if (type == BNA_RX_T_REGULAR)
+ rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
+ else
+ rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
rx_mod->rx_free_count--;
- rx = (struct bna_rx *)qe;
- bfa_q_qe_init(&rx->qe);
- list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+ list_move_tail(&rx->qe, &rx_mod->rx_active_q);
rx->type = type;
return rx;
@@ -2020,32 +1825,13 @@ bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
- struct list_head *prev_qe = NULL;
struct list_head *qe;
- bfa_q_qe_init(&rx->qe);
-
- list_for_each(qe, &rx_mod->rx_free_q) {
+ list_for_each_prev(qe, &rx_mod->rx_free_q)
if (((struct bna_rx *)qe)->rid < rx->rid)
- prev_qe = qe;
- else
break;
- }
-
- if (prev_qe == NULL) {
- /* This is the first entry */
- bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
- } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
- /* This is the last entry */
- list_add_tail(&rx->qe, &rx_mod->rx_free_q);
- } else {
- /* Somewhere in the middle */
- bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
- bfa_q_prev(&rx->qe) = prev_qe;
- bfa_q_next(prev_qe) = &rx->qe;
- bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
- }
+ list_add(&rx->qe, qe);
rx_mod->rx_free_count++;
}
@@ -2199,24 +1985,20 @@ void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
if (type == BNA_RX_T_LOOPBACK)
rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
if (rx->type == type)
bna_rx_start(rx);
- }
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
@@ -2225,13 +2007,11 @@ bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
if (rx->type == type) {
bfa_wc_up(&rx_mod->rx_stop_wc);
bna_rx_stop(rx);
}
- }
bfa_wc_wait(&rx_mod->rx_stop_wc);
}
@@ -2240,15 +2020,12 @@ void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
bna_rx_fail(rx);
- }
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
@@ -2282,7 +2059,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rx_ptr = &rx_mod->rx[index];
- bfa_q_qe_init(&rx_ptr->qe);
INIT_LIST_HEAD(&rx_ptr->rxp_q);
rx_ptr->bna = NULL;
rx_ptr->rid = index;
@@ -2296,7 +2072,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
/* build RX-path queue */
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rxp_ptr = &rx_mod->rxp[index];
- bfa_q_qe_init(&rxp_ptr->qe);
list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
@@ -2304,7 +2079,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
/* build RXQ queue */
for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
rxq_ptr = &rx_mod->rxq[index];
- bfa_q_qe_init(&rxq_ptr->qe);
list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
@@ -2313,21 +2087,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &rx_mod->rx_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &rx_mod->rxp_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &rx_mod->rxq_free_q)
- i++;
-
rx_mod->bna = NULL;
}
@@ -2337,7 +2096,6 @@ bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
- struct list_head *rxp_qe;
int i;
bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
@@ -2345,10 +2103,8 @@ bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
rx->hw_id = cfg_rsp->hw_id;
- for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
- i < rx->num_paths;
- i++, rxp_qe = bfa_q_next(rxp_qe)) {
- rxp = (struct bna_rxp *)rxp_qe;
+ for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
+ i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
GET_RXQS(rxp, q0, q1);
/* Setup doorbells */
@@ -2396,20 +2152,19 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
dq_depth = q_cfg->q0_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
- cq_depth = dq_depth + hq_depth;
+ cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
- BNA_TO_POWER_OF_2_HIGH(cq_depth);
cq_size = cq_depth * BFI_CQ_WI_SIZE;
cq_size = ALIGN(cq_size, PAGE_SIZE);
cpage_count = SIZE_TO_PAGES(cq_size);
- BNA_TO_POWER_OF_2_HIGH(dq_depth);
+ dq_depth = roundup_pow_of_two(dq_depth);
dq_size = dq_depth * BFI_RXQ_WI_SIZE;
dq_size = ALIGN(dq_size, PAGE_SIZE);
dpage_count = SIZE_TO_PAGES(dq_size);
if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
- BNA_TO_POWER_OF_2_HIGH(hq_depth);
+ hq_depth = roundup_pow_of_two(hq_depth);
hq_size = hq_depth * BFI_RXQ_WI_SIZE;
hq_size = ALIGN(hq_size, PAGE_SIZE);
hpage_count = SIZE_TO_PAGES(hq_size);
@@ -2620,7 +2375,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
if (intr_info->intr_type == BNA_INTR_T_MSIX)
rxp->cq.ib.intr_vector = rxp->vector;
else
- rxp->cq.ib.intr_vector = (1 << rxp->vector);
+ rxp->cq.ib.intr_vector = BIT(rxp->vector);
rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
@@ -2691,7 +2446,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* if multi-buffer is enabled sum of q0_depth
* and q1_depth need not be a power of 2
*/
- BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_depth = roundup_pow_of_two(cq_depth);
rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
@@ -2725,7 +2480,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
- rx_mod->rid_mask |= (1 << rx->rid);
+ rx_mod->rid_mask |= BIT(rx->rid);
return rx;
}
@@ -2742,7 +2497,8 @@ bna_rx_destroy(struct bna_rx *rx)
bna_rxf_uninit(&rx->rxf);
while (!list_empty(&rx->rxp_q)) {
- bfa_q_deq(&rx->rxp_q, &rxp);
+ rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
+ list_del(&rxp->qe);
GET_RXQS(rxp, q0, q1);
if (rx->rcb_destroy_cbfn)
rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
@@ -2769,15 +2525,13 @@ bna_rx_destroy(struct bna_rx *rx)
bna_rxp_put(rx_mod, rxp);
}
- list_for_each(qe, &rx_mod->rx_active_q) {
+ list_for_each(qe, &rx_mod->rx_active_q)
if (qe == &rx->qe) {
list_del(&rx->qe);
- bfa_q_qe_init(&rx->qe);
break;
}
- }
- rx_mod->rid_mask &= ~(1 << rx->rid);
+ rx_mod->rid_mask &= ~BIT(rx->rid);
rx->bna = NULL;
rx->priv = NULL;
@@ -2844,8 +2598,7 @@ bna_rx_vlan_strip_disable(struct bna_rx *rx)
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
- enum bna_rxmode bitmask,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+ enum bna_rxmode bitmask)
{
struct bna_rxf *rxf = &rx->rxf;
int need_hw_config = 0;
@@ -2900,11 +2653,10 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
/* Trigger h/w if needed */
if (need_hw_config) {
- rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbfn = NULL;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
- } else if (cbfn)
- (*cbfn)(rx->bna->bnad, rx);
+ }
return BNA_CB_SUCCESS;
@@ -2928,10 +2680,8 @@ void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
struct bna_rxp *rxp;
- struct list_head *qe;
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
+ list_for_each_entry(rxp, &rx->rxp_q, qe) {
rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
}
@@ -3024,16 +2774,6 @@ do { \
} \
} while (0)
-#define call_tx_prio_change_cbfn(tx) \
-do { \
- if ((tx)->prio_change_cbfn) { \
- void (*cbfn)(struct bnad *, struct bna_tx *); \
- cbfn = (tx)->prio_change_cbfn; \
- (tx)->prio_change_cbfn = NULL; \
- cbfn((tx)->bna->bnad, (tx)); \
- } \
-} while (0)
-
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);
@@ -3044,7 +2784,6 @@ enum bna_tx_event {
TX_E_FAIL = 3,
TX_E_STARTED = 4,
TX_E_STOPPED = 5,
- TX_E_PRIO_CHANGE = 6,
TX_E_CLEANUP_DONE = 7,
TX_E_BW_UPDATE = 8,
};
@@ -3085,10 +2824,6 @@ bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
/* No-op */
break;
- case TX_E_PRIO_CHANGE:
- call_tx_prio_change_cbfn(tx);
- break;
-
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3109,28 +2844,23 @@ bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
break;
case TX_E_FAIL:
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_STARTED:
- if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
- BNA_TX_F_BW_UPDATED);
+ if (tx->flags & BNA_TX_F_BW_UPDATED) {
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
} else
bfa_fsm_set_state(tx, bna_tx_sm_started);
break;
- case TX_E_PRIO_CHANGE:
- tx->flags |= BNA_TX_F_PRIO_CHANGED;
- break;
-
case TX_E_BW_UPDATE:
tx->flags |= BNA_TX_F_BW_UPDATED;
break;
@@ -3144,11 +2874,9 @@ static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
int is_regular = (tx->type == BNA_TX_T_REGULAR);
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe) {
txq->tcb->priority = txq->priority;
/* Start IB */
bna_ib_start(tx->bna, &txq->ib, is_regular);
@@ -3172,7 +2900,6 @@ bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
break;
@@ -3205,7 +2932,6 @@ bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
bna_tx_enet_stop(tx);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3225,7 +2951,6 @@ bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_FAIL:
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3256,7 +2981,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_failed);
- call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
@@ -3264,7 +2988,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3277,7 +3000,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
- call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}
@@ -3293,7 +3015,6 @@ bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
bfa_fsm_set_state(tx, bna_tx_sm_failed);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3372,7 +3093,6 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
{
struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
struct bna_txq *txq = NULL;
- struct list_head *qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
@@ -3381,11 +3101,9 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
cfg_req->num_queues = tx->num_txq;
- for (i = 0, qe = bfa_q_first(&tx->txq_q);
- i < tx->num_txq;
- i++, qe = bfa_q_next(qe)) {
- txq = (struct bna_txq *)qe;
-
+ for (i = 0; i < tx->num_txq; i++) {
+ txq = txq ? list_next_entry(txq, qe)
+ : list_first_entry(&tx->txq_q, struct bna_txq, qe);
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
cfg_req->q_cfg[i].q.priority = txq->priority;
@@ -3437,13 +3155,10 @@ static void
bna_tx_enet_stop(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
/* Stop IB */
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
bna_ib_stop(tx->bna, &txq->ib);
- }
bna_bfi_tx_enet_stop(tx);
}
@@ -3487,18 +3202,15 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
- struct list_head *qe = NULL;
struct bna_tx *tx = NULL;
if (list_empty(&tx_mod->tx_free_q))
return NULL;
- if (type == BNA_TX_T_REGULAR) {
- bfa_q_deq(&tx_mod->tx_free_q, &qe);
- } else {
- bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
- }
- tx = (struct bna_tx *)qe;
- bfa_q_qe_init(&tx->qe);
+ if (type == BNA_TX_T_REGULAR)
+ tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
+ else
+ tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
+ list_del(&tx->qe);
tx->type = type;
return tx;
@@ -3509,21 +3221,18 @@ bna_tx_free(struct bna_tx *tx)
{
struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
struct bna_txq *txq;
- struct list_head *prev_qe;
struct list_head *qe;
while (!list_empty(&tx->txq_q)) {
- bfa_q_deq(&tx->txq_q, &txq);
- bfa_q_qe_init(&txq->qe);
+ txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
txq->tcb = NULL;
txq->tx = NULL;
- list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+ list_move_tail(&txq->qe, &tx_mod->txq_free_q);
}
list_for_each(qe, &tx_mod->tx_active_q) {
if (qe == &tx->qe) {
list_del(&tx->qe);
- bfa_q_qe_init(&tx->qe);
break;
}
}
@@ -3531,28 +3240,11 @@ bna_tx_free(struct bna_tx *tx)
tx->bna = NULL;
tx->priv = NULL;
- prev_qe = NULL;
- list_for_each(qe, &tx_mod->tx_free_q) {
+ list_for_each_prev(qe, &tx_mod->tx_free_q)
if (((struct bna_tx *)qe)->rid < tx->rid)
- prev_qe = qe;
- else {
break;
- }
- }
- if (prev_qe == NULL) {
- /* This is the first entry */
- bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
- } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
- /* This is the last entry */
- list_add_tail(&tx->qe, &tx_mod->tx_free_q);
- } else {
- /* Somewhere in the middle */
- bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
- bfa_q_prev(&tx->qe) = prev_qe;
- bfa_q_next(prev_qe) = &tx->qe;
- bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
- }
+ list_add(&tx->qe, qe);
}
static void
@@ -3585,7 +3277,6 @@ bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
struct bna_txq *txq = NULL;
- struct list_head *qe;
int i;
bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
@@ -3593,10 +3284,8 @@ bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
tx->hw_id = cfg_rsp->hw_id;
- for (i = 0, qe = bfa_q_first(&tx->txq_q);
- i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
- txq = (struct bna_txq *)qe;
-
+ for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
+ i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
/* Setup doorbells */
txq->tcb->i_dbell->doorbell_addr =
tx->bna->pcidev.pci_bar_kva
@@ -3624,12 +3313,9 @@ void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
- struct list_head *qe;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
- }
}
void
@@ -3689,7 +3375,6 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_mod *tx_mod = &bna->tx_mod;
struct bna_tx *tx;
struct bna_txq *txq;
- struct list_head *qe;
int page_count;
int i;
@@ -3719,9 +3404,8 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
if (list_empty(&tx_mod->txq_free_q))
goto err_return;
- bfa_q_deq(&tx_mod->txq_free_q, &txq);
- bfa_q_qe_init(&txq->qe);
- list_add_tail(&txq->qe, &tx->txq_q);
+ txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
+ list_move_tail(&txq->qe, &tx->txq_q);
txq->tx = tx;
}
@@ -3760,8 +3444,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
/* TxQ */
i = 0;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe) {
txq->tcb = (struct bna_tcb *)
res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
txq->tx_packets = 0;
@@ -3779,7 +3462,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
intr_info->idl[0].vector :
intr_info->idl[i].vector;
if (intr_info->intr_type == BNA_INTR_T_INTX)
- txq->ib.intr_vector = (1 << txq->ib.intr_vector);
+ txq->ib.intr_vector = BIT(txq->ib.intr_vector);
txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
@@ -3821,7 +3504,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
- tx_mod->rid_mask |= (1 << tx->rid);
+ tx_mod->rid_mask |= BIT(tx->rid);
return tx;
@@ -3834,15 +3517,12 @@ void
bna_tx_destroy(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
if (tx->tcb_destroy_cbfn)
(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
- }
- tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
+ tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
bna_tx_free(tx);
}
@@ -3920,9 +3600,7 @@ bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
tx_mod->tx[i].rid = i;
- bfa_q_qe_init(&tx_mod->tx[i].qe);
list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
- bfa_q_qe_init(&tx_mod->txq[i].qe);
list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
}
@@ -3935,17 +3613,6 @@ bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &tx_mod->tx_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &tx_mod->txq_free_q)
- i++;
-
tx_mod->bna = NULL;
}
@@ -3953,24 +3620,20 @@ void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
if (type == BNA_TX_T_LOOPBACK)
tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
if (tx->type == type)
bna_tx_start(tx);
- }
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
@@ -3979,13 +3642,11 @@ bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
if (tx->type == type) {
bfa_wc_up(&tx_mod->tx_stop_wc);
bna_tx_stop(tx);
}
- }
bfa_wc_wait(&tx_mod->tx_stop_wc);
}
@@ -3994,25 +3655,19 @@ void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
bna_tx_fail(tx);
- }
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
struct bna_txq *txq;
- struct list_head *qe;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
- }
}
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d0a7a566f..e0e797f2e 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -135,7 +135,6 @@ enum bna_tx_type {
enum bna_tx_flags {
BNA_TX_F_ENET_STARTED = 1,
BNA_TX_F_ENABLED = 2,
- BNA_TX_F_PRIO_CHANGED = 4,
BNA_TX_F_BW_UPDATED = 8,
};
@@ -182,17 +181,11 @@ enum bna_rx_mod_flags {
BNA_RX_MOD_F_ENET_LOOPBACK = 2,
};
-enum bna_rxf_flags {
- BNA_RXF_F_PAUSED = 1,
-};
-
enum bna_rxf_event {
RXF_E_START = 1,
RXF_E_STOP = 2,
RXF_E_FAIL = 3,
RXF_E_CONFIG = 4,
- RXF_E_PAUSE = 5,
- RXF_E_RESUME = 6,
RXF_E_FW_RESP = 7,
};
@@ -362,9 +355,6 @@ struct bna_enet {
void (*stop_cbfn)(void *);
void *stop_cbarg;
- /* Callback for bna_enet_pause_config() */
- void (*pause_cbfn)(struct bnad *);
-
/* Callback for bna_enet_mtu_set() */
void (*mtu_cbfn)(struct bnad *);
@@ -498,9 +488,6 @@ struct bna_tx {
void (*stop_cbfn)(void *arg, struct bna_tx *tx);
void *stop_cbarg;
- /* callback for bna_tx_prio_set() */
- void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
-
struct bfa_msgq_cmd_entry msgq_cmd;
union {
struct bfi_enet_tx_cfg_req cfg_req;
@@ -676,7 +663,6 @@ struct bna_rx_config {
enum bna_rx_type rx_type;
int num_paths;
enum bna_rxp_type rxp_type;
- int paused;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
@@ -721,7 +707,6 @@ struct bna_rxp {
/* RxF structure (hardware Rx Function) */
struct bna_rxf {
bfa_fsm_t fsm;
- enum bna_rxf_flags flags;
struct bfa_msgq_cmd_entry msgq_cmd;
union {
@@ -742,10 +727,6 @@ struct bna_rxf {
void (*stop_cbfn) (struct bna_rx *rx);
struct bna_rx *stop_cbarg;
- /* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
- void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
- struct bnad *oper_state_cbarg;
-
/**
* callback for:
* bna_rxf_ucast_set()
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b69b8715a..c1c13fd6c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -57,7 +57,8 @@ static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
-static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+static const u8 bnad_bcast_addr[] __aligned(2) =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
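The new __aligned(2) attribute matters because this constant now flows into ether_addr_copy(), whose kerneldoc requires both source and destination to be u16-aligned. The same rule applies to any other static MAC constant handed to the ether_addr_* helpers; a one-line illustration with a made-up multicast address:

    static const u8 example_mcast[ETH_ALEN] __aligned(2) =
            { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };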
/*
* Local MACROS
@@ -308,7 +309,7 @@ bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
}
}
- BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
+ BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
return 0;
}
@@ -675,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
if (!next_cmpl->valid)
break;
}
+ packets++;
/* TODO: BNA_CQ_EF_LOCAL ? */
if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -691,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- packets++;
rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += totlen;
ccb->bytes_per_intr += totlen;
@@ -724,7 +725,6 @@ next:
cmpl->valid = 0;
BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
}
- cmpl = &cq[ccb->producer_index];
}
napi_gro_flush(&rx_ctrl->napi, false);
@@ -757,7 +757,7 @@ bnad_msix_rx(int irq, void *data)
struct bna_ccb *ccb = (struct bna_ccb *)data;
if (ccb) {
- ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+ ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
}
@@ -875,9 +875,9 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
- memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+ ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
if (is_zero_ether_addr(netdev->dev_addr))
- memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}
/* Control Path Handlers */
@@ -946,8 +946,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
if (link_up) {
if (!netif_carrier_ok(bnad->netdev)) {
uint tx_id, tcb_id;
- printk(KERN_WARNING "bna: %s link up\n",
- bnad->netdev->name);
+ netdev_info(bnad->netdev, "link up\n");
netif_carrier_on(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
@@ -966,10 +965,6 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
/*
* Force an immediate
* Transmit Schedule */
- printk(KERN_INFO "bna: %s %d "
- "TXQ_STARTED\n",
- bnad->netdev->name,
- txq_id);
netif_wake_subqueue(
bnad->netdev,
txq_id);
@@ -987,8 +982,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
}
} else {
if (netif_carrier_ok(bnad->netdev)) {
- printk(KERN_WARNING "bna: %s link down\n",
- bnad->netdev->name);
+ netdev_info(bnad->netdev, "link down\n");
netif_carrier_off(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
}
@@ -1058,8 +1052,6 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
txq_id = tcb->id;
clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
netif_stop_subqueue(bnad->netdev, txq_id);
- printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
- bnad->netdev->name, txq_id);
}
}
@@ -1082,8 +1074,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
BUG_ON(*(tcb->hw_consumer_index) != 0);
if (netif_carrier_ok(bnad->netdev)) {
- printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
- bnad->netdev->name, txq_id);
netif_wake_subqueue(bnad->netdev, txq_id);
BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
}
@@ -1094,8 +1084,8 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* get a 0 MAC address. We try to get the MAC address
* again here.
*/
- if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
- bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
+ if (is_zero_ether_addr(bnad->perm_addr)) {
+ bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
}
}
@@ -1703,7 +1693,7 @@ bnad_ioc_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1714,7 +1704,7 @@ bnad_ioc_hb_check(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1725,7 +1715,7 @@ bnad_iocpf_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1736,7 +1726,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1862,8 +1852,7 @@ bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
struct netdev_hw_addr *mc_addr;
netdev_for_each_mc_addr(mc_addr, netdev) {
- memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
- ETH_ALEN);
+ ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
i++;
}
}
@@ -2137,7 +2126,7 @@ bnad_reinit_rx(struct bnad *bnad)
current_err = bnad_setup_rx(bnad, rx_id);
if (current_err && !err) {
err = current_err;
- pr_err("RXQ:%u setup failed\n", rx_id);
+ netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
}
}
@@ -2338,7 +2327,7 @@ bnad_rx_coalescing_timeo_set(struct bnad *bnad)
* Called with bnad->bna_lock held
*/
int
-bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
+bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
{
int ret;
@@ -2349,7 +2338,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
if (!bnad->rx_info[0].rx)
return 0;
- ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+ ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
if (ret != BNA_CB_SUCCESS)
return -EADDRNOTAVAIL;
@@ -2367,8 +2356,8 @@ bnad_enable_default_bcast(struct bnad *bnad)
init_completion(&bnad->bnad_completions.mcast_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
- ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
- bnad_cb_rx_mcast_add);
+ ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
+ bnad_cb_rx_mcast_add);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (ret == BNA_CB_SUCCESS)
@@ -2673,8 +2662,9 @@ bnad_enable_msix(struct bnad *bnad)
if (ret < 0) {
goto intx_mode;
} else if (ret < bnad->msix_num) {
- pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
- ret, bnad->msix_num);
+ dev_warn(&bnad->pcidev->dev,
+ "%d MSI-X vectors allocated < %d requested\n",
+ ret, bnad->msix_num);
spin_lock_irqsave(&bnad->bna_lock, flags);
/* ret = #of vectors that we got */
@@ -2696,7 +2686,8 @@ bnad_enable_msix(struct bnad *bnad)
return;
intx_mode:
- pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
+ dev_warn(&bnad->pcidev->dev,
+ "MSI-X enable failed - operating in INTx mode\n");
kfree(bnad->msix_table);
bnad->msix_table = NULL;
@@ -2754,7 +2745,7 @@ bnad_open(struct net_device *netdev)
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_enet_mtu_set(&bnad->bna.enet,
BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
- bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3128,7 +3119,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
int entry;
if (netdev_uc_empty(bnad->netdev)) {
- bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
return;
}
@@ -3141,13 +3132,11 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
entry = 0;
netdev_for_each_uc_addr(ha, netdev) {
- memcpy(&mac_list[entry * ETH_ALEN],
- &ha->addr[0], ETH_ALEN);
+ ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
entry++;
}
- ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
- mac_list, NULL);
+ ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
kfree(mac_list);
if (ret != BNA_CB_SUCCESS)
@@ -3158,7 +3147,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
/* ucast packets not in UCAM are routed to default function */
mode_default:
bnad->cfg_flags |= BNAD_CF_DEFAULT;
- bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
}
static void
@@ -3183,12 +3172,11 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
if (mac_list == NULL)
goto mode_allmulti;
- memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
/* copy rest of the MCAST addresses */
bnad_netdev_mc_list_get(netdev, mac_list);
- ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
- mac_list, NULL);
+ ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
kfree(mac_list);
if (ret != BNA_CB_SUCCESS)
@@ -3198,7 +3186,7 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
mode_allmulti:
bnad->cfg_flags |= BNAD_CF_ALLMULTI;
- bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+ bna_rx_mcast_delall(bnad->rx_info[0].rx);
}
void
@@ -3237,7 +3225,7 @@ bnad_set_rx_mode(struct net_device *netdev)
mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
BNA_RXMODE_ALLMULTI;
- bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3248,19 +3236,18 @@ bnad_set_rx_mode(struct net_device *netdev)
* in a non-blocking context.
*/
static int
-bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+bnad_set_mac_address(struct net_device *netdev, void *addr)
{
int err;
struct bnad *bnad = netdev_priv(netdev);
- struct sockaddr *sa = (struct sockaddr *)mac_addr;
+ struct sockaddr *sa = (struct sockaddr *)addr;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
-
if (!err)
- memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, sa->sa_data);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3487,8 +3474,8 @@ bnad_init(struct bnad *bnad,
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
return -ENOMEM;
}
- pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
- (unsigned long long) bnad->mmio_len);
+ dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
+ (unsigned long long) bnad->mmio_len);
spin_lock_irqsave(&bnad->bna_lock, flags);
if (!bnad_msix_disable)
@@ -3609,13 +3596,10 @@ bnad_pci_probe(struct pci_dev *pdev,
struct bfa_pcidev pcidev_info;
unsigned long flags;
- pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
- pdev, pcidev_id, PCI_FUNC(pdev->devfn));
-
mutex_lock(&bnad_fwimg_mutex);
if (!cna_get_firmware_buf(pdev)) {
mutex_unlock(&bnad_fwimg_mutex);
- pr_warn("Failed to load Firmware Image!\n");
+ dev_err(&pdev->dev, "failed to load firmware image!\n");
return -ENODEV;
}
mutex_unlock(&bnad_fwimg_mutex);
@@ -3693,13 +3677,13 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Set up timers */
setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
- ((unsigned long)bnad));
+ (unsigned long)bnad);
setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
- ((unsigned long)bnad));
+ (unsigned long)bnad);
setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
- ((unsigned long)bnad));
+ (unsigned long)bnad);
setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
- ((unsigned long)bnad));
+ (unsigned long)bnad);
/*
* Start the chip
@@ -3708,8 +3692,7 @@ bnad_pci_probe(struct pci_dev *pdev,
*/
err = bnad_ioceth_enable(bnad);
if (err) {
- pr_err("BNA: Initialization failed err=%d\n",
- err);
+ dev_err(&pdev->dev, "initialization failed err=%d\n", err);
goto probe_success;
}
@@ -3742,7 +3725,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Get the burnt-in mac */
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
+ bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3751,7 +3734,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Finally, register with net_device layer */
err = register_netdev(netdev);
if (err) {
- pr_err("BNA : Registering with netdev failed\n");
+ dev_err(&pdev->dev, "registering net device failed\n");
goto probe_uninit;
}
set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
@@ -3803,7 +3786,6 @@ bnad_pci_remove(struct pci_dev *pdev)
if (!netdev)
return;
- pr_info("%s bnad_pci_remove\n", netdev->name);
bnad = netdev_priv(netdev);
bna = &bnad->bna;
@@ -3864,15 +3846,14 @@ bnad_module_init(void)
{
int err;
- pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
- BNAD_VERSION);
+ pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
+ BNAD_VERSION);
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
err = pci_register_driver(&bnad_pci_driver);
if (err < 0) {
- pr_err("bna : PCI registration failed in module init "
- "(%d)\n", err);
+ pr_err("bna: PCI driver registration failed err=%d\n", err);
return err;
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 7ead6c23e..faedbf247 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -344,7 +344,7 @@ struct bnad {
struct bnad_completion bnad_completions;
/* Burnt in MAC address */
- mac_t perm_addr;
+ u8 perm_addr[ETH_ALEN];
struct workqueue_struct *work_q;
@@ -385,7 +385,7 @@ u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
void bnad_set_rx_mode(struct net_device *netdev);
struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
-int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr);
int bnad_enable_default_bcast(struct bnad *bnad);
void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
void bnad_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 72c895504..8fc246ea1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -76,8 +76,7 @@ bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
fw_debug = NULL;
- pr_warn("bnad %s: Failed to collect fwtrc\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect fwtrc\n");
return -ENOMEM;
}
@@ -117,8 +116,7 @@ bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
fw_debug = NULL;
- pr_warn("bna %s: Failed to collect fwsave\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect fwsave\n");
return -ENOMEM;
}
@@ -217,8 +215,7 @@ bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
drv_info->debug_buffer = NULL;
kfree(drv_info);
drv_info = NULL;
- pr_warn("bna %s: Failed to collect drvinfo\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect drvinfo\n");
return -ENOMEM;
}
@@ -271,15 +268,15 @@ bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
area = (offset >> 15) & 0x7;
if (area == 0) {
/* PCIe core register */
- if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
+ if (offset + (len << 2) > 0x8000) /* 8k dwords or 32KB */
return BFA_STATUS_EINVAL;
} else if (area == 0x1) {
/* CB 32 KB memory page */
- if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */
+ if (offset + (len << 2) > 0x10000) /* 8k dwords or 32KB */
return BFA_STATUS_EINVAL;
} else {
/* CB register space 64KB */
- if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+ if (offset + (len << 2) > BFA_REG_ADDRMSK(ioc))
return BFA_STATUS_EINVAL;
}
return BFA_STATUS_OK;
@@ -321,27 +318,20 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- /* Allocate memory to store the user space buf */
- kern_buf = kzalloc(nbytes, GFP_KERNEL);
- if (!kern_buf)
- return -ENOMEM;
-
- if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
- kfree(kern_buf);
- return -ENOMEM;
- }
+ /* Copy the user space buf */
+ kern_buf = memdup_user(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
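memdup_user() bundles the allocation and copy_from_user() into one call and, unlike the removed code, reports a failed copy as -EFAULT (via ERR_PTR) rather than -ENOMEM. The equivalent open-coded shape, as a sketch only (flag choice illustrative, not part of the patch):

    kern_buf = kmalloc(nbytes, GFP_KERNEL);
    if (!kern_buf)
            return ERR_PTR(-ENOMEM);
    if (copy_from_user(kern_buf, buf, nbytes)) {
            kfree(kern_buf);
            return ERR_PTR(-EFAULT);
    }
    return kern_buf;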
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
if (rc < 2) {
- pr_warn("bna %s: Failed to read user buffer\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
}
kfree(kern_buf);
kfree(bnad->regdata);
- bnad->regdata = NULL;
bnad->reglen = 0;
bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
@@ -355,8 +345,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
/* offset and len sanity check */
rc = bna_reg_offset_check(ioc, addr, len);
if (rc) {
- pr_warn("bna %s: Failed reg offset check\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed reg offset check\n");
kfree(bnad->regdata);
bnad->regdata = NULL;
bnad->reglen = 0;
@@ -388,20 +377,14 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- /* Allocate memory to store the user space buf */
- kern_buf = kzalloc(nbytes, GFP_KERNEL);
- if (!kern_buf)
- return -ENOMEM;
-
- if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
- kfree(kern_buf);
- return -ENOMEM;
- }
+ /* Copy the user space buf */
+ kern_buf = memdup_user(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &val);
if (rc < 2) {
- pr_warn("bna %s: Failed to read user buffer\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
}
@@ -412,8 +395,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
/* offset and len sanity check */
rc = bna_reg_offset_check(ioc, addr, 1);
if (rc) {
- pr_warn("bna %s: Failed reg offset check\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed reg offset check\n");
return -EINVAL;
}
@@ -525,7 +507,8 @@ bnad_debugfs_init(struct bnad *bnad)
bna_debugfs_root = debugfs_create_dir("bna", NULL);
atomic_set(&bna_debugfs_port_count, 0);
if (!bna_debugfs_root) {
- pr_warn("BNA: debugfs root dir creation failed\n");
+ netdev_warn(bnad->netdev,
+ "debugfs root dir creation failed\n");
return;
}
}
@@ -536,8 +519,8 @@ bnad_debugfs_init(struct bnad *bnad)
bnad->port_debugfs_root =
debugfs_create_dir(name, bna_debugfs_root);
if (!bnad->port_debugfs_root) {
- pr_warn("bna pci_dev %s: root dir creation failed\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev,
+ "debugfs root dir creation failed\n");
return;
}
@@ -552,9 +535,9 @@ bnad_debugfs_init(struct bnad *bnad)
bnad,
file->fops);
if (!bnad->bnad_dentry_files[i]) {
- pr_warn(
- "BNA pci_dev:%s: create %s entry failed\n",
- pci_name(bnad->pcidev), file->name);
+ netdev_warn(bnad->netdev,
+ "create %s entry failed\n",
+ file->name);
return;
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 2a1a1d21d..3385c279a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -445,13 +445,13 @@ bnad_set_ringparam(struct net_device *netdev,
if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
- !BNA_POWER_OF_2(ringparam->rx_pending)) {
+ !is_power_of_2(ringparam->rx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
- !BNA_POWER_OF_2(ringparam->tx_pending)) {
+ !is_power_of_2(ringparam->tx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
@@ -533,7 +533,7 @@ bnad_set_pauseparam(struct net_device *netdev,
pause_config.rx_pause = pauseparam->rx_pause;
pause_config.tx_pause = pauseparam->tx_pause;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
mutex_unlock(&bnad->conf_mutex);
@@ -1080,7 +1080,7 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
ret = reject_firmware(&fw, eflash->data, &bnad->pcidev->dev);
if (ret) {
- pr_err("BNA: Can't locate firmware %s\n", eflash->data);
+ netdev_err(netdev, "can't load firmware %s\n", eflash->data);
goto out;
}
@@ -1093,7 +1093,7 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
bnad->id, (u8 *)fw->data, fw->size, 0,
bnad_cb_completion, &fcomp);
if (ret != BFA_STATUS_OK) {
- pr_warn("BNA: Flash update failed with err: %d\n", ret);
+ netdev_warn(netdev, "flash update failed with err=%d\n", ret);
ret = -EIO;
spin_unlock_irq(&bnad->bna_lock);
goto out;
@@ -1103,8 +1103,9 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
wait_for_completion(&fcomp.comp);
if (fcomp.comp_status != BFA_STATUS_OK) {
ret = -EIO;
- pr_warn("BNA: Firmware image update to flash failed with: %d\n",
- fcomp.comp_status);
+ netdev_warn(netdev,
+ "firmware image update failed with err=%d\n",
+ fcomp.comp_status);
}
out:
release_firmware(fw);
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 1901ff1c6..8ddcf6361 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -42,66 +42,4 @@ extern char bfa_version[];
#define CNA_FW_FILE_CT2 "/*(DEBLOBBED)*/"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
-#pragma pack(1)
-
-typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
-
-#pragma pack()
-
-#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
-#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
-#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
-
-/*
- * bfa_q_qe_init - to initialize a queue element
- */
-#define bfa_q_qe_init(_qe) { \
- bfa_q_next(_qe) = (struct list_head *) NULL; \
- bfa_q_prev(_qe) = (struct list_head *) NULL; \
-}
-
-/*
- * bfa_q_deq - dequeue an element from head of the queue
- */
-#define bfa_q_deq(_q, _qe) { \
- if (!list_empty(_q)) { \
- (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
- bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
- (struct list_head *) (_q); \
- bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
- bfa_q_qe_init(*((struct list_head **) _qe)); \
- } else { \
- *((struct list_head **)(_qe)) = NULL; \
- } \
-}
-
-/*
- * bfa_q_deq_tail - dequeue an element from tail of the queue
- */
-#define bfa_q_deq_tail(_q, _qe) { \
- if (!list_empty(_q)) { \
- *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
- bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
- (struct list_head *) (_q); \
- bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
- bfa_q_qe_init(*((struct list_head **) _qe)); \
- } else { \
- *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
- } \
-}
-
-/*
- * bfa_add_tail_head - enqueue an element at the head of queue
- */
-#define bfa_q_enq_head(_q, _qe) { \
- if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL)) \
- pr_err("Assertion failure: %s:%d: %d", \
- __FILE__, __LINE__, \
- (bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
- bfa_q_next(_qe) = bfa_q_next(_q); \
- bfa_q_prev(_qe) = (struct list_head *) (_q); \
- bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe); \
- bfa_q_next(_q) = (struct list_head *) (_qe); \
-}
-
#endif /* __CNA_H__ */
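With the bfa_q_* macros removed from cna.h, the converted call sites rely on the generic <linux/list.h> primitives. A rough mapping, stated as assumed equivalences rather than as part of the patch:

    bfa_q_first(q)                ->  list_first_entry(q, type, qe)
    bfa_q_deq(q, &e)              ->  e = list_first_entry(...); list_del(&e->qe)
    bfa_q_deq() + list_add_tail() ->  list_move_tail(&e->qe, dst)
    bfa_q_enq_head(q, qe)         ->  list_add(qe, q)
    bfa_q_qe_init(qe)             ->  dropped (the converted code never inspects detached nodes)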
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index a59f221bd..9c07a4a43 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -33,7 +33,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 n;
if (reject_firmware(&fw, fw_name, &pdev->dev)) {
- pr_alert("Can't locate firmware %s\n", fw_name);
+ dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name);
goto error;
}
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 1ba3e3a67..f0bcb15d3 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -8,8 +8,6 @@ config NET_CADENCE
default y
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
- Make sure you know the name of your card. Read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
If unsure, say Y.
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index fc646a41d..bf9eb2ecf 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -54,6 +54,8 @@
#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+#define GEM_MTU_MIN_SIZE 68
+
/*
* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -102,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}
+/* I/O accessors */
+static u32 hw_readl_native(struct macb *bp, int offset)
+{
+ return __raw_readl(bp->regs + offset);
+}
+
+static void hw_writel_native(struct macb *bp, int offset, u32 value)
+{
+ __raw_writel(value, bp->regs + offset);
+}
+
+static u32 hw_readl(struct macb *bp, int offset)
+{
+ return readl_relaxed(bp->regs + offset);
+}
+
+static void hw_writel(struct macb *bp, int offset, u32 value)
+{
+ writel_relaxed(value, bp->regs + offset);
+}
+
+/*
+ * Find the CPU endianness by using the loopback bit of NCR register. When the
+ * CPU is in big endian we need to program swapped mode for management
+ * descriptor access.
+ */
+static bool hw_is_native_io(void __iomem *addr)
+{
+ u32 value = MACB_BIT(LLB);
+
+ __raw_writel(value, addr + MACB_NCR);
+ value = __raw_readl(addr + MACB_NCR);
+
+ /* Write 0 back to disable everything */
+ __raw_writel(0, addr + MACB_NCR);
+
+ return value == MACB_BIT(LLB);
+}
+
+static bool hw_is_gem(void __iomem *addr, bool native_io)
+{
+ u32 id;
+
+ if (native_io)
+ id = __raw_readl(addr + MACB_MID);
+ else
+ id = readl_relaxed(addr + MACB_MID);
+
+ return MACB_BFEXT(IDNUM, id) >= 0x2;
+}
+
static void macb_set_hwaddr(struct macb *bp)
{
u32 bottom;
@@ -158,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
}
}
- netdev_info(bp->dev, "invalid hw address, using random\n");
+ dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
eth_hw_addr_random(bp->dev);
}
@@ -250,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev = bp->phy_dev;
unsigned long flags;
-
int status_change = 0;
spin_lock_irqsave(&bp->lock, flags);
@@ -447,14 +499,14 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
- u32 __iomem *reg = bp->regs + MACB_PFR;
u32 *p = &bp->hw_stats.macb.rx_pause_frames;
u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
- for(; p < end; p++, reg++)
- *p += readl_relaxed(reg);
+ for(; p < end; p++, offset += 4)
+ *p += bp->macb_reg_readl(bp, offset);
}
static int macb_halt_tx(struct macb *bp)
@@ -785,7 +837,7 @@ static int gem_rx(struct macb *bp, int budget)
}
/* now everything is ready for receiving packet */
bp->rx_skbuff[entry] = NULL;
- len = MACB_BFEXT(RX_FRMLEN, ctrl);
+ len = ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -831,7 +883,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
struct macb_dma_desc *desc;
desc = macb_rx_desc(bp, last_frag);
- len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+ len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
macb_rx_ring_wrap(first_frag),
@@ -1105,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
}
#endif
-static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
- unsigned int len)
-{
- return (len + bp->max_tx_length - 1) / bp->max_tx_length;
-}
-
static unsigned int macb_tx_map(struct macb *bp,
struct macb_queue *queue,
struct sk_buff *skb)
@@ -1261,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
* socket buffer: skb fragments of jumbo frames may need to be
* split into many buffer descriptors.
*/
- count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+ count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
- count += macb_count_tx_descriptors(bp, frag_size);
+ count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
spin_lock_irqsave(&bp->lock, flags);
@@ -1601,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
static void macb_configure_dma(struct macb *bp)
{
u32 dmacfg;
- u32 tmp, ncr;
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1611,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
dmacfg &= ~GEM_BIT(ENDIA_PKT);
- /* Find the CPU endianness by using the loopback bit of net_ctrl
- * register. save it first. When the CPU is in big endian we
- * need to program swaped mode for management descriptor access.
- */
- ncr = macb_readl(bp, NCR);
- __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
- tmp = __raw_readl(bp->regs + MACB_NCR);
-
- if (tmp == MACB_BIT(LLB))
+ if (bp->native_io)
dmacfg &= ~GEM_BIT(ENDIA_DESC);
else
dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
- /* Restore net_ctrl */
- macb_writel(bp, NCR, ncr);
-
if (bp->dev->features & NETIF_F_HW_CSUM)
dmacfg |= GEM_BIT(TXCOEN);
else
@@ -1651,7 +1685,10 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
- config |= MACB_BIT(BIG); /* Receive oversized frames */
+ if (bp->caps & MACB_CAPS_JUMBO)
+ config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
+ else
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
if (bp->dev->flags & IFF_PROMISC)
config |= MACB_BIT(CAF); /* Copy All Frames */
else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
@@ -1660,8 +1697,13 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(NBC); /* No BroadCast */
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ gem_writel(bp, JML, bp->jumbo_max_len);
bp->speed = SPEED_10;
bp->duplex = DUPLEX_HALF;
+ bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
+ if (bp->caps & MACB_CAPS_JUMBO)
+ bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
@@ -1865,21 +1907,41 @@ static int macb_close(struct net_device *dev)
return 0;
}
+static int macb_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct macb *bp = netdev_priv(dev);
+ u32 max_mtu;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ max_mtu = ETH_DATA_LEN;
+ if (bp->caps & MACB_CAPS_JUMBO)
+ max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+
+ if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
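For a concrete bound, assume JML still holds the ZynqMP default programmed elsewhere in this patch (jumbo_max_len = 10240); the largest MTU macb_change_mtu() will then accept works out as below (a worked example, not a value taken from the patch text):

    max_mtu = JML - ETH_HLEN - ETH_FCS_LEN
            = 10240 - 14 - 4
            = 10222 bytes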
+
static void gem_update_stats(struct macb *bp)
{
- int i;
+ unsigned int i;
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
- u64 val = readl_relaxed(bp->regs + offset);
+ u64 val = bp->macb_reg_readl(bp, offset);
bp->ethtool_stats[i] += val;
*p += val;
if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
/* Add GEM_OCTTXH, GEM_OCTRXH */
- val = readl_relaxed(bp->regs + offset + 4);
+ val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
*(++p) += val;
}
@@ -1946,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
- int i;
+ unsigned int i;
switch (sset) {
case ETH_SS_STATS:
@@ -2141,7 +2203,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_get_stats = macb_get_stats,
.ndo_do_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = macb_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = macb_poll_controller,
@@ -2160,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
if (dt_conf)
bp->caps = dt_conf->caps;
- if (macb_is_gem_hw(bp->regs)) {
+ if (hw_is_gem(bp->regs, bp->native_io)) {
bp->caps |= MACB_CAPS_MACB_IS_GEM;
dcfg = gem_readl(bp, DCFG1);
@@ -2171,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
bp->caps |= MACB_CAPS_FIFO_MODE;
}
- netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+ dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
+ bool native_io,
unsigned int *queue_mask,
unsigned int *num_queues)
{
@@ -2189,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
* we are early in the probe process and don't have the
* MACB_CAPS_MACB_IS_GEM flag positioned
*/
- if (!macb_is_gem_hw(mem))
+ if (!hw_is_gem(mem, native_io))
return;
/* bit 0 is never set but queue 0 always exists */
@@ -2683,6 +2746,13 @@ static const struct macb_config pc302gem_config = {
.init = macb_init,
};
+static const struct macb_config sama5d2_config = {
+ .caps = 0,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
static const struct macb_config sama5d3_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
@@ -2702,6 +2772,16 @@ static const struct macb_config emac_config = {
.init = at91ether_init,
};
+
+static const struct macb_config zynqmp_config = {
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config zynq_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_NO_GIGABIT_HALF,
@@ -2716,10 +2796,12 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,macb" },
{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
{ .compatible = "cdns,gem", .data = &pc302gem_config },
+ { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
{ /* sentinel */ }
};
@@ -2737,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev)
struct clk *pclk, *hclk, *tx_clk;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
+ bool native_io;
struct phy_device *phydev;
struct net_device *dev;
struct resource *regs;
@@ -2745,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev)
struct macb *bp;
int err;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
if (np) {
const struct of_device_id *match;
@@ -2760,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev)
if (err)
return err;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(mem)) {
- err = PTR_ERR(mem);
- goto err_disable_clocks;
- }
+ native_io = hw_is_native_io(mem);
- macb_probe_queues(mem, &queue_mask, &num_queues);
+ macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
if (!dev) {
err = -ENOMEM;
@@ -2782,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev)
bp->pdev = pdev;
bp->dev = dev;
bp->regs = mem;
+ bp->native_io = native_io;
+ if (native_io) {
+ bp->macb_reg_readl = hw_readl_native;
+ bp->macb_reg_writel = hw_writel_native;
+ } else {
+ bp->macb_reg_readl = hw_readl;
+ bp->macb_reg_writel = hw_writel;
+ }
bp->num_queues = num_queues;
bp->queue_mask = queue_mask;
if (macb_config)
@@ -2789,6 +2880,9 @@ static int macb_probe(struct platform_device *pdev)
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
+ if (macb_config)
+ bp->jumbo_max_len = macb_config->jumbo_max_len;
+
spin_lock_init(&bp->lock);
/* setup capabilities */
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 24b1d9bcd..1895b6b2a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -71,6 +71,7 @@
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_JML 0x0048 /* Jumbo Max Length */
#define GEM_HRB 0x0080 /* Hash Bottom */
#define GEM_HRT 0x0084 /* Hash Top */
#define GEM_SA1B 0x0088 /* Specific1 Bottom */
@@ -398,6 +399,7 @@
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
+#define MACB_CAPS_JUMBO 0x00000008
/* Bit manipulation macros */
#define MACB_BIT(name) \
@@ -427,18 +429,12 @@
| GEM_BF(name, value))
/* Register access macros */
-#define macb_readl(port,reg) \
- readl_relaxed((port)->regs + MACB_##reg)
-#define macb_writel(port,reg,value) \
- writel_relaxed((value), (port)->regs + MACB_##reg)
-#define gem_readl(port, reg) \
- readl_relaxed((port)->regs + GEM_##reg)
-#define gem_writel(port, reg, value) \
- writel_relaxed((value), (port)->regs + GEM_##reg)
-#define queue_readl(queue, reg) \
- readl_relaxed((queue)->bp->regs + (queue)->reg)
-#define queue_writel(queue, reg, value) \
- writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
+#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg)
+#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value))
+#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg)
+#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
+#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
+#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
@@ -515,6 +511,9 @@ struct macb_dma_desc {
#define MACB_RX_BROADCAST_OFFSET 31
#define MACB_RX_BROADCAST_SIZE 1
+#define MACB_RX_FRMLEN_MASK 0xFFF
+#define MACB_RX_JFRMLEN_MASK 0x3FFF
+
/* RX checksum offload disabled: bit 24 clear in NCFGR */
#define GEM_RX_TYPEID_MATCH_OFFSET 22
#define GEM_RX_TYPEID_MATCH_SIZE 2
@@ -758,6 +757,7 @@ struct macb_config {
int (*clk_init)(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk);
int (*init)(struct platform_device *pdev);
+ int jumbo_max_len;
};
struct macb_queue {
@@ -779,6 +779,11 @@ struct macb_queue {
struct macb {
void __iomem *regs;
+ bool native_io;
+
+ /* hardware IO accessors */
+ u32 (*macb_reg_readl)(struct macb *bp, int offset);
+ void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
unsigned int rx_tail;
unsigned int rx_prepared_head;
@@ -811,9 +816,9 @@ struct macb {
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
- unsigned int link;
- unsigned int speed;
- unsigned int duplex;
+ int link;
+ int speed;
+ int duplex;
u32 caps;
unsigned int dma_burst_length;
@@ -827,6 +832,9 @@ struct macb {
unsigned int max_tx_length;
u64 ethtool_stats[GEM_STATS_LEN];
+
+ unsigned int rx_frm_len_mask;
+ unsigned int jumbo_max_len;
};
static inline bool macb_is_gem(struct macb *bp)
@@ -834,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
}
-static inline bool macb_is_gem_hw(void __iomem *addr)
-{
- return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
-}
-
#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
new file mode 100644
index 000000000..02e23e6f1
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,54 @@
+#
+# Cavium ethernet device configuration
+#
+
+config NET_VENDOR_CAVIUM
+ tristate "Cavium ethernet drivers"
+ depends on PCI
+ default y
+ ---help---
+ Select this option if you want to enable Cavium network support.
+
+ If you have a Cavium SoC or network adapter, say Y.
+
+if NET_VENDOR_CAVIUM
+
+config THUNDER_NIC_PF
+ tristate "Thunder Physical function driver"
+ depends on 64BIT
+ select THUNDER_NIC_BGX
+ ---help---
+ This driver supports Thunder's NIC physical function.
+ The NIC provides the controller and DMA engines to
+ move network traffic to/from the memory. The NIC
+ works closely with TNS, BGX and SerDes to implement the
+ functions replacing and virtualizing those of a typical
+ standalone PCIe NIC chip.
+
+config THUNDER_NIC_VF
+ tristate "Thunder Virtual function driver"
+ depends on 64BIT
+ ---help---
+ This driver supports Thunder's NIC virtual function
+
+config THUNDER_NIC_BGX
+ tristate "Thunder MAC interface driver (BGX)"
+ depends on 64BIT
+ ---help---
+ This driver supports programming and controlling of MAC
+ interface from NIC physical function driver.
+
+config LIQUIDIO
+ tristate "Cavium LiquidIO support"
+ depends on 64BIT
+ select PTP_1588_CLOCK
+ select FW_LOADER
+ select LIBCRC32C
+ ---help---
+ This driver supports Cavium LiquidIO Intelligent Server Adapters
+ based on CN66XX and CN68XX chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called liquidio. This is recommended.
+
+endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
new file mode 100644
index 000000000..d22f886ac
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
new file mode 100644
index 000000000..2f3668068
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -0,0 +1,16 @@
+#
+# Cavium Liquidio ethernet device driver
+#
+obj-$(CONFIG_LIQUIDIO) += liquidio.o
+
+liquidio-objs := lio_main.o \
+ lio_ethtool.o \
+ request_manager.o \
+ response_manager.o \
+ octeon_device.o \
+ cn66xx_device.o \
+ cn68xx_device.o \
+ octeon_mem_ops.o \
+ octeon_droq.o \
+ octeon_console.o \
+ octeon_nic.o
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
new file mode 100644
index 000000000..8ad7425f8
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -0,0 +1,796 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+int lio_cn6xxx_soft_reset(struct octeon_device *oct)
+{
+ octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+ dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");
+
+ lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
+ octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);
+
+ lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
+ lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
+
+ /* make sure that the reset is written before starting timer */
+ mmiowb();
+
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+
+ if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+ dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
+ octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
+{
+ u32 val;
+
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+ if (val & 0x000f0000) {
+ dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
+ val & 0x000f0000);
+ }
+
+ val |= 0xf; /* Enable Link error reporting */
+
+ dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+}
+
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+ enum octeon_pcie_mps mps)
+{
+ u32 val;
+ u64 r64;
+
+ /* Read config register for MPS */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+ if (mps == PCIE_MPS_DEFAULT) {
+ mps = ((val & (0x7 << 5)) >> 5);
+ } else {
+ val &= ~(0x7 << 5); /* Turn off any MPS bits */
+ val |= (mps << 5); /* Set MPS */
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+ }
+
+ /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
+ r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+ r64 |= (mps << 4);
+ lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+ enum octeon_pcie_mrrs mrrs)
+{
+ u32 val;
+ u64 r64;
+
+ /* Read config register for MRRS */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+ if (mrrs == PCIE_MRRS_DEFAULT) {
+ mrrs = ((val & (0x7 << 12)) >> 12);
+ } else {
+ val &= ~(0x7 << 12); /* Turn off any MRRS bits */
+ val |= (mrrs << 12); /* Set MRRS */
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+ }
+
+ /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
+ r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
+ r64 |= mrrs;
+ octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);
+
+ /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
+ r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+ r64 |= mrrs;
+ lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
+{
+ /* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier
+ * for SLI.
+ */
+ return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
+}
+
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
+ u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);
+
+ /* core clock per us / oq ticks will be fractional. To avoid that
+ * we use the method below.
+ */
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next 2 steps give the oq ticks
+ * corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
+ return oqticks_per_us;
+}
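A worked example of the conversion above, assuming a 600 MHz SLI clock and a 100 us interrupt threshold (both values are illustrative only); multiplying by 1000 before dividing keeps the integer math from truncating for sub-millisecond thresholds:

    600 cycles/us * 1000 = 600000 cycles/ms
    600000 / 1024        = 585 oq ticks/ms          (1 tick = 1024 core clocks)
    585 * 100 / 1000     = 58 ticks  ~= 58 * 1024 / 600 MHz ~= 99 us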
+
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
+{
+ /* Select Round-Robin Arb, ES, RO, NS for Input Queues */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
+ CN6XXX_INPUT_CTL_MASK);
+
+ /* Instruction Read Size - Max 4 instructions per PCIE Read */
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
+ 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Select PCIE Port for all Input rings. */
+ octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
+ (oct->pcie_port * 0x5555555555555555ULL));
+}
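The multiply by 0x5555555555555555ULL is a lane-replication trick: the register appears to carry one 2-bit PCIe-port selector per input ring, and multiplying a port number (0-3) by the repeating binary 01 pattern copies that value into every 2-bit lane with no carries. Illustrative values:

    port 0: 0 * 0x5555555555555555 = 0x0000000000000000   (all lanes 00)
    port 2: 2 * 0x5555555555555555 = 0xAAAAAAAAAAAAAAAA   (all lanes 10)
    port 3: 3 * 0x5555555555555555 = 0xFFFFFFFFFFFFFFFF   (all lanes 11)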
+
+static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+ u64 pktctl;
+
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+ /* 66XX SPECIFIC */
+ if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
+ /* Disable RING_EN if only up to 4 rings are used. */
+ pktctl &= ~(1 << 4);
+ else
+ pktctl |= (1 << 4);
+
+ if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
+ pktctl |= 0xF;
+ else
+ /* Disable per-port backpressure. */
+ pktctl &= ~0xF;
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 time_threshold;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ /* / Select PCI-E Port for all Output queues */
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
+ (oct->pcie_port * 0x5555555555555555ULL));
+
+ if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
+ } else {
+ /* / Set Output queue watermark to 0 to disable backpressure */
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
+ }
+
+ /* / Select Info Ptr for length & data */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);
+
+ /* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
+
+ /* / Select ES,RO,NS setting from register for Output Queue Packet
+ * Address
+ */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit swap for Output
+ * Queue ScatterList
+ */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);
+
+ /* / ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
+#ifdef __BIG_ENDIAN_BITFIELD
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
+ 0x5555555555555555ULL);
+#else
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
+#endif
+
+ /* / No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
+ 0x5555555555555555ULL);
+
+ /* / Set up interrupt packet and time threshold */
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
+ time_threshold =
+ lio_cn6xxx_get_oq_ticks(oct, (u32)
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf));
+
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+}
+
+static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
+{
+ lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+ lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
+ lio_cn6xxx_enable_error_reporting(oct);
+
+ lio_cn6xxx_setup_global_input_regs(oct);
+ lio_cn66xx_setup_pkt_ctl_regs(oct);
+ lio_cn6xxx_setup_global_output_regs(oct);
+
+	/* Default error timeout value should be 0x200000 to avoid a host
+	 * hang when an invalid register is read.
+	 */
+ octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+ return 0;
+}
+
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ /* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
+ octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this
+ * queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr
+ + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter
+ * (used in flush_iq calculation)
+ */
+ iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
+}
+
+static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ lio_cn6xxx_setup_iq_regs(oct, iq_no);
+
+ /* Backpressure for this queue - WMARK set to all F's. This effectively
+ * disables the backpressure mechanism.
+ */
+ octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
+ (0xFFFFFFFFULL << 32));
+}
+
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ u32 intr;
+ struct octeon_droq *droq = oct->droq[oq_no];
+
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+ /* Enable this output queue to generate Packet Timer Interrupt */
+ intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ intr |= (1 << oq_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);
+
+ /* Enable this output queue to generate Packet Timer Interrupt */
+ intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ intr |= (1 << oq_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
+}
+
+void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+{
+ u32 mask;
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
+ mask |= oct->io_qmask.iq64B;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+ mask |= oct->io_qmask.iq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ mask |= oct->io_qmask.oq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+}
+
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
+{
+ u32 mask, i, loop = HZ;
+ u32 d32;
+
+ /* Reset the Enable bits for Input Queues. */
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+ mask ^= oct->io_qmask.iq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+ /* Wait until hardware indicates that the queues are out of reset. */
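+	/* The wait below is bounded: at most HZ iterations of one jiffy each,
+	 * i.e. roughly one second.
+	 */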
+ mask = oct->io_qmask.iq;
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+ while (((d32 & mask) != mask) && loop--) {
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for each Input queue. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
+ }
+
+ /* Reset the Enable bits for Output Queues. */
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ mask ^= oct->io_qmask.oq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+ /* Wait until hardware indicates that the queues are out of reset. */
+ loop = HZ;
+ mask = oct->io_qmask.oq;
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+ while (((d32 & mask) != mask) && loop--) {
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for each Output queue. */
+ /* for (i = 0; i < oct->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
+ }
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+ if (d32)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+ if (d32)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
+}
+
+void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ oct->fn_list.setup_iq_regs(oct, i);
+ }
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ oct->fn_list.setup_oq_regs(oct, i);
+ }
+
+ oct->fn_list.setup_device_regs(oct);
+
+ oct->fn_list.enable_interrupt(oct->chip);
+
+ oct->fn_list.enable_io_queues(oct);
+
+ /* for (i = 0; i < oct->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
+ }
+}
+
+void
+lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
+ u64 core_addr,
+ u32 idx,
+ int valid)
+{
+ u64 bar1;
+
+ if (valid == 0) {
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
+ CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ return;
+ }
+
+	/* Bits 17:4 of the PCI_BAR1_INDEXx register store bits 35:22 of
+ * the Core Addr
+ */
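+	/* For example, a core_addr of 0x10000000 has bits 35:22 equal to
+	 * 0x40, so the value written below is (0x40 << 4) | PCI_BAR1_MASK.
+	 */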
+ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+ CN6XXX_BAR1_REG(idx, oct->pcie_port));
+
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
+ u32 idx,
+ u32 mask)
+{
+ lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+ return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32
+lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
+ struct octeon_instr_queue *iq)
+{
+ u32 new_idx = readl(iq->inst_cnt_reg);
+
+	/* The instr cnt reg is a 32-bit counter that can roll over. Its
+	 * value at init time was saved in reset_instr_cnt, so the arithmetic
+	 * below gives the number of instructions fetched since then
+ */
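+	/* For example, if reset_instr_cnt was 0xFFFFFFF0 and the register
+	 * now reads 0x10, the counter has advanced by
+	 * (0xFFFFFFFF - 0xFFFFFFF0) + 1 + 0x10 = 0x20 instructions.
+	 */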
+ if (iq->reset_instr_cnt < new_idx)
+ new_idx -= iq->reset_instr_cnt;
+ else
+ new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
+
+	/* Take that count modulo the IQ size to get the new read index. */
+ new_idx %= iq->max_count;
+
+ return new_idx;
+}
+
+void lio_cn6xxx_enable_interrupt(void *chip)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
+
+ /* Enable Interrupt */
+ writeq(mask, cn6xxx->intr_enb_reg64);
+}
+
+void lio_cn6xxx_disable_interrupt(void *chip)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+ /* Disable Interrupts */
+ writeq(0, cn6xxx->intr_enb_reg64);
+
+ /* make sure interrupts are really disabled */
+ mmiowb();
+}
+
+static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
+{
+	/* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register,
+	 * which is used to determine the PCIE port #
+	 */
+ oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;
+
+ dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
+}
+
+void
+lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
+{
+ dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
+ CVM_CAST64(intr64));
+}
+
+int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
+{
+ struct octeon_droq *droq;
+ u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+ u32 droq_cnt_enb, droq_cnt_mask;
+
+ droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+ droq_mask = droq_cnt_mask & droq_cnt_enb;
+
+ droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+ droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ droq_mask |= (droq_time_mask & droq_int_enb);
+
+ droq_mask &= oct->io_qmask.oq;
+
+ oct->droq_intr = 0;
+
+ /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
+ if (!(droq_mask & (1 << oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+ pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
+ if (pkt_count) {
+ oct->droq_intr |= (1ULL << oq_no);
+ if (droq->ops.poll_mode) {
+ u32 value;
+ u32 reg;
+
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ /* disable interrupts for this droq */
+ spin_lock
+ (&cn6xxx->lock_for_droq_int_enb_reg);
+ reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
+ value = octeon_read_csr(oct, reg);
+ value &= ~(1 << oq_no);
+ octeon_write_csr(oct, reg, value);
+ reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
+ value = octeon_read_csr(oct, reg);
+ value &= ~(1 << oq_no);
+ octeon_write_csr(oct, reg, value);
+
+ /* Ensure that the enable register is written.
+ */
+ mmiowb();
+
+ spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
+ }
+ }
+ }
+
+ droq_time_mask &= oct->io_qmask.oq;
+ droq_cnt_mask &= oct->io_qmask.oq;
+
+ /* Reset the PKT_CNT/TIME_INT registers. */
+ if (droq_time_mask)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);
+
+ if (droq_cnt_mask) /* reset PKT_CNT register:66xx */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);
+
+ return 0;
+}
+
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u64 intr64;
+
+ intr64 = readq(cn6xxx->intr_sum_reg64);
+
+	/* If our device has interrupted, then proceed. A value of all f's
+	 * indicates that the PCI read itself failed (which can happen if the
+	 * interrupt was triggered by an error), so treat that as no
+	 * interrupt as well.
+	 */
+ if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
+ return IRQ_NONE;
+
+ oct->int_status = 0;
+
+ if (intr64 & CN6XXX_INTR_ERR)
+ lio_cn6xxx_process_pcie_error_intr(oct, intr64);
+
+ if (intr64 & CN6XXX_INTR_PKT_DATA) {
+ lio_cn6xxx_process_droq_intr_regs(oct);
+ oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+ }
+
+ if (intr64 & CN6XXX_INTR_DMA0_FORCE)
+ oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+
+ if (intr64 & CN6XXX_INTR_DMA1_FORCE)
+ oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+ /* Clear the current interrupts */
+ writeq(intr64, cn6xxx->intr_sum_reg64);
+
+ return IRQ_HANDLED;
+}
+
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
+ void *chip,
+ struct octeon_reg_list *reg_list)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+ reg_list->pci_win_wr_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
+ reg_list->pci_win_wr_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
+ reg_list->pci_win_wr_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);
+
+ reg_list->pci_win_rd_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
+ reg_list->pci_win_rd_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
+ reg_list->pci_win_rd_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);
+
+ reg_list->pci_win_wr_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
+ reg_list->pci_win_wr_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
+ reg_list->pci_win_wr_data =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);
+
+ reg_list->pci_win_rd_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
+ reg_list->pci_win_rd_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
+ reg_list->pci_win_rd_data =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);
+
+ lio_cn6xxx_get_pcie_qlmport(oct);
+
+ cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
+ cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
+ cn6xxx->intr_enb_reg64 =
+ bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
+}
+
+int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);
+
+ oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+ oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
+ oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
+ oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
+ oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+ oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+ oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+ oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+ oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+ oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+ lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+ cn6xxx->conf = (struct octeon_config *)
+ oct_get_config_info(oct, LIO_210SV);
+ if (!cn6xxx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+ return 0;
+}
+
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+ struct octeon_config *conf6xxx)
+{
+ /* int total_instrs = 0; */
+
+ if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_IQ_MAX_Q(conf6xxx),
+ CN6XXX_MAX_INPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_OQ_MAX_Q(conf6xxx),
+ CN6XXX_MAX_OUTPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
+ CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+ __func__);
+ return 1;
+ }
+ if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
+ !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
+ dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
new file mode 100644
index 000000000..f77918779
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -0,0 +1,107 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn66xx_device.h
+ * \brief Host Driver: Routines that perform CN66XX specific operations.
+ */
+
+#ifndef __CN66XX_DEVICE_H__
+#define __CN66XX_DEVICE_H__
+
+/* Register addresses and configuration for CN6XXX devices.
+ * If device-specific changes need to be made, add a struct with the
+ * device-specific fields as shown in the commented section below
+ */
+struct octeon_cn6xxx {
+ /** PCI interrupt summary register */
+ u8 __iomem *intr_sum_reg64;
+
+ /** PCI interrupt enable register */
+ u8 __iomem *intr_enb_reg64;
+
+ /** The PCI interrupt mask used by interrupt handler */
+ u64 intr_mask64;
+
+ struct octeon_config *conf;
+
+ /* Example additional fields - not used currently
+ * struct {
+ * }cn6xyz;
+ */
+
+ /* For the purpose of atomic access to interrupt enable reg */
+ spinlock_t lock_for_droq_int_enb_reg;
+
+};
+
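+/* PCIe Max Payload Size (MPS) settings; PCIE_MPS_DEFAULT keeps the
+ * value programmed by the BIOS.
+ */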
+enum octeon_pcie_mps {
+ PCIE_MPS_DEFAULT = -1, /* Use the default setup by BIOS */
+ PCIE_MPS_128B = 0,
+ PCIE_MPS_256B = 1
+};
+
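+/* PCIe Max Read Request Size (MRRS) settings; PCIE_MRRS_DEFAULT keeps
+ * the value programmed by the BIOS.
+ */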
+enum octeon_pcie_mrrs {
+ PCIE_MRRS_DEFAULT = -1, /* Use the default setup by BIOS */
+ PCIE_MRRS_128B = 0,
+ PCIE_MRRS_256B = 1,
+ PCIE_MRRS_512B = 2,
+ PCIE_MRRS_1024B = 3,
+ PCIE_MRRS_2048B = 4,
+ PCIE_MRRS_4096B = 5
+};
+
+/* Common functions for 66xx and 68xx */
+int lio_cn6xxx_soft_reset(struct octeon_device *oct);
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct);
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+ enum octeon_pcie_mps mps);
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+ enum octeon_pcie_mrrs mrrs);
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
+void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
+int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
+void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
+void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+ u32 idx, int valid);
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
+u32
+lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
+ struct octeon_instr_queue *iq);
+void lio_cn6xxx_enable_interrupt(void *chip);
+void lio_cn6xxx_disable_interrupt(void *chip);
+void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
+ struct octeon_reg_list *reg_list);
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+int lio_setup_cn66xx_octeon_device(struct octeon_device *);
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+ struct octeon_config *);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
new file mode 100644
index 000000000..5e3aff242
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -0,0 +1,535 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn66xx_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN66XX devices.
+ */
+
+#ifndef __CN66XX_REGS_H__
+#define __CN66XX_REGS_H__
+
+#define CN6XXX_XPANSION_BAR 0x30
+
+#define CN6XXX_MSI_CAP 0x50
+#define CN6XXX_MSI_ADDR_LO 0x54
+#define CN6XXX_MSI_ADDR_HI 0x58
+#define CN6XXX_MSI_DATA 0x5C
+
+#define CN6XXX_PCIE_CAP 0x70
+#define CN6XXX_PCIE_DEVCAP 0x74
+#define CN6XXX_PCIE_DEVCTL 0x78
+#define CN6XXX_PCIE_LINKCAP 0x7C
+#define CN6XXX_PCIE_LINKCTL 0x80
+#define CN6XXX_PCIE_SLOTCAP 0x84
+#define CN6XXX_PCIE_SLOTCTL 0x88
+
+#define CN6XXX_PCIE_ENH_CAP 0x100
+#define CN6XXX_PCIE_UNCORR_ERR_STATUS 0x104
+#define CN6XXX_PCIE_UNCORR_ERR_MASK 0x108
+#define CN6XXX_PCIE_UNCORR_ERR 0x10C
+#define CN6XXX_PCIE_CORR_ERR_STATUS 0x110
+#define CN6XXX_PCIE_CORR_ERR_MASK 0x114
+#define CN6XXX_PCIE_ADV_ERR_CAP 0x118
+
+#define CN6XXX_PCIE_ACK_REPLAY_TIMER 0x700
+#define CN6XXX_PCIE_OTHER_MSG 0x704
+#define CN6XXX_PCIE_PORT_FORCE_LINK 0x708
+#define CN6XXX_PCIE_ACK_FREQ 0x70C
+#define CN6XXX_PCIE_PORT_LINK_CTL 0x710
+#define CN6XXX_PCIE_LANE_SKEW 0x714
+#define CN6XXX_PCIE_SYM_NUM 0x718
+#define CN6XXX_PCIE_FLTMSK 0x720
+
+/* ############## BAR0 Registers ################ */
+
+#define CN6XXX_SLI_CTL_PORT0 0x0050
+#define CN6XXX_SLI_CTL_PORT1 0x0060
+
+#define CN6XXX_SLI_WINDOW_CTL 0x02E0
+#define CN6XXX_SLI_DBG_DATA 0x0310
+#define CN6XXX_SLI_SCRATCH1 0x03C0
+#define CN6XXX_SLI_SCRATCH2 0x03D0
+#define CN6XXX_SLI_CTL_STATUS 0x0570
+
+#define CN6XXX_WIN_WR_ADDR_LO 0x0000
+#define CN6XXX_WIN_WR_ADDR_HI 0x0004
+#define CN6XXX_WIN_WR_ADDR64 CN6XXX_WIN_WR_ADDR_LO
+
+#define CN6XXX_WIN_RD_ADDR_LO 0x0010
+#define CN6XXX_WIN_RD_ADDR_HI 0x0014
+#define CN6XXX_WIN_RD_ADDR64 CN6XXX_WIN_RD_ADDR_LO
+
+#define CN6XXX_WIN_WR_DATA_LO 0x0020
+#define CN6XXX_WIN_WR_DATA_HI 0x0024
+#define CN6XXX_WIN_WR_DATA64 CN6XXX_WIN_WR_DATA_LO
+
+#define CN6XXX_WIN_RD_DATA_LO 0x0040
+#define CN6XXX_WIN_RD_DATA_HI 0x0044
+#define CN6XXX_WIN_RD_DATA64 CN6XXX_WIN_RD_DATA_LO
+
+#define CN6XXX_WIN_WR_MASK_LO 0x0030
+#define CN6XXX_WIN_WR_MASK_HI 0x0034
+#define CN6XXX_WIN_WR_MASK_REG CN6XXX_WIN_WR_MASK_LO
+
+/* 1 register (32-bit) to enable Input queues */
+#define CN6XXX_SLI_PKT_INSTR_ENB 0x1000
+
+/* 1 register (32-bit) to enable Output queues */
+#define CN6XXX_SLI_PKT_OUT_ENB 0x1010
+
+/* 1 register (32-bit) to determine whether Output queues are in reset. */
+#define CN6XXX_SLI_PORT_IN_RST_OQ 0x11F0
+
+/* 1 register (32-bit) to determine whether Input queues are in reset. */
+#define CN6XXX_SLI_PORT_IN_RST_IQ 0x11F4
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 1 register (32-bit) - instr. size of each input queue. */
+#define CN6XXX_SLI_PKT_INSTR_SIZE 0x1020
+
+/* 32 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN6XXX_SLI_IQ_INSTR_COUNT_START 0x2000
+
+/* 32 registers for Input Queue Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN6XXX_SLI_IQ_BASE_ADDR_START64 0x2800
+
+/* 32 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN6XXX_SLI_IQ_DOORBELL_START 0x2C00
+
+/* 32 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN6XXX_SLI_IQ_SIZE_START 0x3000
+
+/* 32 registers for Instruction Header Options - SLI_PKT0_INSTR_HEADER */
+#define CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 0x3400
+
+/* 1 register (64-bit) - Back Pressure for each input queue - SLI_PKT0_IN_BP */
+#define CN66XX_SLI_INPUT_BP_START64 0x3800
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_IQ_OFFSET 0x10
+
+/* 1 register (32-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT_INPUT_CONTROL.
+ */
+#define CN6XXX_SLI_PKT_INPUT_CONTROL 0x1170
+
+/* 1 register (64-bit) - Number of instructions to read at one time
+ * - 2 bits for each input ring. SLI_PKT_INSTR_RD_SIZE.
+ */
+#define CN6XXX_SLI_PKT_INSTR_RD_SIZE 0x11A0
+
+/* 1 register (64-bit) - Assign Input ring to MAC port
+ * - 2 bits for each input ring. SLI_PKT_IN_PCIE_PORT.
+ */
+#define CN6XXX_SLI_IN_PCIE_PORT 0x11B0
+
+/*------- Request Queue Macros ---------*/
+#define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_SIZE(iq) \
+ (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \
+ (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_DOORBELL(iq) \
+ (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \
+ (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN66XX_SLI_IQ_BP64(iq) \
+ (CN66XX_SLI_INPUT_BP_START64 + ((iq) * CN6XXX_IQ_OFFSET))
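+
+/* Example: CN6XXX_SLI_IQ_DOORBELL(3) = 0x2C00 + (3 * 0x10) = 0x2C30. */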
+
+/*------------------ Masks ----------------*/
+#define CN6XXX_INPUT_CTL_ROUND_ROBIN_ARB BIT(22)
+#define CN6XXX_INPUT_CTL_DATA_NS BIT(8)
+#define CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN6XXX_INPUT_CTL_DATA_RO BIT(5)
+#define CN6XXX_INPUT_CTL_USE_CSR BIT(4)
+#define CN6XXX_INPUT_CTL_GATHER_NS BIT(3)
+#define CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP BIT(2)
+#define CN6XXX_INPUT_CTL_GATHER_RO BIT(1)
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CN6XXX_INPUT_CTL_MASK \
+ (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN6XXX_INPUT_CTL_USE_CSR \
+ | CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP)
+#else
+#define CN6XXX_INPUT_CTL_MASK \
+ (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN6XXX_INPUT_CTL_USE_CSR)
+#endif
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 32 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN6XXX_SLI_OQ0_BUFF_INFO_SIZE 0x0C00
+
+/* 32 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN6XXX_SLI_OQ_BASE_ADDR_START64 0x1400
+
+/* 32 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN6XXX_SLI_OQ_PKT_CREDITS_START 0x1800
+
+/* 32 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN6XXX_SLI_OQ_SIZE_START 0x1C00
+
+/* 32 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN6XXX_SLI_OQ_PKT_SENT_START 0x2400
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_OQ_OFFSET 0x10
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_ROR
+ */
+#define CN6XXX_SLI_PKT_SLIST_ROR 0x1030
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_NS
+ */
+#define CN6XXX_SLI_PKT_SLIST_NS 0x1040
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue descriptors
+ * - SLI_PKT_SLIST_ES
+ */
+#define CN6XXX_SLI_PKT_SLIST_ES64 0x1050
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - InfoPtr mode for Output Queues.
+ * - SLI_PKT_IPTR
+ */
+#define CN6XXX_SLI_PKT_IPTR 0x1070
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - DPTR format selector for Output queues.
+ * - SLI_PKT_DPADDR
+ */
+#define CN6XXX_SLI_PKT_DPADDR 0x1080
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_ROR
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_ROR 0x1090
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_NS
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_NS 0x10A0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue data
+ * - SLI_PKT_DATA_OUT_ES
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_ES64 0x10B0
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Controls whether SLI_PKTn_CNTS is incremented for bytes or for packets.
+ * - SLI_PKT_OUT_BMODE
+ */
+#define CN6XXX_SLI_PKT_OUT_BMODE 0x10D0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Assign PCIE port for Output queues
+ * - SLI_PKT_PCIE_PORT.
+ */
+#define CN6XXX_SLI_PKT_PCIE_PORT64 0x10E0
+
+/* 1 (64-bit) register for Output Queue Packet Count Interrupt Threshold
+ * & Time Threshold. The same setting applies to all 32 queues.
+ * The register is defined as a 64-bit register, but we use the
+ * 32-bit offsets to define distinct addresses.
+ */
+#define CN6XXX_SLI_OQ_INT_LEVEL_PKTS 0x1120
+#define CN6XXX_SLI_OQ_INT_LEVEL_TIME 0x1124
+
+/* 1 (64-bit register) for Output Queue backpressure across all rings. */
+#define CN6XXX_SLI_OQ_WMARK 0x1180
+
+/* 1 register to control output queue global backpressure & ring enable. */
+#define CN6XXX_SLI_PKT_CTL 0x1220
+
+/*------- Output Queue Macros ---------*/
+#define CN6XXX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_SIZE(oq) \
+ (CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_PKTS_SENT(oq) \
+ (CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSET))
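+
+/* Example: CN6XXX_SLI_OQ_PKTS_CREDIT(3) = 0x1800 + (3 * 0x10) = 0x1830. */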
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define CN6XXX_DMA_CNT_START 0x0400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values
+ * SLI_DMA_0_TIM
+ */
+#define CN6XXX_DMA_TIM_START 0x0420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define CN6XXX_DMA_INT_LEVEL_START 0x03E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_DMA_OFFSET 0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define CN6XXX_DMA_CNT(dq) \
+ (CN6XXX_DMA_CNT_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_PKT_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_TIME_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + 4 + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_TIM(dq) \
+ (CN6XXX_DMA_TIM_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+/*######################## INTERRUPTS #########################*/
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define CN6XXX_SLI_INT_SUM64 0x0330
+
+/* 1 register (64-bit) for Interrupt Enable */
+#define CN6XXX_SLI_INT_ENB64_PORT0 0x0340
+#define CN6XXX_SLI_INT_ENB64_PORT1 0x0350
+
+/* 1 register (32-bit) to enable Output Queue Packet/Byte Count Interrupt */
+#define CN6XXX_SLI_PKT_CNT_INT_ENB 0x1150
+
+/* 1 register (32-bit) to enable Output Queue Packet Timer Interrupt */
+#define CN6XXX_SLI_PKT_TIME_INT_ENB 0x1160
+
+/* 1 register (32-bit) to indicate which Output Queue reached pkt threshold */
+#define CN6XXX_SLI_PKT_CNT_INT 0x1130
+
+/* 1 register (32-bit) to indicate which Output Queue reached time threshold */
+#define CN6XXX_SLI_PKT_TIME_INT 0x1140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN6XXX_INTR_RML_TIMEOUT_ERR BIT(1)
+#define CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR BIT(2)
+#define CN6XXX_INTR_IO2BIG_ERR BIT(3)
+#define CN6XXX_INTR_PKT_COUNT BIT(4)
+#define CN6XXX_INTR_PKT_TIME BIT(5)
+#define CN6XXX_INTR_M0UPB0_ERR BIT(8)
+#define CN6XXX_INTR_M0UPWI_ERR BIT(9)
+#define CN6XXX_INTR_M0UNB0_ERR BIT(10)
+#define CN6XXX_INTR_M0UNWI_ERR BIT(11)
+#define CN6XXX_INTR_M1UPB0_ERR BIT(12)
+#define CN6XXX_INTR_M1UPWI_ERR BIT(13)
+#define CN6XXX_INTR_M1UNB0_ERR BIT(14)
+#define CN6XXX_INTR_M1UNWI_ERR BIT(15)
+#define CN6XXX_INTR_MIO_INT0 BIT(16)
+#define CN6XXX_INTR_MIO_INT1 BIT(17)
+#define CN6XXX_INTR_MAC_INT0 BIT(18)
+#define CN6XXX_INTR_MAC_INT1 BIT(19)
+
+#define CN6XXX_INTR_DMA0_FORCE BIT_ULL(32)
+#define CN6XXX_INTR_DMA1_FORCE BIT_ULL(33)
+#define CN6XXX_INTR_DMA0_COUNT BIT_ULL(34)
+#define CN6XXX_INTR_DMA1_COUNT BIT_ULL(35)
+#define CN6XXX_INTR_DMA0_TIME BIT_ULL(36)
+#define CN6XXX_INTR_DMA1_TIME BIT_ULL(37)
+#define CN6XXX_INTR_INSTR_DB_OF_ERR BIT_ULL(48)
+#define CN6XXX_INTR_SLIST_DB_OF_ERR BIT_ULL(49)
+#define CN6XXX_INTR_POUT_ERR BIT_ULL(50)
+#define CN6XXX_INTR_PIN_BP_ERR BIT_ULL(51)
+#define CN6XXX_INTR_PGL_ERR BIT_ULL(52)
+#define CN6XXX_INTR_PDI_ERR BIT_ULL(53)
+#define CN6XXX_INTR_POP_ERR BIT_ULL(54)
+#define CN6XXX_INTR_PINS_ERR BIT_ULL(55)
+#define CN6XXX_INTR_SPRT0_ERR BIT_ULL(56)
+#define CN6XXX_INTR_SPRT1_ERR BIT_ULL(57)
+#define CN6XXX_INTR_ILL_PAD_ERR BIT_ULL(60)
+
+#define CN6XXX_INTR_DMA0_DATA (CN6XXX_INTR_DMA0_TIME)
+
+#define CN6XXX_INTR_DMA1_DATA (CN6XXX_INTR_DMA1_TIME)
+
+#define CN6XXX_INTR_DMA_DATA \
+ (CN6XXX_INTR_DMA0_DATA | CN6XXX_INTR_DMA1_DATA)
+
+#define CN6XXX_INTR_PKT_DATA (CN6XXX_INTR_PKT_TIME | \
+ CN6XXX_INTR_PKT_COUNT)
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define CN6XXX_INTR_PCIE_DATA \
+ (CN6XXX_INTR_DMA_DATA | CN6XXX_INTR_PKT_DATA)
+
+#define CN6XXX_INTR_MIO \
+ (CN6XXX_INTR_MIO_INT0 | CN6XXX_INTR_MIO_INT1)
+
+#define CN6XXX_INTR_MAC \
+ (CN6XXX_INTR_MAC_INT0 | CN6XXX_INTR_MAC_INT1)
+
+/* Sum of interrupts for error events */
+#define CN6XXX_INTR_ERR \
+ (CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR \
+ | CN6XXX_INTR_IO2BIG_ERR \
+ | CN6XXX_INTR_M0UPB0_ERR \
+ | CN6XXX_INTR_M0UPWI_ERR \
+ | CN6XXX_INTR_M0UNB0_ERR \
+ | CN6XXX_INTR_M0UNWI_ERR \
+ | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UPWI_ERR \
+	 | CN6XXX_INTR_M1UNB0_ERR             \
+ | CN6XXX_INTR_M1UNWI_ERR \
+ | CN6XXX_INTR_INSTR_DB_OF_ERR \
+ | CN6XXX_INTR_SLIST_DB_OF_ERR \
+ | CN6XXX_INTR_POUT_ERR \
+ | CN6XXX_INTR_PIN_BP_ERR \
+ | CN6XXX_INTR_PGL_ERR \
+ | CN6XXX_INTR_PDI_ERR \
+ | CN6XXX_INTR_POP_ERR \
+ | CN6XXX_INTR_PINS_ERR \
+ | CN6XXX_INTR_SPRT0_ERR \
+ | CN6XXX_INTR_SPRT1_ERR \
+ | CN6XXX_INTR_ILL_PAD_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define CN6XXX_INTR_MASK \
+ (CN6XXX_INTR_PCIE_DATA \
+ | CN6XXX_INTR_DMA0_FORCE \
+ | CN6XXX_INTR_DMA1_FORCE \
+ | CN6XXX_INTR_MIO \
+ | CN6XXX_INTR_MAC \
+ | CN6XXX_INTR_ERR)
+
+#define CN6XXX_SLI_S2M_PORT0_CTL 0x3D80
+#define CN6XXX_SLI_S2M_PORT1_CTL 0x3D90
+#define CN6XXX_SLI_S2M_PORTX_CTL(port) \
+ (CN6XXX_SLI_S2M_PORT0_CTL + (port * 0x10))
+
+#define CN6XXX_SLI_INT_ENB64(port) \
+ (CN6XXX_SLI_INT_ENB64_PORT0 + (port * 0x10))
+
+#define CN6XXX_SLI_MAC_NUMBER 0x3E00
+
+/* CN6XXX BAR1 Index registers. */
+#define CN6XXX_PEM_BAR1_INDEX000 0x00011800C00000A8ULL
+#define CN6XXX_PEM_OFFSET 0x0000000001000000ULL
+
+#define CN6XXX_BAR1_INDEX_START CN6XXX_PEM_BAR1_INDEX000
+#define CN6XXX_PCI_BAR1_OFFSET 0x8
+
+#define CN6XXX_BAR1_REG(idx, port) \
+ (CN6XXX_BAR1_INDEX_START + (port * CN6XXX_PEM_OFFSET) + \
+ (CN6XXX_PCI_BAR1_OFFSET * (idx)))
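+
+/* Example: CN6XXX_BAR1_REG(2, 0) = 0x00011800C00000A8 + 0 + (0x8 * 2)
+ *        = 0x00011800C00000B8.
+ */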
+
+/*############################ DPI #########################*/
+
+#define CN6XXX_DPI_CTL 0x0001df0000000040ULL
+
+#define CN6XXX_DPI_DMA_CONTROL 0x0001df0000000048ULL
+
+#define CN6XXX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL
+
+#define CN6XXX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL
+
+#define CN6XXX_DPI_REQ_ERR_RST 0x0001df0000000060ULL
+
+#define CN6XXX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
+
+#define CN6XXX_DPI_DMA_ENG_ENB(q_no) \
+ (CN6XXX_DPI_DMA_ENG0_ENB + (q_no * 8))
+
+#define CN6XXX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
+
+#define CN6XXX_DPI_DMA_ENG_BUF(q_no) \
+ (CN6XXX_DPI_DMA_ENG0_BUF + (q_no * 8))
+
+#define CN6XXX_DPI_SLI_PRT0_CFG 0x0001df0000000900ULL
+#define CN6XXX_DPI_SLI_PRT1_CFG 0x0001df0000000908ULL
+#define CN6XXX_DPI_SLI_PRTX_CFG(port) \
+ (CN6XXX_DPI_SLI_PRT0_CFG + (port * 0x10))
+
+#define CN6XXX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
+#define CN6XXX_DPI_DMA_PKT_HP BIT_ULL(57)
+#define CN6XXX_DPI_DMA_PKT_EN BIT_ULL(56)
+#define CN6XXX_DPI_DMA_O_ES BIT_ULL(15)
+#define CN6XXX_DPI_DMA_O_MODE BIT_ULL(14)
+
+#define CN6XXX_DPI_DMA_CTL_MASK \
+ (CN6XXX_DPI_DMA_COMMIT_MODE | \
+ CN6XXX_DPI_DMA_PKT_HP | \
+ CN6XXX_DPI_DMA_PKT_EN | \
+ CN6XXX_DPI_DMA_O_ES | \
+ CN6XXX_DPI_DMA_O_MODE)
+
+/*############################ CIU #########################*/
+
+#define CN6XXX_CIU_SOFT_BIST 0x0001070000000738ULL
+#define CN6XXX_CIU_SOFT_RST 0x0001070000000740ULL
+
+/*############################ MIO #########################*/
+#define CN6XXX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL
+#define CN6XXX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL
+#define CN6XXX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL
+#define CN6XXX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL
+#define CN6XXX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL
+#define CN6XXX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL
+#define CN6XXX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL
+#define CN6XXX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL
+#define CN6XXX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL
+#define CN6XXX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL
+#define CN6XXX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL
+#define CN6XXX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL
+#define CN6XXX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL
+#define CN6XXX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL
+
+#define CN6XXX_MIO_QLM4_CFG 0x00011800000015B0ULL
+#define CN6XXX_MIO_RST_BOOT 0x0001180000001600ULL
+
+#define CN6XXX_MIO_QLM_CFG_MASK 0x7
+
+/*############################ LMC #########################*/
+
+#define CN6XXX_LMC0_RESET_CTL 0x0001180088000180ULL
+#define CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
new file mode 100644
index 000000000..8e830d0c0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -0,0 +1,198 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
+{
+ u32 i;
+ u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 };
+
+ lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
+ dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
+ lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));
+
+ for (i = 0; i < 6; i++) {
+		/* Prevent service of the instruction queue for all DMA
+		 * engines. Engine 5 will remain 0. Engines 0 - 4 will be
+		 * set up by the core.
+		 */
+ lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
+ lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
+ dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
+ lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
+ }
+
+ /* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set
+ * separately.
+ */
+
+ lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
+ dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
+ lio_pci_readq(oct, CN6XXX_DPI_CTL));
+}
+
+static int lio_cn68xx_soft_reset(struct octeon_device *oct)
+{
+ lio_cn6xxx_soft_reset(oct);
+ lio_cn68xx_set_dpi_regs(oct);
+
+ return 0;
+}
+
+static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+ u64 pktctl, tx_pipe, max_oqs;
+
+ pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+ /* 68XX specific */
+ max_oqs = CFG_GET_OQ_MAX_Q(CHIP_FIELD(oct, cn6xxx, conf));
+ tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
+ tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
+ tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */
+ octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe);
+
+ if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf))
+ pktctl |= 0xF;
+ else
+ /* Disable per-port backpressure. */
+ pktctl &= ~0xF;
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+static int lio_cn68xx_setup_device_regs(struct octeon_device *oct)
+{
+ lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+ lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B);
+ lio_cn6xxx_enable_error_reporting(oct);
+
+ lio_cn6xxx_setup_global_input_regs(oct);
+ lio_cn68xx_setup_pkt_ctl_regs(oct);
+ lio_cn6xxx_setup_global_output_regs(oct);
+
+	/* Default error timeout value should be 0x200000 to avoid a host
+	 * hang when an invalid register is read.
+	 */
+ octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+
+ return 0;
+}
+
+static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
+{
+ u32 val = 0;
+
+ /* Set M_VEND1_DRP and M_VEND0_DRP bits */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val);
+ val |= 0x3;
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
+}
+
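+/* A QLM4 interface config of 0 identifies a 210NV card; any other value
+ * is treated as a 410NV (see lio_setup_cn68xx_octeon_device below).
+ */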
+int lio_is_210nv(struct octeon_device *oct)
+{
+ u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
+
+ return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0);
+}
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+ u16 card_type = LIO_410NV;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);
+
+ oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+ oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+ oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
+ oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
+ oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
+ oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+ oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+ oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+ oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+ oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+ lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+ /* Determine variant of card */
+ if (lio_is_210nv(oct))
+ card_type = LIO_210NV;
+
+ cn68xx->conf = (struct octeon_config *)
+ oct_get_config_info(oct, card_type);
+ if (!cn68xx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
+ __func__,
+ (card_type == LIO_410NV) ? LIO_410NV_NAME :
+ LIO_210NV_NAME);
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+ lio_cn68xx_vendor_message_fix(oct);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
new file mode 100644
index 000000000..d4e1c9fb0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -0,0 +1,33 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn68xx_device.h
+ * \brief Host Driver: Routines that perform CN68XX specific operations.
+ */
+
+#ifndef __CN68XX_DEVICE_H__
+#define __CN68XX_DEVICE_H__
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
+int lio_is_210nv(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
new file mode 100644
index 000000000..38cddbd10
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -0,0 +1,51 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn68xx_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN68XX devices. Most of the register map is identical to
+ * the CN66XX map; this file contains only the registers that are
+ * 68XX-specific.
+ */
+
+#ifndef __CN68XX_REGS_H__
+#define __CN68XX_REGS_H__
+#include "cn66xx_regs.h"
+
+/*###################### REQUEST QUEUE #########################*/
+
+#define CN68XX_SLI_IQ_PORT0_PKIND 0x0800
+
+#define CN68XX_SLI_IQ_PORT_PKIND(iq) \
+ (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* Starting pipe number and number of pipes used by the SLI packet output. */
+#define CN68XX_SLI_TX_PIPE 0x1230
+
+/*######################## INTERRUPTS #########################*/
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN68XX_INTR_PIPE_ERR BIT_ULL(61)
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
new file mode 100644
index 000000000..29f330831
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -0,0 +1,1217 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/ethtool.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+struct oct_mdio_cmd_context {
+ int octeon_id;
+ wait_queue_head_t wc;
+ int cond;
+};
+
+struct oct_mdio_cmd_resp {
+ u64 rh;
+ struct oct_mdio_cmd resp;
+ u64 status;
+};
+
+#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
+
+/* Octeon's interface mode of operation */
+enum {
+ INTERFACE_MODE_DISABLED,
+ INTERFACE_MODE_RGMII,
+ INTERFACE_MODE_GMII,
+ INTERFACE_MODE_SPI,
+ INTERFACE_MODE_PCIE,
+ INTERFACE_MODE_XAUI,
+ INTERFACE_MODE_SGMII,
+ INTERFACE_MODE_PICMG,
+ INTERFACE_MODE_NPI,
+ INTERFACE_MODE_LOOP,
+ INTERFACE_MODE_SRIO,
+ INTERFACE_MODE_ILK,
+ INTERFACE_MODE_RXAUI,
+ INTERFACE_MODE_QSGMII,
+ INTERFACE_MODE_AGL,
+};
+
+#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
+#define OCT_ETHTOOL_REGDUMP_LEN 4096
+#define OCT_ETHTOOL_REGSVER 1
+
+static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
+ "Instr posted",
+ "Instr processed",
+ "Instr dropped",
+ "Bytes Sent",
+ "Sgentry_sent",
+ "Inst cntreg",
+ "Tx done",
+ "Tx Iq busy",
+ "Tx dropped",
+ "Tx bytes",
+};
+
+static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
+ "OQ Pkts Received",
+ "OQ Bytes Received",
+ "Dropped no dispatch",
+ "Dropped nomem",
+ "Dropped toomany",
+ "Stack RX cnt",
+ "Stack RX Bytes",
+ "RX dropped",
+};
+
+#define OCTNIC_NCMD_AUTONEG_ON 0x1
+#define OCTNIC_NCMD_PHY_ON 0x2
+
+static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
+
+ linfo = &lio->linfo;
+
+ if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
+ linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ ecmd->port = PORT_FIBRE;
+ ecmd->supported =
+ (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
+ SUPPORTED_Pause);
+ ecmd->advertising =
+ (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
+ ecmd->transceiver = XCVR_EXTERNAL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ } else {
+ dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
+ }
+
+ if (linfo->link.s.status) {
+ ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
+ ecmd->duplex = linfo->link.s.duplex;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static void
+lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct lio *lio;
+ struct octeon_device *oct;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(drvinfo->driver, "liquidio");
+ strcpy(drvinfo->version, LIQUIDIO_VERSION);
+ strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ ETHTOOL_FWVERS_LEN);
+ strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+ drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
+}
+
+static void
+lio_ethtool_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+ u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
+
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ max_rx = CFG_GET_OQ_MAX_Q(conf6x);
+ max_tx = CFG_GET_IQ_MAX_Q(conf6x);
+ rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
+ tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
+ }
+
+ channel->max_rx = max_rx;
+ channel->max_tx = max_tx;
+ channel->rx_count = rx_count;
+ channel->tx_count = tx_count;
+}
+
+static int lio_get_eeprom_len(struct net_device *netdev)
+{
+ u8 buf[128];
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_board_info *board_info;
+ int len;
+
+ board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+ len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
+
+ return len;
+}
+
+static int
+lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_board_info *board_info;
+ int len;
+
+ if (eeprom->offset != 0)
+ return -EINVAL;
+
+ eeprom->magic = oct_dev->pci_dev->vendor;
+ board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+ len =
+ sprintf((char *)bytes,
+ "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
+
+ return 0;
+}
+
+static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = addr;
+ nctrl.ncmd.s.param3 = val;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Callback invoked when an MDIO command response arrives. */
+static void octnet_mdio_resp_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+ struct oct_mdio_cmd_context *mdio_cmd_ctx;
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+
+ mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+ mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+
+ oct = lio_get_device(mdio_cmd_ctx->octeon_id);
+ if (status) {
+		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
+ } else {
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
+ }
+ wake_up_interruptible(&mdio_cmd_ctx->wc);
+}
+
+/* This routine provides PHY access for MDIO clause 45. */
+static int
+octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+ struct oct_mdio_cmd_context *mdio_cmd_ctx;
+ struct oct_mdio_cmd *mdio_cmd;
+ int retval = 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ sizeof(struct oct_mdio_cmd),
+ sizeof(struct oct_mdio_cmd_resp),
+ sizeof(struct oct_mdio_cmd_context));
+
+ if (!sc)
+ return -ENOMEM;
+
+ mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+ mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+ mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
+
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
+ mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
+ mdio_cmd->op = op;
+ mdio_cmd->mdio_addr = loc;
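+ /* A non-zero op is a write: value1 carries the value to write.
+ * For a read (op == 0) the result comes back in the response's value1.
+ */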
+ if (op)
+ mdio_cmd->value1 = *value;
+ mdio_cmd->value2 = lio->linfo.ifidx;
+ octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
+ 0, 0, 0);
+
+ sc->wait_time = 1000;
+ sc->callback = octnet_mdio_resp_callback;
+ sc->callback_arg = sc;
+
+ init_waitqueue_head(&mdio_cmd_ctx->wc);
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "octnet_mdio45_access instruction failed status: %x\n",
+ retval);
+ retval = -EBUSY;
+ } else {
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived
+ */
+ sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
+ retval = mdio_cmd_rsp->status;
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
+ retval = -EBUSY;
+ } else {
+ octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
+ sizeof(struct oct_mdio_cmd) / 8);
+
+ if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
+ if (!op)
+ *value = mdio_cmd_rsp->resp.value1;
+ } else {
+ retval = -EINVAL;
+ }
+ }
+ }
+
+ octeon_free_soft_command(oct_dev, sc);
+
+ return retval;
+}
+
+static int lio_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int value, ret;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_DRIVEON);
+ return 2;
+
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ /* Save the current LED settings */
+ ret = octnet_mdio45_access(lio, 0,
+ LIO68XX_LED_BEACON_ADDR,
+ &lio->phy_beacon_val);
+ if (ret)
+ return ret;
+
+ ret = octnet_mdio45_access(lio, 0,
+ LIO68XX_LED_CTRL_ADDR,
+ &lio->led_ctrl_val);
+ if (ret)
+ return ret;
+
+ /* Configure Beacon values */
+ value = LIO68XX_LED_BEACON_CFGON;
+ ret =
+ octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &value);
+ if (ret)
+ return ret;
+
+ value = LIO68XX_LED_CTRL_CFGON;
+ ret =
+ octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &value);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ case ETHTOOL_ID_ON:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_HIGH);
+
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ case ETHTOOL_ID_OFF:
+ if (oct->chip_id == OCTEON_CN66XX)
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_LOW);
+ else if (oct->chip_id == OCTEON_CN68XX)
+ return -EINVAL;
+ else
+ return -EINVAL;
+
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_DRIVEOFF);
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ /* Restore LED settings */
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &lio->led_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &lio->phy_beacon_val);
+ if (ret)
+ return ret;
+
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+lio_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
+ rx_pending = 0;
+
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
+ rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
+ rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ }
+
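+ /* With an MTU above the default frame size, report the descriptor
+ * counts as jumbo ring parameters.
+ */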
+ if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
+ ering->rx_pending = 0;
+ ering->rx_max_pending = 0;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = rx_pending;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = rx_max_pending;
+ } else {
+ ering->rx_pending = rx_pending;
+ ering->rx_max_pending = rx_max_pending;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ }
+
+ ering->tx_pending = tx_pending;
+ ering->tx_max_pending = tx_max_pending;
+}
+
+static u32 lio_get_msglevel(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ return lio->msg_enable;
+}
+
+static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct lio *lio = GET_LIO(netdev);
+
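+ /* If the NETIF_MSG_HW bit is changing, ask the firmware to enable or
+ * disable verbose output accordingly.
+ */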
+ if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
+ if (msglvl & NETIF_MSG_HW)
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_ENABLE);
+ else
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_DISABLE);
+ }
+
+ lio->msg_enable = msglvl;
+}
+
+static void
+lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ /* Note: these drivers do not support autonegotiation,
+ * so just report pause frame support.
+ */
+ pause->tx_pause = 1;
+ pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */
+}
+
+static void
+lio_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i = 0, j;
+
+ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
+ if (!(oct_dev->io_qmask.iq & (1UL << j)))
+ continue;
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+ data[i++] =
+ CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_processed);
+ data[i++] =
+ CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+ data[i++] =
+ readl(oct_dev->instr_queue[j]->inst_cnt_reg);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ }
+
+ /* for (j = 0; j < oct_dev->num_oqs; j++){ */
+ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
+ if (!(oct_dev->io_qmask.oq & (1UL << j)))
+ continue;
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+ }
+}
+
+static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int num_iq_stats, num_oq_stats, i, j;
+
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct_dev->io_qmask.iq & (1UL << i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ /* for (i = 0; i < oct_dev->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct_dev->io_qmask.oq & (1UL << i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static int lio_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
+ (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+}
+
+static int lio_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ struct octeon_instr_queue *iq;
+ struct oct_intrmod_cfg *intrmod_cfg;
+
+ intrmod_cfg = &oct->intrmod;
+
+ switch (oct->chip_id) {
+ /* case OCTEON_CN73XX: Todo */
+ /* break; */
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ if (!intrmod_cfg->intrmod_enable) {
+ intr_coal->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
+ intr_coal->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
+ } else {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg->intrmod_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg->intrmod_check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg->intrmod_maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg->intrmod_minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg->intrmod_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg->intrmod_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg->intrmod_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg->intrmod_mincnt_trigger;
+ }
+
+ iq = oct->instr_queue[lio->linfo.txpciq[0]];
+ intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
+ break;
+
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Callback function for intrmod */
+static void octnet_intrmod_callback(struct octeon_device *oct_dev,
+ u32 status,
+ void *ptr)
+{
+ struct oct_intrmod_cmd *cmd = ptr;
+ struct octeon_soft_command *sc = cmd->sc;
+
+ oct_dev = cmd->oct_dev;
+
+ if (status)
+ dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
+ CVM_CAST64(status));
+ else
+ dev_info(&oct_dev->pci_dev->dev,
+ "Rx-Adaptive Interrupt moderation enabled:%llx\n",
+ oct_dev->intrmod.intrmod_enable);
+
+ octeon_free_soft_command(oct_dev, sc);
+}
+
+/* Configure interrupt moderation parameters */
+static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
+{
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_cmd *cmd;
+ struct oct_intrmod_cfg *cfg;
+ int retval;
+ struct octeon_device *oct_dev = (struct octeon_device *)oct;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ sizeof(struct oct_intrmod_cfg),
+ 0,
+ sizeof(struct oct_intrmod_cmd));
+
+ if (!sc)
+ return -ENOMEM;
+
+ cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
+ cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
+
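+ /* Copy the host-side moderation config into the command's data buffer
+ * and byte-swap it for the firmware.
+ */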
+ memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
+ octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
+ cmd->sc = sc;
+ cmd->cfg = cfg;
+ cmd->oct_dev = oct_dev;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
+
+ sc->callback = octnet_intrmod_callback;
+ sc->callback_arg = cmd;
+ sc->wait_time = 1000;
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Enable/disable adaptive interrupt moderation */
+static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
+ *intr_coal, int adaptive)
+{
+ int ret = 0;
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_intrmod_cfg *intrmod_cfg;
+
+ intrmod_cfg = &oct->intrmod;
+
+ if (adaptive) {
+ if (intr_coal->rate_sample_interval)
+ intrmod_cfg->intrmod_check_intrvl =
+ intr_coal->rate_sample_interval;
+ else
+ intrmod_cfg->intrmod_check_intrvl =
+ LIO_INTRMOD_CHECK_INTERVAL;
+
+ if (intr_coal->pkt_rate_high)
+ intrmod_cfg->intrmod_maxpkt_ratethr =
+ intr_coal->pkt_rate_high;
+ else
+ intrmod_cfg->intrmod_maxpkt_ratethr =
+ LIO_INTRMOD_MAXPKT_RATETHR;
+
+ if (intr_coal->pkt_rate_low)
+ intrmod_cfg->intrmod_minpkt_ratethr =
+ intr_coal->pkt_rate_low;
+ else
+ intrmod_cfg->intrmod_minpkt_ratethr =
+ LIO_INTRMOD_MINPKT_RATETHR;
+
+ if (intr_coal->rx_max_coalesced_frames_high)
+ intrmod_cfg->intrmod_maxcnt_trigger =
+ intr_coal->rx_max_coalesced_frames_high;
+ else
+ intrmod_cfg->intrmod_maxcnt_trigger =
+ LIO_INTRMOD_MAXCNT_TRIGGER;
+
+ if (intr_coal->rx_coalesce_usecs_high)
+ intrmod_cfg->intrmod_maxtmr_trigger =
+ intr_coal->rx_coalesce_usecs_high;
+ else
+ intrmod_cfg->intrmod_maxtmr_trigger =
+ LIO_INTRMOD_MAXTMR_TRIGGER;
+
+ if (intr_coal->rx_coalesce_usecs_low)
+ intrmod_cfg->intrmod_mintmr_trigger =
+ intr_coal->rx_coalesce_usecs_low;
+ else
+ intrmod_cfg->intrmod_mintmr_trigger =
+ LIO_INTRMOD_MINTMR_TRIGGER;
+
+ if (intr_coal->rx_max_coalesced_frames_low)
+ intrmod_cfg->intrmod_mincnt_trigger =
+ intr_coal->rx_max_coalesced_frames_low;
+ else
+ intrmod_cfg->intrmod_mincnt_trigger =
+ LIO_INTRMOD_MINCNT_TRIGGER;
+ }
+
+ intrmod_cfg->intrmod_enable = adaptive;
+ ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
+
+ return ret;
+}
+
+static int
+oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
+{
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u32 rx_max_coalesced_frames;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
+ else
+ rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
+
+ /* Disable adaptive interrupt moderation */
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
+ if (ret)
+ return ret;
+
+ /* Config Cnt based interrupt values */
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ rx_max_coalesced_frames);
+ CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ return 0;
+}
+
+static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
+ *intr_coal)
+{
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u32 time_threshold, rx_coalesce_usecs;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+
+ /* Disable adaptive interrupt moderation */
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
+ if (ret)
+ return ret;
+
+ /* Config Time based interrupt values */
+ time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+ CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+
+ return 0;
+}
+
+static int lio_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct lio *lio = GET_LIO(netdev);
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ u32 j, q_no;
+
+ if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
+ (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ q_no = lio->linfo.txpciq[j];
+ oct->instr_queue[q_no]->fill_threshold =
+ intr_coal->tx_max_coalesced_frames;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
+ intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
+ CN6XXX_DB_MAX);
+ return -EINVAL;
+ }
+
+ /* User requested adaptive-rx on */
+ if (intr_coal->use_adaptive_rx_coalesce) {
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ /* User requested rx-usecs with adaptive-rx off */
+ if ((intr_coal->rx_coalesce_usecs) &&
+ (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_rx_intrtime(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ /* User requested rx-frames with adaptive-rx off */
+ if ((intr_coal->rx_max_coalesced_frames) &&
+ (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ /* User requested adaptive-rx off, so use default coalesce params */
+ if ((!intr_coal->rx_max_coalesced_frames) &&
+ (!intr_coal->use_adaptive_rx_coalesce) &&
+ (!intr_coal->rx_coalesce_usecs)) {
+ dev_info(&oct->pci_dev->dev,
+ "Turning off adaptive-rx interrupt moderation\n");
+ dev_info(&oct->pci_dev->dev,
+ "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
+ CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
+ ret = oct_cfg_rx_intrtime(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+
+ ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ return 0;
+ret_intrmod:
+ return ret;
+}
+
+static int lio_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (lio->ptp_clock)
+ info->phc_index = ptp_clock_index(lio->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ return 0;
+}
+
+static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ /* get the link info */
+ linfo = &lio->linfo;
+
+ if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
+ ecmd->speed != SPEED_10) ||
+ (ecmd->duplex != DUPLEX_HALF &&
+ ecmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Ethtool Support is not provided for XAUI and RXAUI Interfaces
+ * as they operate at fixed Speed and Duplex settings
+ */
+ if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
+ linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
+ return -EINVAL;
+ }
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+ nctrl.wait_time = 1000;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
+ * to SE core application using ncmd.s.more & ncmd.s.param
+ */
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ /* Autoneg ON */
+ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
+ OCTNIC_NCMD_AUTONEG_ON;
+ nctrl.ncmd.s.param2 = ecmd->advertising;
+ } else {
+ /* Autoneg OFF */
+ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
+
+ nctrl.ncmd.s.param3 = ecmd->duplex;
+
+ nctrl.ncmd.s.param2 = ecmd->speed;
+ }
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int lio_nway_reset(struct net_device *netdev)
+{
+ if (netif_running(netdev)) {
+ struct ethtool_cmd ecmd;
+
+ memset(&ecmd, 0, sizeof(struct ethtool_cmd));
+ ecmd.autoneg = 0;
+ ecmd.speed = 0;
+ ecmd.duplex = 0;
+ lio_set_settings(netdev, &ecmd);
+ }
+ return 0;
+}
+
+/* Return register dump len. */
+static int lio_get_regs_len(struct net_device *dev)
+{
+ return OCT_ETHTOOL_REGDUMP_LEN;
+}
+
+static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ u32 reg;
+ int i, len = 0;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+ reg = CN6XXX_WIN_WR_ADDR_LO;
+ len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
+ CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_ADDR_HI;
+ len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
+ CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_RD_ADDR_LO;
+ len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
+ CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_RD_ADDR_HI;
+ len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
+ CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_DATA_LO;
+ len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
+ CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_DATA_HI;
+ len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
+ CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
+ len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
+ CN6XXX_WIN_WR_MASK_REG,
+ octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
+
+ /* PCI Interrupt Register */
+ len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
+ CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
+ CN6XXX_SLI_INT_ENB64_PORT0));
+ len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
+ CN6XXX_SLI_INT_ENB64_PORT1,
+ octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
+ len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
+ octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
+
+ /* PCI Output queue registers */
+ for (i = 0; i < oct->num_oqs; i++) {
+ reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ }
+ reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
+ len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
+ reg, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
+ len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
+ reg, octeon_read_csr(oct, reg));
+
+ /* PCI Input queue registers */
+ for (i = 0; i <= 3; i++) {
+ u32 reg;
+
+ reg = CN6XXX_SLI_IQ_DOORBELL(i);
+ len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
+ len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ }
+
+ /* PCI DMA registers */
+
+ len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
+ CN6XXX_DMA_CNT(0),
+ octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
+ len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
+ CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
+ reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
+ len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
+ CN6XXX_DMA_TIME_INT_LEVEL(0),
+ octeon_read_csr(oct, reg));
+
+ len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
+ CN6XXX_DMA_CNT(1),
+ octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
+ len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
+ CN6XXX_DMA_PKT_INT_LEVEL(1),
+ octeon_read_csr(oct, reg));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
+ len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
+ CN6XXX_DMA_TIME_INT_LEVEL(1),
+ octeon_read_csr(oct, reg));
+
+ /* PCI Index registers */
+
+ len += sprintf(s + len, "\n");
+
+ for (i = 0; i < 16; i++) {
+ reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
+ len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
+ CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
+ }
+
+ return len;
+}
+
+static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
+{
+ u32 val;
+ int i, len = 0;
+
+ /* PCI CONFIG Registers */
+
+ len += sprintf(s + len,
+ "\n\t Octeon Config space Registers\n\n");
+
+ for (i = 0; i <= 13; i++) {
+ pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+ len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+ (i * 4), i, val);
+ }
+
+ for (i = 30; i <= 34; i++) {
+ pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+ len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+ (i * 4), i, val);
+ }
+
+ return len;
+}
+
+/* Return register dump to user app. */
+static void lio_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *regbuf)
+{
+ struct lio *lio = GET_LIO(dev);
+ int len = 0;
+ struct octeon_device *oct = lio->oct_dev;
+
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
+ regs->version = OCT_ETHTOOL_REGSVER;
+
+ switch (oct->chip_id) {
+ /* case OCTEON_CN73XX: Todo */
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ len += cn6xxx_read_csr_reg(regbuf + len, oct);
+ len += cn6xxx_read_config_reg(regbuf + len, oct);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
+ __func__, oct->chip_id);
+ }
+}
+
+static const struct ethtool_ops lio_ethtool_ops = {
+ .get_settings = lio_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = lio_get_drvinfo,
+ .get_ringparam = lio_ethtool_get_ringparam,
+ .get_channels = lio_ethtool_get_channels,
+ .set_phys_id = lio_set_phys_id,
+ .get_eeprom_len = lio_get_eeprom_len,
+ .get_eeprom = lio_get_eeprom,
+ .get_strings = lio_get_strings,
+ .get_ethtool_stats = lio_get_ethtool_stats,
+ .get_pauseparam = lio_get_pauseparam,
+ .get_regs_len = lio_get_regs_len,
+ .get_regs = lio_get_regs,
+ .get_msglevel = lio_get_msglevel,
+ .set_msglevel = lio_set_msglevel,
+ .get_sset_count = lio_get_sset_count,
+ .nway_reset = lio_nway_reset,
+ .set_settings = lio_set_settings,
+ .get_coalesce = lio_get_intr_coalesce,
+ .set_coalesce = lio_set_intr_coalesce,
+ .get_ts_info = lio_get_ts_info,
+};
+
+void liquidio_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &lio_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
new file mode 100644
index 000000000..5824becd9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -0,0 +1,3666 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/net_tstamp.h>
+#include <linux/if_vlan.h>
+#include <linux/firmware.h>
+#include <linux/ethtool.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LIQUIDIO_VERSION);
+/*(DEBLOBBED)*/
+
+static int ddr_timeout = 10000;
+module_param(ddr_timeout, int, 0644);
+MODULE_PARM_DESC(ddr_timeout,
+ "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
+
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+ "Bitmask indicating which consoles have debug output redirected to syslog.");
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
+
+static char fw_type[LIO_MAX_FW_TYPE_LEN];
+module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
+MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
+
+static int conf_type;
+module_param(conf_type, int, 0);
+MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");
+
+/* Bit mask values for lio->ifstate */
+#define LIO_IFSTATE_DROQ_OPS 0x01
+#define LIO_IFSTATE_REGISTERED 0x02
+#define LIO_IFSTATE_RUNNING 0x04
+#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+
+/* Polling interval for determining when NIC application is alive */
+#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
+
+/* runtime link query interval */
+#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
+
+struct liquidio_if_cfg_context {
+ int octeon_id;
+
+ wait_queue_head_t wc;
+
+ int cond;
+};
+
+struct liquidio_if_cfg_resp {
+ u64 rh;
+ struct liquidio_if_cfg_info cfg_info;
+ u64 status;
+};
+
+struct oct_link_status_resp {
+ u64 rh;
+ struct oct_link_info link_info;
+ u64 status;
+};
+
+struct oct_timestamp_resp {
+ u64 rh;
+ u64 timestamp;
+ u64 status;
+};
+
+#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
+
+union tx_info {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gso_size;
+ u16 gso_segs;
+ u32 reserved;
+#else
+ u32 reserved;
+ u16 gso_segs;
+ u16 gso_size;
+#endif
+ } s;
+};
+
+/** Octeon device properties to be used by the NIC module.
+ * Each octeon device in the system will be represented
+ * by this structure in the NIC module.
+ */
+
+#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
+
+#define OCTNIC_GSO_MAX_HEADER_SIZE 128
+#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
+
+/** Structure of a node in the list of gather components maintained by
+ * the NIC driver for each network device.
+ */
+struct octnic_gather {
+ /** List manipulation. Next and prev pointers. */
+ struct list_head list;
+
+ /** Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /** Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /** Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct octeon_sg_entry *sg;
+};
+
+/** This structure is used by NIC driver to store information required
+ * to free the sk_buff when the packet has been fetched by Octeon.
+ * Bytes offset below assume worst-case of a 64-bit system.
+ */
+struct octnet_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio *lio;
+
+ /** Bytes 9-16. Pointer to sk_buff. */
+ struct sk_buff *skb;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct octnic_gather *g;
+
+ /** Bytes 25-32. Physical address of skb->data or gather list. */
+ u64 dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct octeon_soft_command *sc;
+};
+
+struct handshake {
+ struct completion init;
+ struct completion started;
+ struct pci_dev *pci_dev;
+ int init_ok;
+ int started_ok;
+};
+
+struct octeon_device_priv {
+ /** Tasklet structures for this device. */
+ struct tasklet_struct droq_tasklet;
+ unsigned long napi_mask;
+};
+
+static int octeon_device_init(struct octeon_device *);
+static void liquidio_remove(struct pci_dev *pdev);
+static int liquidio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static struct handshake handshake[MAX_OCTEON_DEVICES];
+static struct completion first_stage;
+
+static void octeon_droq_bh(unsigned long pdev)
+{
+ int q_no;
+ int reschedule = 0;
+ struct octeon_device *oct = (struct octeon_device *)pdev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
+ for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
+ if (!(oct->io_qmask.oq & (1UL << q_no)))
+ continue;
+ reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
+ MAX_PACKET_BUDGET);
+ }
+
+ if (reschedule)
+ tasklet_schedule(&oct_priv->droq_tasklet);
+}
+
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ int retry = 100, pkt_cnt = 0, pending_pkts = 0;
+ int i;
+
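+ /* Poll up to 'retry' times; kick the droq tasklet whenever packets
+ * are still pending in hardware so they get drained.
+ */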
+ do {
+ pending_pkts = 0;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
+ oct->droq[i]);
+ }
+ if (pkt_cnt > 0) {
+ pending_pkts += pkt_cnt;
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ pkt_cnt = 0;
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && pending_pkts);
+
+ return pkt_cnt;
+}
+
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl)
+{
+ struct netdev_queue *netdev_queue = txq;
+
+ netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb = NULL;
+ struct octeon_soft_command *sc;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ (*pkts_compl)++;
+ *bytes_compl += skb->len;
+}
+
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct octeon_soft_command *sc;
+ struct netdev_queue *txq;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+ netdev_tx_sent_queue(txq, skb->len);
+}
+
+int octeon_console_debug_enabled(u32 console)
+{
+ return (console_bitmask >> (console)) & 0x1;
+}
+
+/**
+ * \brief Forces all IO queues off on a given device
+ * @param oct Pointer to Octeon device
+ */
+static void force_io_queues_off(struct octeon_device *oct)
+{
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX)) {
+ /* Reset the Enable bits for Input Queues. */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+
+ /* Reset the Enable bits for Output Queues. */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ }
+}
+
+/**
+ * \brief wait for all pending requests to complete
+ * @param oct Pointer to Octeon device
+ *
+ * Called during shutdown sequence
+ */
+static int wait_for_pending_requests(struct octeon_device *oct)
+{
+ int i, pcount = 0;
+
+ for (i = 0; i < 100; i++) {
+ pcount =
+ atomic_read(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].pending_req_count);
+ if (pcount)
+ schedule_timeout_uninterruptible(HZ / 10);
+ else
+ break;
+ }
+
+ if (pcount)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * \brief Cause device to go quiet so it can be safely removed/reset/etc
+ * @param oct Pointer to Octeon device
+ */
+static inline void pcierror_quiesce_device(struct octeon_device *oct)
+{
+ int i;
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet processing
+ * to finish.
+ */
+ force_io_queues_off(oct);
+
+ /* To allow for in-flight requests */
+ schedule_timeout_uninterruptible(100);
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ /* Force all requests waiting to be fetched by OCTEON to complete. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ /* Force all pending ordered list requests to time out. */
+ lio_process_ordered_list(oct, 1);
+
+ /* We do not need to wait for output queue packets to be processed. */
+}
+
+/**
+ * \brief Cleanup PCI AER uncorrectable error status
+ * @param dev Pointer to PCI device
+ */
+static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
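+ /* 0x100 is assumed here to be the offset of the PCIe AER extended
+ * capability in config space.
+ */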
+ int pos = 0x100;
+ u32 status, mask;
+
+ pr_info("%s :\n", __func__);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+ if (dev->error_state == pci_channel_io_normal)
+ status &= ~mask; /* Clear corresponding nonfatal bits */
+ else
+ status &= mask; /* Clear corresponding fatal bits */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
+
+/**
+ * \brief Stop all PCI IO to a given device
+ * @param oct Pointer to Octeon device
+ */
+static void stop_pci_io(struct octeon_device *oct)
+{
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ pci_disable_device(oct->pci_dev);
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ pcierror_quiesce_device(oct);
+
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
+
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
+ /* making it a common function for all OCTEON models */
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+}
+
+/**
+ * \brief called when PCI error is detected
+ * @param pdev Pointer to PCI device
+ * @param state The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct octeon_device *oct = pci_get_drvdata(pdev);
+
+ /* Non-correctable Non-fatal errors */
+ if (state == pci_channel_io_normal) {
+ dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ }
+
+ /* Non-correctable Fatal errors */
+ dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
+ stop_pci_io(oct);
+
+ /* Always return a DISCONNECT. There is no support for recovery,
+ * only for a clean shutdown.
+ */
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * \brief mmio handler
+ * @param pdev Pointer to PCI device
+ */
+static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
+{
+ /* We should never hit this since we never ask for a reset for a Fatal
+ * Error. We always return DISCONNECT in io_error above.
+ * But play it safe and return RECOVERED for now.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called after the pci bus has been reset.
+ * @param pdev Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the octeon_resume routine.
+ */
+static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
+{
+ /* We should never hit this since we never ask for a reset for a Fatal
+ * Error. We always return DISCONNECT in io_error above.
+ * But play it safe and return RECOVERED for now.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called when traffic can start flowing again.
+ * @param pdev Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the octeon_resume routine.
+ */
+static void liquidio_pcie_resume(struct pci_dev *pdev)
+{
+ /* Nothing to be done here. */
+}
+
+#ifdef CONFIG_PM
+/**
+ * \brief called when suspending
+ * @param pdev Pointer to PCI device
+ * @param state state to suspend to
+ */
+static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+/**
+ * \brief called when resuming
+ * @param pdev Pointer to PCI device
+ */
+static int liquidio_resume(struct pci_dev *pdev)
+{
+ return 0;
+}
+#endif
+
+/* For PCI-E Advanced Error Recovery (AER) Interface */
+static struct pci_error_handlers liquidio_err_handler = {
+ .error_detected = liquidio_pcie_error_detected,
+ .mmio_enabled = liquidio_pcie_mmio_enabled,
+ .slot_reset = liquidio_pcie_slot_reset,
+ .resume = liquidio_pcie_resume,
+};
+
+static const struct pci_device_id liquidio_pci_tbl[] = {
+ { /* 68xx */
+ PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ { /* 66xx */
+ PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0
+ }
+};
+MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
+
+static struct pci_driver liquidio_pci_driver = {
+ .name = "LiquidIO",
+ .id_table = liquidio_pci_tbl,
+ .probe = liquidio_probe,
+ .remove = liquidio_remove,
+ .err_handler = &liquidio_err_handler, /* For AER */
+
+#ifdef CONFIG_PM
+ .suspend = liquidio_suspend,
+ .resume = liquidio_resume,
+#endif
+
+};
+
+/**
+ * \brief register PCI driver
+ */
+static int liquidio_init_pci(void)
+{
+ return pci_register_driver(&liquidio_pci_driver);
+}
+
+/**
+ * \brief unregister PCI driver
+ */
+static void liquidio_deinit_pci(void)
+{
+ pci_unregister_driver(&liquidio_pci_driver);
+}
+
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static inline int ifstate_check(struct lio *lio, int state_flag)
+{
+ return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static inline void ifstate_set(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static inline void ifstate_reset(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
+
+/**
+ * \brief Stop Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_stop(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+ } else {
+ netif_stop_queue(netdev);
+ }
+}
+
+/**
+ * \brief Start Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_start(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+ } else {
+ netif_start_queue(netdev);
+ }
+}
+
+/**
+ * \brief Wake Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_wake(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_wake_subqueue(netdev, i);
+ } else {
+ netif_wake_queue(netdev);
+ }
+}
+
+/**
+ * \brief Stop Tx queue
+ * @param netdev network device
+ */
+static void stop_txq(struct net_device *netdev)
+{
+ txqs_stop(netdev);
+}
+
+/**
+ * \brief Start Tx queue
+ * @param netdev network device
+ */
+static void start_txq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->linfo.link.s.status) {
+ txqs_start(netdev);
+ return;
+ }
+}
+
+/**
+ * \brief Wake a queue
+ * @param netdev network device
+ * @param q which queue to wake
+ */
+static inline void wake_q(struct net_device *netdev, int q)
+{
+ if (netif_is_multiqueue(netdev))
+ netif_wake_subqueue(netdev, q);
+ else
+ netif_wake_queue(netdev);
+}
+
+/**
+ * \brief Stop a queue
+ * @param netdev network device
+ * @param q which queue to stop
+ */
+static inline void stop_q(struct net_device *netdev, int q)
+{
+ if (netif_is_multiqueue(netdev))
+ netif_stop_subqueue(netdev, q);
+ else
+ netif_stop_queue(netdev);
+}
+
+/**
+ * \brief Check Tx queue status, and take appropriate action
+ * @param lio per-network private data
+ * @returns 0 if full, number of queues woken up otherwise
+ */
+static inline int check_txq_status(struct lio *lio)
+{
+ int ret_val = 0;
+
+ if (netif_is_multiqueue(lio->netdev)) {
+ int numqs = lio->netdev->num_tx_queues;
+ int q, iq = 0;
+
+ /* check each sub-queue state */
+ for (q = 0; q < numqs; q++) {
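+ /* Map the subqueue to a Tx PCI queue; the mask assumes
+ * num_txpciq is a power of two.
+ */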
+ iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)];
+ if (octnet_iq_is_full(lio->oct_dev, iq))
+ continue;
+ wake_q(lio->netdev, q);
+ ret_val++;
+ }
+ } else {
+ if (octnet_iq_is_full(lio->oct_dev, lio->txq))
+ return 0;
+ wake_q(lio->netdev, lio->txq);
+ ret_val = 1;
+ }
+ return ret_val;
+}
+
+/**
+ * Remove the node at the head of the list. If the head was the only
+ * node, the list is empty after this call.
+ */
+static inline struct list_head *list_delete_head(struct list_head *root)
+{
+ struct list_head *node;
+
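+ /* A list whose root points to itself in both directions is empty */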
+ if ((root->prev == root) && (root->next == root))
+ node = NULL;
+ else
+ node = root->next;
+
+ if (node)
+ list_del(node);
+
+ return node;
+}
+
+/**
+ * \brief Delete gather list
+ * @param lio per-network private data
+ */
+static void delete_glist(struct lio *lio)
+{
+ struct octnic_gather *g;
+
+ do {
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist);
+ if (g) {
+ if (g->sg)
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ kfree(g);
+ }
+ } while (g);
+}
+
+/**
+ * \brief Setup gather list
+ * @param lio per-network private data
+ */
+static int setup_glist(struct lio *lio)
+{
+ int i;
+ struct octnic_gather *g;
+
+ INIT_LIST_HEAD(&lio->glist);
+
+ for (i = 0; i < lio->tx_qsize; i++) {
+ g = kmalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+ memset(g, 0, sizeof(struct octnic_gather));
+
+ g->sg_size =
+ ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+ if (!g->sg) {
+ kfree(g);
+ break;
+ }
+
+ /* The gather component should be aligned on 64-bit boundary */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg = (struct octeon_sg_entry *)
+ ((unsigned long)g->sg + g->adjust);
+ }
+ list_add_tail(&g->list, &lio->glist);
+ }
+
+ if (i == lio->tx_qsize)
+ return 0;
+
+ delete_glist(lio);
+ return 1;
+}
+
+/**
+ * \brief Print link information
+ * @param netdev network device
+ */
+static void print_link_info(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+ struct oct_link_info *linfo = &lio->linfo;
+
+ if (linfo->link.s.status) {
+ netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
+ linfo->link.s.speed,
+ (linfo->link.s.duplex) ? "Full" : "Half");
+ } else {
+ netif_info(lio, link, lio->netdev, "Link Down\n");
+ }
+ }
+}
+
+/**
+ * \brief Update link status
+ * @param netdev network device
+ * @param ls link status structure
+ *
+ * Called on receipt of a link status response from the core application to
+ * update each interface's link status.
+ */
+static inline void update_link_status(struct net_device *netdev,
+ union oct_link_status *ls)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
+ lio->linfo.link.u64 = ls->u64;
+
+ print_link_info(netdev);
+
+ if (lio->linfo.link.s.status) {
+ netif_carrier_on(netdev);
+ /* start_txq(netdev); */
+ txqs_wake(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ stop_txq(netdev);
+ }
+ }
+}
+
+/**
+ * \brief Droq packet processor scheduler
+ * @param oct octeon device
+ */
+static
+void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ u64 oq_no;
+ struct octeon_droq *droq;
+
+ if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
+ if (!(oct->droq_intr & (1 << oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ oct_priv->napi_mask |= (1 << oq_no);
+ } else {
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ }
+ }
+}
+
+/**
+ * \brief Interrupt handler for octeon
+ * @param irq unused
+ * @param dev octeon device
+ */
+static
+irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ irqreturn_t ret;
+
+ /* Disable our interrupts for the duration of ISR */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ ret = oct->fn_list.process_interrupt_regs(oct);
+
+ if (ret == IRQ_HANDLED)
+ liquidio_schedule_droq_pkt_handlers(oct);
+
+ /* Re-enable our interrupts */
+ if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
+ oct->fn_list.enable_interrupt(oct->chip);
+
+ return ret;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ * Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+static int octeon_setup_interrupt(struct octeon_device *oct)
+{
+ int irqret, err;
+
+ err = pci_enable_msi(oct->pci_dev);
+ if (err)
+ dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+ err);
+ else
+ oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+ irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
+ IRQF_SHARED, "octeon", oct);
+ if (irqret) {
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+ irqret);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * \brief PCI probe handler
+ * @param pdev PCI device structure
+ * @param ent unused
+ */
+static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octeon_device *oct_dev = NULL;
+ struct handshake *hs;
+
+ oct_dev = octeon_allocate_device(pdev->device,
+ sizeof(struct octeon_device_priv));
+ if (!oct_dev) {
+ dev_err(&pdev->dev, "Unable to allocate device\n");
+ return -ENOMEM;
+ }
+
+ dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+ (u32)pdev->vendor, (u32)pdev->device);
+
+ /* Assign octeon_device for this device to the private data area. */
+ pci_set_drvdata(pdev, oct_dev);
+
+ /* set linux specific device pointer */
+ oct_dev->pci_dev = (void *)pdev;
+
+ hs = &handshake[oct_dev->octeon_id];
+ init_completion(&hs->init);
+ init_completion(&hs->started);
+ hs->pci_dev = pdev;
+
+ if (oct_dev->octeon_id == 0)
+ /* first LiquidIO NIC is detected */
+ complete(&first_stage);
+
+ if (octeon_device_init(oct_dev)) {
+ liquidio_remove(pdev);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+ return 0;
+}
+
+/**
+ * \brief Destroy resources associated with octeon device
+ * @param oct octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+ int i;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ struct handshake *hs;
+
+ switch (atomic_read(&oct->status)) {
+ case OCT_DEV_RUNNING:
+ case OCT_DEV_CORE_OK:
+
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ oct->app_mode = CVM_DRV_INVALID_APP;
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ /* fallthrough */
+ case OCT_DEV_HOST_OK:
+
+ /* fallthrough */
+ case OCT_DEV_CONSOLE_INIT_DONE:
+ /* Remove any consoles */
+ octeon_remove_consoles(oct);
+
+ /* fallthrough */
+ case OCT_DEV_IO_QUEUES_DONE:
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet
+ * processing to finish.
+ */
+ oct->fn_list.disable_io_queues(oct);
+
+ if (lio_wait_for_oq_pkts(oct))
+ dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
+
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+
+ /* Soft reset the octeon device before exiting */
+ oct->fn_list.soft_reset(oct);
+
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
+ /* fallthrough */
+ case OCT_DEV_IN_RESET:
+ case OCT_DEV_DROQ_INIT_DONE:
+ /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
+ mdelay(100);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ octeon_delete_droq(oct, i);
+ }
+
+ /* Force any pending handshakes to complete */
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+
+ if (hs->pci_dev) {
+ handshake[oct->octeon_id].init_ok = 0;
+ complete(&handshake[oct->octeon_id].init);
+ handshake[oct->octeon_id].started_ok = 0;
+ complete(&handshake[oct->octeon_id].started);
+ }
+ }
+
+ /* fallthrough */
+ case OCT_DEV_RESP_LIST_INIT_DONE:
+ octeon_delete_response_list(oct);
+
+ /* fallthrough */
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
+
+ /* fallthrough */
+ case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ octeon_delete_instr_queue(oct, i);
+ }
+
+ /* fallthrough */
+ case OCT_DEV_DISPATCH_INIT_DONE:
+ octeon_delete_dispatch_list(oct);
+ cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+ /* fallthrough */
+ case OCT_DEV_PCI_MAP_DONE:
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+
+ /* fallthrough */
+ case OCT_DEV_BEGIN_STATE:
+ /* Nothing to be done here either */
+ break;
+ } /* end switch(oct->status) */
+
+ tasklet_kill(&oct_priv->droq_tasklet);
+}
+
+/**
+ * \brief Send Rx control command
+ * @param lio per-network private data
+ * @param start_stop whether to start or stop
+ */
+static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+{
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = start_stop;
+ nctrl.netpndev = (u64)lio->netdev;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
+ netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+}
+
+/**
+ * \brief Destroy NIC device interface
+ * @param oct octeon device
+ * @param ifidx which interface to destroy
+ *
+ * Cleanup associated with each interface for an Octeon device when NIC
+ * module is being unloaded or if initialization fails during load.
+ */
+static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
+{
+ struct net_device *netdev = oct->props[ifidx].netdev;
+ struct lio *lio;
+
+ if (!netdev) {
+ dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
+ __func__, ifidx);
+ return;
+ }
+
+ lio = GET_LIO(netdev);
+
+ dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
+
+ send_rx_ctrl_cmd(lio, 0);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
+ txqs_stop(netdev);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
+ unregister_netdev(netdev);
+
+ delete_glist(lio);
+
+ free_netdev(netdev);
+
+ oct->props[ifidx].netdev = NULL;
+}
+
+/**
+ * \brief Stop complete NIC functionality
+ * @param oct octeon device
+ */
+static int liquidio_stop_nic_module(struct octeon_device *oct)
+{
+ int i, j;
+ struct lio *lio;
+
+ dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ if (!oct->ifcount) {
+ dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
+ return 1;
+ }
+
+ for (i = 0; i < oct->ifcount; i++) {
+ lio = GET_LIO(oct->props[i].netdev);
+ for (j = 0; j < lio->linfo.num_rxpciq; j++)
+ octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]);
+ }
+
+ for (i = 0; i < oct->ifcount; i++)
+ liquidio_destroy_nic_device(oct, i);
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
+ return 0;
+}
+
+/**
+ * \brief Cleans up resources at unload time
+ * @param pdev PCI device structure
+ */
+static void liquidio_remove(struct pci_dev *pdev)
+{
+ struct octeon_device *oct_dev = pci_get_drvdata(pdev);
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+
+ if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
+ liquidio_stop_nic_module(oct_dev);
+
+ /* Reset the octeon device and cleanup all memory allocated for
+ * the octeon device by driver.
+ */
+ octeon_destroy_resources(oct_dev);
+
+ dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
+
+ /* This octeon device has been removed. Update the global
+ * data structure to reflect this. Free the device structure.
+ */
+ octeon_free_device_mem(oct_dev);
+}
+
+/**
+ * \brief Identify the Octeon device and map the BAR address space
+ * @param oct octeon device
+ */
+static int octeon_chip_specific_setup(struct octeon_device *oct)
+{
+ u32 dev_id, rev_id;
+ int ret = 1;
+
+ pci_read_config_dword(oct->pci_dev, 0, &dev_id);
+ pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+ oct->rev_id = rev_id & 0xff;
+
+ switch (dev_id) {
+ case OCTEON_CN68XX_PCIID:
+ oct->chip_id = OCTEON_CN68XX;
+ ret = lio_setup_cn68xx_octeon_device(oct);
+ break;
+
+ case OCTEON_CN66XX_PCIID:
+ oct->chip_id = OCTEON_CN66XX;
+ ret = lio_setup_cn66xx_octeon_device(oct);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
+ dev_id);
+ }
+
+ if (!ret)
+ dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n",
+ OCTEON_MAJOR_REV(oct),
+ OCTEON_MINOR_REV(oct),
+ octeon_get_conf(oct)->card_name);
+
+ return ret;
+}
+
+/**
+ * \brief PCI initialization for each Octeon device.
+ * @param oct octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+ /* setup PCI stuff first */
+ if (pci_enable_device(oct->pci_dev)) {
+ dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+ return 1;
+ }
+
+ if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+ dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+ return 1;
+ }
+
+ /* Enable PCI DMA Master. */
+ pci_set_master(oct->pci_dev);
+
+ return 0;
+}
+
+/**
+ * \brief Check Tx queue state for a given network buffer
+ * @param lio per-network private data
+ * @param skb network buffer
+ */
+static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
+{
+ int q = 0, iq = 0;
+
+ if (netif_is_multiqueue(lio->netdev)) {
+ q = skb->queue_mapping;
+ iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+ } else {
+ iq = lio->txq;
+ }
+
+ if (octnet_iq_is_full(lio->oct_dev, iq))
+ return 0;
+ wake_q(lio->netdev, q);
+ return 1;
+}
+
+/**
+ * \brief Unmap and free network buffer
+ * @param buf buffer
+ */
+static void free_netbuf(void *buf)
+{
+ struct sk_buff *skb;
+ struct octnet_buf_free_info *finfo;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
+ DMA_TO_DEVICE);
+
+ check_txq_state(lio, skb);
+
+ recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer
+ * @param buf buffer
+ */
+static void free_netsgbuf(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+ struct octnic_gather *g;
+ int i, frags;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ pci_unmap_page((lio->oct_dev)->pci_dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ frag->size, DMA_TO_DEVICE);
+ i++;
+ }
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ finfo->dptr, g->sg_size,
+ DMA_TO_DEVICE);
+
+ spin_lock(&lio->lock);
+ list_add_tail(&g->list, &lio->glist);
+ spin_unlock(&lio->lock);
+
+ check_txq_state(lio, skb); /* mq support: sub-queue state check */
+
+ recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer with response
+ * @param buf buffer
+ */
+static void free_netsgbuf_with_resp(void *buf)
+{
+ struct octeon_soft_command *sc;
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+ struct octnic_gather *g;
+ int i, frags;
+
+ sc = (struct octeon_soft_command *)buf;
+ skb = (struct sk_buff *)sc->callback_arg;
+ finfo = (struct octnet_buf_free_info *)&skb->cb;
+
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ pci_unmap_page((lio->oct_dev)->pci_dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ frag->size, DMA_TO_DEVICE);
+ i++;
+ }
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ finfo->dptr, g->sg_size,
+ DMA_TO_DEVICE);
+
+ spin_lock(&lio->lock);
+ list_add_tail(&g->list, &lio->glist);
+ spin_unlock(&lio->lock);
+
+ /* Don't free the skb yet */
+
+ check_txq_state(lio, skb);
+}
+
+/**
+ * \brief Adjust ptp frequency
+ * @param ptp PTP clock info
+ * @param ppb how much to adjust by, in parts-per-billion
+ */
+static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ u64 comp, delta;
+ unsigned long flags;
+ bool neg_adj = false;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+
+ /* The hardware adds the clock compensation value to the
+ * PTP clock on every coprocessor clock cycle, so we
+ * compute the delta in terms of coprocessor clocks.
+ */
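+ /* Illustrative example (assuming a 1 GHz coprocessor clock): the base
+ * compensation is exactly 1 ns per cycle (1ULL << 32 in 32.32 fixed
+ * point), and ppb = 1000 gives a delta of about 4295, which adds up to
+ * roughly one extra microsecond per second, i.e. the requested 1000 ppb.
+ */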
+ delta = (u64)ppb << 32;
+ do_div(delta, oct->coproc_clock_rate);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
+ if (neg_adj)
+ comp -= delta;
+ else
+ comp += delta;
+ lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Adjust ptp time
+ * @param ptp PTP clock info
+ * @param delta how much to adjust by, in nanosecs
+ */
+static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ lio->ptp_adjust += delta;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Get hardware clock time, including any adjustment
+ * @param ptp PTP clock info
+ * @param ts timespec
+ */
+static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
+ ns += lio->ptp_adjust;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * \brief Set hardware clock time. Reset adjustment
+ * @param ptp PTP clock info
+ * @param ts timespec
+ */
+static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ ns = timespec_to_ns(ts);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
+ lio->ptp_adjust = 0;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Enable or disable an ancillary PTP feature (not supported)
+ * @param ptp PTP clock info
+ * @param rq request
+ * @param on whether to enable or disable the feature
+ */
+static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * \brief Open PTP clock source
+ * @param netdev network device
+ */
+static void oct_ptp_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ spin_lock_init(&lio->ptp_lock);
+
+ snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
+ lio->ptp_info.owner = THIS_MODULE;
+ lio->ptp_info.max_adj = 250000000;
+ lio->ptp_info.n_alarm = 0;
+ lio->ptp_info.n_ext_ts = 0;
+ lio->ptp_info.n_per_out = 0;
+ lio->ptp_info.pps = 0;
+ lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
+ lio->ptp_info.adjtime = liquidio_ptp_adjtime;
+ lio->ptp_info.gettime64 = liquidio_ptp_gettime;
+ lio->ptp_info.settime64 = liquidio_ptp_settime;
+ lio->ptp_info.enable = liquidio_ptp_enable;
+
+ lio->ptp_adjust = 0;
+
+ lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
+ &oct->pci_dev->dev);
+
+ if (IS_ERR(lio->ptp_clock))
+ lio->ptp_clock = NULL;
+}
+
+/**
+ * \brief Init PTP clock
+ * @param oct octeon device
+ */
+static void liquidio_ptp_init(struct octeon_device *oct)
+{
+ u64 clock_comp, cfg;
+
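+ /* Program the base compensation: the value added to the PTP clock on
+ * every coprocessor clock cycle, expressed as nanoseconds per cycle
+ * in 32.32 fixed point (NSEC_PER_SEC << 32 / coprocessor clock rate).
+ */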
+ clock_comp = (u64)NSEC_PER_SEC << 32;
+ do_div(clock_comp, oct->coproc_clock_rate);
+ lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+
+ /* Enable */
+ cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
+ lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
+}
+
+/**
+ * \brief Load firmware to device
+ * @param oct octeon device
+ *
+ * Maps device to firmware filename, requests firmware, and downloads it
+ */
+static int load_firmware(struct octeon_device *oct)
+{
+ int ret = 0;
+ const struct firmware *fw;
+ char fw_name[LIO_MAX_FW_FILENAME_LEN];
+ char *tmp_fw_type;
+
+ if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
+ sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
+ dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
+ return ret;
+ }
+
+ if (fw_type[0] == '\0')
+ tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
+ else
+ tmp_fw_type = fw_type;
+
+ sprintf(fw_name, "/*(DEBLOBBED)*/", LIO_FW_DIR, LIO_FW_BASE_NAME,
+ octeon_get_conf(oct)->card_name, tmp_fw_type,
+ LIO_FW_NAME_SUFFIX);
+
+ ret = reject_firmware(&fw, fw_name, &oct->pci_dev->dev);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
+ fw_name);
+ return ret;
+ }
+
+ ret = octeon_download_firmware(oct, fw->data, fw->size);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/**
+ * \brief Setup output queue
+ * @param oct octeon device
+ * @param q_no which queue
+ * @param num_descs how many descriptors
+ * @param desc_size size of each descriptor
+ * @param app_ctx application context
+ */
+static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+ int desc_size, void *app_ctx)
+{
+ int ret_val = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+ /* droq creation and local register settings. */
+ ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+ if (ret_val == -1)
+ return ret_val;
+
+ if (ret_val == 1) {
+ dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+ return 0;
+ }
+ /* tasklet creation for the droq */
+
+ /* Enable the droq queues */
+ octeon_set_droq_pkt_op(oct, q_no, 1);
+
+ /* Send Credit for Octeon Output queues. Credits are always
+ * sent after the output queue is enabled.
+ */
+ writel(oct->droq[q_no]->max_count,
+ oct->droq[q_no]->pkts_credit_reg);
+
+ return ret_val;
+}
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param oct octeon device
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+static void if_cfg_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_if_cfg_resp *resp;
+ struct liquidio_if_cfg_context *ctx;
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (resp->status)
+ dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+ CVM_CAST64(resp->status));
+ ACCESS_ONCE(ctx->cond) = 1;
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Select a Tx queue for an outgoing packet
+ * @param dev Net device
+ * @param skb sk_buff structure
+ * @param accel_priv accelerated private data (unused)
+ * @param fallback fallback queue selection function (unused)
+ * @returns selected queue number
+ */
+static u16 select_q(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ int qindex;
+ struct lio *lio;
+
+ lio = GET_LIO(dev);
+ /* select queue on chosen queue_mapping or core */
+ qindex = skb_rx_queue_recorded(skb) ?
+ skb_get_rx_queue(skb) : smp_processor_id();
+ return (u16)(qindex & (lio->linfo.num_txpciq - 1));
+}
+
+/** Routine to push packets arriving on the Octeon interface up to the
+ * network layer.
+ * @param octeon_id - octeon device id.
+ * @param skbuff - skbuff struct to be passed to network layer.
+ * @param len - size of total data received.
+ * @param rh - Control header associated with the packet
+ * @param param - additional control data with the packet
+ */
+static void
+liquidio_push_packet(u32 octeon_id,
+ void *skbuff,
+ u32 len,
+ union octeon_rh *rh,
+ void *param)
+{
+ struct napi_struct *napi = param;
+ struct octeon_device *oct = lio_get_device(octeon_id);
+ struct sk_buff *skb = (struct sk_buff *)skbuff;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u64 ns;
+ struct net_device *netdev =
+ (struct net_device *)oct->props[rh->r_dh.link].netdev;
+ struct octeon_droq *droq = container_of(param, struct octeon_droq,
+ napi);
+ if (netdev) {
+ int packet_was_received;
+ struct lio *lio = GET_LIO(netdev);
+
+ /* Do not proceed if the interface is not in RUNNING state. */
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+ recv_buffer_free(skb);
+ droq->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = netdev;
+
+ if (rh->r_dh.has_hwtstamp) {
+ /* The hardware includes the timestamp at the
+ * beginning of the packet.
+ */
+ if (ifstate_check(lio,
+ LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+ /* Nanoseconds are in the first 64-bits
+ * of the packet.
+ */
+ memcpy(&ns, (skb->data), sizeof(ns));
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(ns + lio->ptp_adjust);
+ }
+ skb_pull(skb, sizeof(ns));
+ }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if ((netdev->features & NETIF_F_RXCSUM) &&
+ (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
+ /* checksum has already been verified */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
+
+ if (packet_was_received) {
+ droq->stats.rx_bytes_received += len;
+ droq->stats.rx_pkts_received++;
+ netdev->last_rx = jiffies;
+ } else {
+ droq->stats.rx_dropped++;
+ netif_info(lio, rx_err, lio->netdev,
+ "droq:%d error rx_dropped:%llu\n",
+ droq->q_no, droq->stats.rx_dropped);
+ }
+
+ } else {
+ recv_buffer_free(skb);
+ }
+}
+
+/**
+ * \brief wrapper for calling napi_schedule
+ * @param param parameters to pass to napi_schedule
+ *
+ * Used when scheduling on different CPUs
+ */
+static void napi_schedule_wrapper(void *param)
+{
+ struct napi_struct *napi = param;
+
+ napi_schedule(napi);
+}
+
+/**
+ * \brief callback when receive interrupt occurs and we are in NAPI mode
+ * @param arg pointer to octeon output queue
+ */
+static void liquidio_napi_drv_callback(void *arg)
+{
+ struct octeon_droq *droq = arg;
+ int this_cpu = smp_processor_id();
+
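+ /* Each droq is bound to a CPU in setup_io_queues(). If this interrupt
+ * landed on a different CPU, bounce the NAPI schedule to the owning
+ * CPU with an async single-CPU function call so a given queue is
+ * always polled on the same core.
+ */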
+ if (droq->cpu_id == this_cpu) {
+ napi_schedule(&droq->napi);
+ } else {
+ struct call_single_data *csd = &droq->csd;
+
+ csd->func = napi_schedule_wrapper;
+ csd->info = &droq->napi;
+ csd->flags = 0;
+
+ smp_call_function_single_async(droq->cpu_id, csd);
+ }
+}
+
+/**
+ * \brief Main NAPI poll function
+ * @param droq octeon output queue
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
+{
+ int work_done;
+ struct lio *lio = GET_LIO(droq->napi.dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ POLL_EVENT_PROCESS_PKTS,
+ budget);
+ if (work_done < 0) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Receive work_done < 0, rxq:%d\n", droq->q_no);
+ goto octnet_napi_finish;
+ }
+
+ if (work_done > budget)
+ dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
+ __func__, work_done, budget);
+
+ return work_done;
+
+octnet_napi_finish:
+ napi_complete(&droq->napi);
+ octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
+ 0);
+ return 0;
+}
+
+/**
+ * \brief Entry point for NAPI polling
+ * @param napi NAPI structure
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_droq *droq;
+ int work_done;
+
+ droq = container_of(napi, struct octeon_droq, napi);
+
+ work_done = liquidio_napi_do_rx(droq, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
+ POLL_EVENT_ENABLE_INTR, 0);
+ return 0;
+ }
+
+ return work_done;
+}
+
+/**
+ * \brief Setup input and output queues
+ * @param octeon_dev octeon device
+ * @param net_device Net device
+ *
+ * Note: Queues are with respect to the octeon device. Thus
+ * an input queue is for egress packets, and output queues
+ * are for ingress packets.
+ */
+static inline int setup_io_queues(struct octeon_device *octeon_dev,
+ struct net_device *net_device)
+{
+ static int first_time = 1;
+ static struct octeon_droq_ops droq_ops;
+ static int cpu_id;
+ static int cpu_id_modulus;
+ struct octeon_droq *droq;
+ struct napi_struct *napi;
+ int q, q_no, retval = 0;
+ struct lio *lio;
+ int num_tx_descs;
+
+ lio = GET_LIO(net_device);
+ if (first_time) {
+ first_time = 0;
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
+ }
+
+ /* set up DROQs. */
+ for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+ q_no = lio->linfo.rxpciq[q];
+
+ retval = octeon_setup_droq(octeon_dev, q_no,
+ CFG_GET_NUM_RX_DESCS_NIC_IF
+ (octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
+ (octeon_get_conf(octeon_dev),
+ lio->ifidx), NULL);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime DROQ(RxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+
+ droq = octeon_dev->droq[q_no];
+ napi = &droq->napi;
+ netif_napi_add(net_device, napi, liquidio_napi_poll, 64);
+
+ /* designate a CPU for this droq */
+ droq->cpu_id = cpu_id;
+ cpu_id++;
+ if (cpu_id >= cpu_id_modulus)
+ cpu_id = 0;
+
+ octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
+ }
+
+ /* set up IQs. */
+ for (q = 0; q < lio->linfo.num_txpciq; q++) {
+ num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
+ (octeon_dev),
+ lio->ifidx);
+ retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q],
+ num_tx_descs,
+ netdev_get_tx_queue(net_device, q));
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime IQ(TxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * \brief Poll routine for checking transmit queue status
+ * @param work work_struct data structure
+ */
+static void octnet_poll_check_txq_status(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
+ return;
+
+ check_txq_status(lio);
+ queue_delayed_work(lio->txq_status_wq.wq,
+ &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * \brief Sets up the txq poll check
+ * @param netdev network device
+ */
+static inline void setup_tx_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->txq_status_wq.wq = create_workqueue("txq-status");
+ if (!lio->txq_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
+ return;
+ }
+ INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
+ octnet_poll_check_txq_status);
+ lio->txq_status_wq.wk.ctxptr = lio;
+ queue_delayed_work(lio->txq_status_wq.wq,
+ &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * \brief Net device open for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct napi_struct *napi, *n;
+
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct_ptp_open(netdev);
+
+ ifstate_set(lio, LIO_IFSTATE_RUNNING);
+ setup_tx_poll_fn(netdev);
+ start_txq(netdev);
+
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ try_module_get(THIS_MODULE);
+
+ /* tell Octeon to start forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 1);
+
+ /* Ready for link status updates */
+ lio->intf_open = 1;
+
+ dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
+ netdev->name);
+
+ return 0;
+}
+
+/**
+ * \brief Net device stop for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_stop(struct net_device *netdev)
+{
+ struct napi_struct *napi, *n;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ /* Inform that netif carrier is down */
+ lio->intf_open = 0;
+ lio->linfo.link.s.status = 0;
+
+ netif_carrier_off(netdev);
+
+ /* tell Octeon to stop forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 0);
+
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ flush_workqueue(lio->txq_status_wq.wq);
+ destroy_workqueue(lio->txq_status_wq.wq);
+
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ /* This is a hack that allows DHCP to continue working. */
+ set_bit(__LINK_STATE_START, &lio->netdev->state);
+
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ txqs_stop(netdev);
+
+ dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+ struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+ struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ switch (nctrl->ncmd.s.cmd) {
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ break;
+
+ case OCTNET_CMD_CHANGE_MACADDR:
+ /* If command is successful, change the MACADDR. */
+ netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
+ CVM_CAST64(nctrl->udd[0]));
+ dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
+ netdev->name, CVM_CAST64(nctrl->udd[0]));
+ memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
+ break;
+
+ case OCTNET_CMD_CHANGE_MTU:
+ /* If command is successful, change the MTU. */
+ netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
+ netdev->mtu, nctrl->ncmd.s.param2);
+ dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+ netdev->name, netdev->mtu,
+ nctrl->ncmd.s.param2);
+ netdev->mtu = nctrl->ncmd.s.param2;
+ break;
+
+ case OCTNET_CMD_GPIO_ACCESS:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_LRO_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_LRO_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_SET_SETTINGS:
+ dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+ netdev->name);
+
+ break;
+
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+ nctrl->ncmd.s.cmd);
+ }
+}
+
+/**
+ * \brief Convert net device flags to an octnet_ifflags mask
+ * @param netdev network device
+ *
+ * This routine generates an octnet_ifflags mask from the net device flags
+ * received from the OS.
+ */
+static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
+{
+ enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
+
+ if (netdev->flags & IFF_PROMISC)
+ f |= OCTNET_IFFLAG_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+
+ if (netdev->flags & IFF_MULTICAST) {
+ f |= OCTNET_IFFLAG_MULTICAST;
+
+ /* Accept all multicast addresses if there are more than we
+ * can handle
+ */
+ if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+ }
+
+ if (netdev->flags & IFF_BROADCAST)
+ f |= OCTNET_IFFLAG_BROADCAST;
+
+ return f;
+}
+
+/**
+ * \brief Net device set_multicast_list
+ * @param netdev network device
+ */
+static void liquidio_set_mcast_list(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ struct netdev_hw_addr *ha;
+ u64 *mc;
+ int ret, i;
+ int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = get_new_flags(netdev);
+ nctrl.ncmd.s.param3 = mc_count;
+ nctrl.ncmd.s.more = mc_count;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* copy all the addresses into the udd */
+ i = 0;
+ mc = &nctrl.udd[0];
+ netdev_for_each_mc_addr(ha, netdev) {
+ *mc = 0;
+ memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
+ /* no need to swap bytes */
+
+ if (++mc > &nctrl.udd[mc_count])
+ break;
+ }
+
+ /* This call from the kernel runs in atomic context, so we
+ * cannot sleep waiting for a response.
+ */
+ nctrl.wait_time = 0;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+}
+
+/**
+ * \brief Net device set_mac_address
+ * @param netdev network device
+ */
+static int liquidio_set_mac(struct net_device *netdev, void *p)
+{
+ int ret = 0;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+
+ if ((!is_valid_ether_addr(addr->sa_data)) ||
+ (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+ return -EADDRNOTAVAIL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = 0;
+ nctrl.ncmd.s.more = 1;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ nctrl.wait_time = 100;
+
+ nctrl.udd[0] = 0;
+ /* The MAC Address is presented in network byte order. */
+ memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
+ return -ENOMEM;
+ }
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
+
+ return 0;
+}
+
+/**
+ * \brief Net device get_stats
+ * @param netdev network device
+ */
+static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+ struct octeon_device *oct;
+ u64 pkts = 0, drop = 0, bytes = 0;
+ struct oct_droq_stats *oq_stats;
+ struct oct_iq_stats *iq_stats;
+ int i, iq_no, oq_no;
+
+ oct = lio->oct_dev;
+
+ for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ iq_no = lio->linfo.txpciq[i];
+ iq_stats = &oct->instr_queue[iq_no]->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
+ for (i = 0; i < lio->linfo.num_rxpciq; i++) {
+ oq_no = lio->linfo.rxpciq[i];
+ oq_stats = &oct->droq[oq_no]->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_nodispatch +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+
+ stats->rx_bytes = bytes;
+ stats->rx_packets = pkts;
+ stats->rx_dropped = drop;
+
+ return stats;
+}
+
+/**
+ * \brief Net device change_mtu
+ * @param netdev network device
+ */
+static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
+ int ret = 0;
+
+ /* Limit the MTU to make sure the ethernet packets are between 64 bytes
+ * and 65535 bytes
+ */
+ if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
+ (max_frm_size > OCTNET_MAX_FRM_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
+ dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
+ (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
+ (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
+ return -EINVAL;
+ }
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = new_mtu;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
+ return -1;
+ }
+
+ lio->mtu = new_mtu;
+
+ return 0;
+}
+
+/**
+ * \brief Handler for SIOCSHWTSTAMP ioctl
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config conf;
+ struct lio *lio = GET_LIO(netdev);
+
+ if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
+ return -EFAULT;
+
+ if (conf.flags)
+ return -EINVAL;
+
+ switch (conf.tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (conf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ else
+ ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+}
+
+/**
+ * \brief ioctl handler
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return hwtstamp_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * \brief Handle a Tx timestamp response
+ * @param oct octeon device
+ * @param status response status
+ * @param buf pointer to skb
+ */
+static void handle_timestamp(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct octeon_soft_command *sc;
+ struct oct_timestamp_resp *resp;
+ struct lio *lio;
+ struct sk_buff *skb = (struct sk_buff *)buf;
+
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ lio = finfo->lio;
+ sc = finfo->sc;
+ oct = lio->oct_dev;
+ resp = (struct oct_timestamp_resp *)sc->virtrptr;
+
+ if (status != OCTEON_REQUEST_DONE) {
+ dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ resp->timestamp = 0;
+ }
+
+ octeon_swap_8B_data(&resp->timestamp, 1);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
+ struct skb_shared_hwtstamps ts;
+ u64 ns = resp->timestamp;
+
+ netif_info(lio, tx_done, lio->netdev,
+ "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
+ skb, (unsigned long long)ns);
+ ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ octeon_free_soft_command(oct, sc);
+ recv_buffer_free(skb);
+}
+
+/**
+ * \brief Send a data packet that will be timestamped
+ * @param oct octeon device
+ * @param ndata pointer to network data
+ * @param finfo pointer to private network data
+ */
+static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ struct octnet_buf_free_info *finfo,
+ int xmit_more)
+{
+ int retval;
+ struct octeon_soft_command *sc;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_rdp *rdp;
+ struct lio *lio;
+ int ring_doorbell;
+
+ lio = finfo->lio;
+
+ sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
+ sizeof(struct oct_timestamp_resp));
+ finfo->sc = sc;
+
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
+ return IQ_SEND_FAILED;
+ }
+
+ if (ndata->reqtype == REQTYPE_NORESP_NET)
+ ndata->reqtype = REQTYPE_RESP_NET;
+ else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
+ ndata->reqtype = REQTYPE_RESP_NET_SG;
+
+ sc->callback = handle_timestamp;
+ sc->callback_arg = finfo->skb;
+ sc->iq_no = ndata->q_no;
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+ ring_doorbell = !xmit_more;
+ retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
+ sc, ih->dlengsz, ndata->reqtype);
+
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
+ retval);
+ octeon_free_soft_command(oct, sc);
+ } else {
+ netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
+ }
+
+ return retval;
+}
+
+static inline int is_ipv4(struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->version == 4);
+}
+
+static inline int is_vlan(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_8021Q);
+}
+
+static inline int is_ip_fragmented(struct sk_buff *skb)
+{
+ /* The Don't Fragment and Reserved flag fields are ignored.
+ * The packet is an IP fragment if
+ * - the More Fragments bit is set (indicating this is a fragment
+ * with more to follow; the current offset could be 0), or
+ * - the offset field is non-zero.
+ */
+ return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
+}
+
+static inline int is_ipv6(struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IPV6)) &&
+ (ipv6_hdr(skb)->version == 6);
+}
+
+static inline int is_with_extn_hdr(struct sk_buff *skb)
+{
+ return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
+ (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
+}
+
+static inline int is_tcpudp(struct sk_buff *skb)
+{
+ return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+ (ip_hdr(skb)->protocol == IPPROTO_UDP);
+}
+
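+/* Flow tag helpers: the tag sent to the Octeon with each Tx command is a
+ * CRC32 over the 5-tuple (protocol, source/destination address and
+ * source/destination port), so all packets of a flow carry the same tag.
+ */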
+static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
+{
+ u32 tag;
+ struct iphdr *iphdr = ip_hdr(skb);
+
+ tag = crc32(0, &iphdr->protocol, 1);
+ tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
+ tag = crc32(tag, skb_transport_header(skb), 4);
+ return tag;
+}
+
+static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
+{
+ u32 tag;
+ struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
+
+ tag = crc32(0, &ipv6hdr->nexthdr, 1);
+ tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
+ tag = crc32(tag, skb_transport_header(skb), 4);
+ return tag;
+}
+
+/** \brief Transmit network packets to the Octeon interface
+ * @param skbuff skbuff struct to be passed to network layer.
+ * @param netdev pointer to network device
+ * @returns whether the packet was transmitted to the device okay or not
+ * (NETDEV_TX_OK or NETDEV_TX_BUSY)
+ */
+static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct lio *lio;
+ struct octnet_buf_free_info *finfo;
+ union octnic_cmd_setup cmdsetup;
+ struct octnic_data_pkt ndata;
+ struct octeon_device *oct;
+ struct oct_iq_stats *stats;
+ int cpu = 0, status = 0;
+ int q_idx = 0, iq_no = 0;
+ int xmit_more;
+ u32 tag = 0;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ if (netif_is_multiqueue(netdev)) {
+ cpu = skb->queue_mapping;
+ q_idx = (cpu & (lio->linfo.num_txpciq - 1));
+ iq_no = lio->linfo.txpciq[q_idx];
+ } else {
+ iq_no = lio->txq;
+ }
+
+ stats = &oct->instr_queue[iq_no]->stats;
+
+ /* Check for all conditions in which the current packet cannot be
+ * transmitted.
+ */
+ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
+ (!lio->linfo.link.s.status) ||
+ (skb->len <= 0)) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit failed link_status : %d\n",
+ lio->linfo.link.s.status);
+ goto lio_xmit_failed;
+ }
+
+ /* Use space in skb->cb to store info used to unmap and
+ * free the buffers.
+ */
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ finfo->lio = lio;
+ finfo->skb = skb;
+ finfo->sc = NULL;
+
+ /* Prepare the attributes for the data to be passed to OSI. */
+ memset(&ndata, 0, sizeof(struct octnic_data_pkt));
+
+ ndata.buf = (void *)finfo;
+
+ ndata.q_no = iq_no;
+
+ if (netif_is_multiqueue(netdev)) {
+ if (octnet_iq_is_full(oct, ndata.q_no)) {
+ /* defer sending if queue is full */
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ stats->tx_iq_busy++;
+ return NETDEV_TX_BUSY;
+ }
+ } else {
+ if (octnet_iq_is_full(oct, lio->txq)) {
+ /* defer sending if queue is full */
+ stats->tx_iq_busy++;
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
+ * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
+ */
+
+ ndata.datasize = skb->len;
+
+ cmdsetup.u64 = 0;
+ cmdsetup.s.ifidx = lio->linfo.ifidx;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
+ tag = get_ipv4_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
+
+ if (ip_hdr(skb)->ihl > 5)
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV4OPTS;
+
+ } else if (is_ipv6(skb)) {
+ tag = get_ipv6_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
+
+ if (is_with_extn_hdr(skb))
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV6EXTHDR;
+
+ } else if (is_vlan(skb)) {
+ if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
+ == htons(ETH_P_IP) &&
+ !is_ip_fragmented(skb) && is_tcpudp(skb)) {
+ tag = get_ipv4_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset =
+ sizeof(struct vlan_ethhdr) + 1;
+
+ if (ip_hdr(skb)->ihl > 5)
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV4OPTS;
+
+ } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
+ == htons(ETH_P_IPV6)) {
+ tag = get_ipv6_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset =
+ sizeof(struct vlan_ethhdr) + 1;
+
+ if (is_with_extn_hdr(skb))
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV6EXTHDR;
+ }
+ }
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cmdsetup.s.timestamp = 1;
+ }
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ cmdsetup.s.u.datasize = skb->len;
+ octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ /* Offload checksum calculation for TCP/UDP packets */
+ ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ finfo->dptr = ndata.cmd.dptr;
+
+ ndata.reqtype = REQTYPE_NORESP_NET;
+
+ } else {
+ int i, frags;
+ struct skb_frag_struct *frag;
+ struct octnic_gather *g;
+
+ spin_lock(&lio->lock);
+ g = (struct octnic_gather *)list_delete_head(&lio->glist);
+ spin_unlock(&lio->lock);
+
+ if (!g) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit scatter gather: glist null!\n");
+ goto lio_xmit_failed;
+ }
+
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
+ octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+
+ g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+ add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
+
+ frags = skb_shinfo(skb)->nr_frags;
+ i = 1;
+ while (frags--) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ dma_map_page(&oct->pci_dev->dev,
+ frag->page.p,
+ frag->page_offset,
+ frag->size,
+ DMA_TO_DEVICE);
+
+ add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ i++;
+ }
+
+ ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
+ g->sg, g->sg_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ return NETDEV_TX_BUSY;
+ }
+
+ finfo->dptr = ndata.cmd.dptr;
+ finfo->g = g;
+
+ ndata.reqtype = REQTYPE_NORESP_NET_SG;
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ struct octeon_instr_irh *irh =
+ (struct octeon_instr_irh *)&ndata.cmd.irh;
+ union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+
+ irh->len = 1; /* to indicate that ossp[0] contains tx_info */
+ tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
+ tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ }
+
+ xmit_more = skb->xmit_more;
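+
+ /* skb->xmit_more means the stack has more packets queued behind this
+ * one, so the send helpers can defer ringing the doorbell until the
+ * last packet of the batch.
+ */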
+
+ if (unlikely(cmdsetup.s.timestamp))
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+ else
+ status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+ if (status == IQ_SEND_FAILED)
+ goto lio_xmit_failed;
+
+ netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
+
+ if (status == IQ_SEND_STOP)
+ stop_q(lio->netdev, q_idx);
+
+ netdev->trans_start = jiffies;
+
+ stats->tx_done++;
+ stats->tx_tot_bytes += skb->len;
+
+ return NETDEV_TX_OK;
+
+lio_xmit_failed:
+ stats->tx_dropped++;
+ netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
+ iq_no, stats->tx_dropped);
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+ recv_buffer_free(skb);
+ return NETDEV_TX_OK;
+}
+
+/** \brief Network device Tx timeout
+ * @param netdev pointer to network device
+ */
+static void liquidio_tx_timeout(struct net_device *netdev)
+{
+ struct lio *lio;
+
+ lio = GET_LIO(netdev);
+
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
+ netdev->stats.tx_dropped);
+ netdev->trans_start = jiffies;
+ txqs_wake(netdev);
+}
+
+int liquidio_set_feature(struct net_device *netdev, int cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** \brief Net device fix features
+ * @param netdev pointer to network device
+ * @param request features requested
+ * @returns updated features list
+ */
+static netdev_features_t liquidio_fix_features(struct net_device *netdev,
+ netdev_features_t request)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if ((request & NETIF_F_RXCSUM) &&
+ !(lio->dev_capability & NETIF_F_RXCSUM))
+ request &= ~NETIF_F_RXCSUM;
+
+ if ((request & NETIF_F_HW_CSUM) &&
+ !(lio->dev_capability & NETIF_F_HW_CSUM))
+ request &= ~NETIF_F_HW_CSUM;
+
+ if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
+ request &= ~NETIF_F_TSO;
+
+ if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
+ request &= ~NETIF_F_TSO6;
+
+ if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ /*Disable LRO if RXCSUM is off */
+ if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ return request;
+}
+
+/** \brief Net device set features
+ * @param netdev pointer to network device
+ * @param features features to enable/disable
+ */
+static int liquidio_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if (!((netdev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ else if (!(features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+
+ return 0;
+}
+
+static struct net_device_ops lionetdevops = {
+ .ndo_open = liquidio_open,
+ .ndo_stop = liquidio_stop,
+ .ndo_start_xmit = liquidio_xmit,
+ .ndo_get_stats = liquidio_get_stats,
+ .ndo_set_mac_address = liquidio_set_mac,
+ .ndo_set_rx_mode = liquidio_set_mcast_list,
+ .ndo_tx_timeout = liquidio_tx_timeout,
+ .ndo_change_mtu = liquidio_change_mtu,
+ .ndo_do_ioctl = liquidio_ioctl,
+ .ndo_fix_features = liquidio_fix_features,
+ .ndo_set_features = liquidio_set_features,
+};
+
+/** \brief Entry point for the liquidio module
+ */
+static int __init liquidio_init(void)
+{
+ int i;
+ struct handshake *hs;
+
+ init_completion(&first_stage);
+
+ octeon_init_device_list(conf_type);
+
+ if (liquidio_init_pci())
+ return -EINVAL;
+
+ wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+ if (hs->pci_dev) {
+ wait_for_completion(&hs->init);
+ if (!hs->init_ok) {
+ /* init handshake failed */
+ dev_err(&hs->pci_dev->dev,
+ "Failed to init device\n");
+ liquidio_deinit_pci();
+ return -EIO;
+ }
+ }
+ }
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+ if (hs->pci_dev) {
+ wait_for_completion_timeout(&hs->started,
+ msecs_to_jiffies(30000));
+ if (!hs->started_ok) {
+ /* starter handshake failed */
+ dev_err(&hs->pci_dev->dev,
+ "Firmware failed to start\n");
+ liquidio_deinit_pci();
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ int ifidx = 0;
+ union oct_link_status *ls;
+ int i;
+
+ if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
+ (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+ dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
+ recv_pkt->buffer_size[0],
+ recv_pkt->rh.r_nic_info.ifidx);
+ goto nic_info_err;
+ }
+
+ ifidx = recv_pkt->rh.r_nic_info.ifidx;
+ ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+
+ octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
+
+ update_link_status(oct->props[ifidx].netdev, ls);
+
+nic_info_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+/**
+ * \brief Setup network interfaces
+ * @param octeon_dev octeon device
+ *
+ * Called during init time for each device. It assumes the NIC
+ * is already up and running. The link information for each
+ * interface is passed in link_info.
+ */
+static int setup_nic_devices(struct octeon_device *octeon_dev)
+{
+ struct lio *lio = NULL;
+ struct net_device *netdev;
+ u8 mac[6], i, j;
+ struct octeon_soft_command *sc;
+ struct liquidio_if_cfg_context *ctx;
+ struct liquidio_if_cfg_resp *resp;
+ struct octdev_props *props;
+ int retval, num_iqueues, num_oqueues, q_no;
+ u64 q_mask;
+ int num_cpus = num_online_cpus();
+ union oct_nic_if_cfg if_cfg;
+ unsigned int base_queue;
+ unsigned int gmx_port_id;
+ u32 resp_size, ctx_size;
+
+ /* This is to handle link status changes */
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_INFO,
+ lio_nic_info, octeon_dev);
+
+ /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
+ * They are handled directly.
+ */
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
+ free_netbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
+ free_netsgbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
+ free_netsgbuf_with_resp);
+
+ for (i = 0; i < octeon_dev->ifcount; i++) {
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ ctx_size = sizeof(struct liquidio_if_cfg_context);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(octeon_dev, 0,
+ resp_size, ctx_size);
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ num_iqueues =
+ CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
+ num_oqueues =
+ CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
+ base_queue =
+ CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
+ gmx_port_id =
+ CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
+ if (num_iqueues > num_cpus)
+ num_iqueues = num_cpus;
+ if (num_oqueues > num_cpus)
+ num_oqueues = num_cpus;
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "requesting config for interface %d, iqs %d, oqs %d\n",
+ i, num_iqueues, num_oqueues);
+ ACCESS_ONCE(ctx->cond) = 0;
+ ctx->octeon_id = lio_get_device_id(octeon_dev);
+ init_waitqueue_head(&ctx->wc);
+
+ if_cfg.u64 = 0;
+ if_cfg.s.num_iqueues = num_iqueues;
+ if_cfg.s.num_oqueues = num_oqueues;
+ if_cfg.s.base_queue = base_queue;
+ if_cfg.s.gmx_port_id = gmx_port_id;
+ octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_IF_CFG, i,
+ if_cfg.u64, 0);
+
+ sc->callback = if_cfg_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 1000;
+
+ retval = octeon_send_soft_command(octeon_dev, sc);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "iq/oq config failed status: %x\n",
+ retval);
+ /* Soft instr is freed by driver in case of failure. */
+ goto setup_nic_dev_fail;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ sleep_cond(&ctx->wc, &ctx->cond);
+ retval = resp->status;
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ num_iqueues = hweight64(resp->cfg_info.iqmask);
+ num_oqueues = hweight64(resp->cfg_info.oqmask);
+
+ if (!(num_iqueues) || !(num_oqueues)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
+ resp->cfg_info.iqmask,
+ resp->cfg_info.oqmask);
+ goto setup_nic_dev_fail;
+ }
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+ i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues);
+ netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+
+ if (!netdev) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ props = &octeon_dev->props[i];
+ props->netdev = netdev;
+
+ if (num_iqueues > 1)
+ lionetdevops.ndo_select_queue = select_q;
+
+ /* Associate the routines that will handle different
+ * netdev tasks.
+ */
+ netdev->netdev_ops = &lionetdevops;
+
+ lio = GET_LIO(netdev);
+
+ memset(lio, 0, sizeof(struct lio));
+
+ lio->linfo.ifidx = resp->cfg_info.ifidx;
+ lio->ifidx = resp->cfg_info.ifidx;
+
+ lio->linfo.num_rxpciq = num_oqueues;
+ lio->linfo.num_txpciq = num_iqueues;
+ q_mask = resp->cfg_info.oqmask;
+ /* q_mask is 0-based and already verified mask is nonzero */
+ for (j = 0; j < num_oqueues; j++) {
+ q_no = __ffs64(q_mask);
+ q_mask &= (~(1UL << q_no));
+ lio->linfo.rxpciq[j] = q_no;
+ }
+ q_mask = resp->cfg_info.iqmask;
+ for (j = 0; j < num_iqueues; j++) {
+ q_no = __ffs64(q_mask);
+ q_mask &= (~(1UL << q_no));
+ lio->linfo.txpciq[j] = q_no;
+ }
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+
+ lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+ netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+
+ netdev->features = lio->dev_capability;
+ netdev->vlan_features = lio->dev_capability;
+
+ netdev->hw_features = lio->dev_capability;
+
+ /* Point to the properties for octeon device to which this
+ * interface belongs.
+ */
+ lio->oct_dev = octeon_dev;
+ lio->octprops = props;
+ lio->netdev = netdev;
+ spin_lock_init(&lio->lock);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "if%d gmx: %d hw_addr: 0x%llx\n", i,
+ lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+
+ /* 64-bit swap required on LE machines */
+ octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
+ for (j = 0; j < 6; j++)
+ mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
+
+ /* Copy MAC Address to OS network device structure */
+
+ ether_addr_copy(netdev->dev_addr, mac);
+
+ if (setup_io_queues(octeon_dev, netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
+
+ /* By default all interfaces on a single Octeon use the same
+ * Tx and Rx queues
+ */
+ lio->txq = lio->linfo.txpciq[0];
+ lio->rxq = lio->linfo.rxpciq[0];
+
+ lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
+ lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
+
+ if (setup_glist(lio)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Gather list allocation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ /* Register ethtool support */
+ liquidio_set_ethtool_ops(netdev);
+
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+
+ if ((debug != -1) && (debug & NETIF_MSG_HW))
+ liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+
+ /* Register the network device with the OS */
+ if (register_netdev(netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
+ i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ netif_carrier_off(netdev);
+
+ if (lio->linfo.link.s.status) {
+ netif_carrier_on(netdev);
+ start_txq(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup successful\n", i);
+
+ octeon_free_soft_command(octeon_dev, sc);
+ }
+
+ return 0;
+
+setup_nic_dev_fail:
+
+ octeon_free_soft_command(octeon_dev, sc);
+
+ while (i--) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup failed\n", i);
+ liquidio_destroy_nic_device(octeon_dev, i);
+ }
+ return -ENODEV;
+}
+
+/**
+ * \brief Initialize the NIC
+ * @param oct octeon device
+ *
+ * This initialization routine is called once the Octeon device application is
+ * up and running
+ */
+static int liquidio_init_nic_module(struct octeon_device *oct)
+{
+ struct oct_intrmod_cfg *intrmod_cfg;
+ int retval = 0;
+ int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
+
+ dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
+
+ /* only default iq and oq were initialized
+ * initialize the rest as well
+ */
+ /* run port_config command for each port */
+ oct->ifcount = num_nic_ports;
+
+ memset(oct->props, 0,
+ sizeof(struct octdev_props) * num_nic_ports);
+
+ retval = setup_nic_devices(oct);
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
+ goto octnet_init_failure;
+ }
+
+ liquidio_ptp_init(oct);
+
+ /* Initialize interrupt moderation params */
+ intrmod_cfg = &oct->intrmod;
+ intrmod_cfg->intrmod_enable = 1;
+ intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+ intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+ intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
+ intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
+ intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
+ intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
+
+ return retval;
+
+octnet_init_failure:
+
+ oct->ifcount = 0;
+
+ return retval;
+}
+
+/**
+ * \brief starter callback that invokes the remaining initialization work after
+ * the NIC is up and running.
+ * @param work work_struct whose cavium_wk container carries the octeon device
+ */
+static void nic_starter(struct work_struct *work)
+{
+ struct octeon_device *oct;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+
+ oct = (struct octeon_device *)wk->ctxptr;
+
+ if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
+ return;
+
+ /* If the status of the device is CORE_OK, the core
+ * application has reported its application type. Call
+ * any registered handlers now and move to the RUNNING
+ * state.
+ */
+ if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
+ schedule_delayed_work(&oct->nic_poll_work.work,
+ LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+ return;
+ }
+
+ atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+ if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
+ dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
+
+ if (liquidio_init_nic_module(oct))
+ dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
+ else
+ handshake[oct->octeon_id].started_ok = 1;
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "Unexpected application running on NIC (%d). Check firmware.\n",
+ oct->app_mode);
+ }
+
+ complete(&handshake[oct->octeon_id].started);
+}
+
+/**
+ * \brief Device initialization for each Octeon device that is probed
+ * @param octeon_dev octeon device
+ */
+static int octeon_device_init(struct octeon_device *octeon_dev)
+{
+ int j, ret;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)octeon_dev->priv;
+ atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
+
+ /* Enable access to the octeon device and make its DMA capability
+ * known to the OS.
+ */
+ if (octeon_pci_os_setup(octeon_dev))
+ return 1;
+
+ /* Identify the Octeon type and map the BAR address space. */
+ if (octeon_chip_specific_setup(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+
+ octeon_dev->app_mode = CVM_DRV_INVALID_APP;
+
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+
+ /* Initialize the dispatch mechanism used to push packets arriving on
+ * Octeon Output queues.
+ */
+ if (octeon_init_dispatch_list(octeon_dev))
+ return 1;
+
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_CORE_DRV_ACTIVE,
+ octeon_core_drv_init,
+ octeon_dev);
+
+ INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
+ octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
+ schedule_delayed_work(&octeon_dev->nic_poll_work.work,
+ LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+ octeon_set_io_queues_off(octeon_dev);
+
+ /* Setup the data structures that manage this Octeon's Input queues. */
+ if (octeon_setup_instr_queues(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "instruction queue initialization failed\n");
+ /* On error, release any previously allocated queues */
+ for (j = 0; j < octeon_dev->num_iqs; j++)
+ octeon_delete_instr_queue(octeon_dev, j);
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
+
+ /* Initialize soft command buffer pool
+ */
+ if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from user & kernel applications for this octeon device.
+ */
+ if (octeon_setup_response_list(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
+
+ if (octeon_setup_output_queues(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
+ /* Release any previously allocated queues */
+ for (j = 0; j < octeon_dev->num_oqs; j++)
+ octeon_delete_droq(octeon_dev, j);
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
+
+ /* The input and output queue registers were setup earlier (the queues
+ * were not enabled). Any additional registers that need to be
+ * programmed should be done now.
+ */
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Failed to configure device registers\n");
+ return ret;
+ }
+
+ /* Initialize the tasklet that handles output queue packet processing.*/
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
+ tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
+ (unsigned long)octeon_dev);
+
+ /* Setup the interrupt handler and record the INT SUM register address
+ */
+ octeon_setup_interrupt(octeon_dev);
+
+ /* Enable Octeon device interrupts */
+ octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
+
+ /* Enable the input and output queues for this Octeon device */
+ octeon_dev->fn_list.enable_io_queues(octeon_dev);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+
+ if (ddr_timeout == 0) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ }
+
+ schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+
+ /* Wait for the octeon to initialize DDR after the soft-reset. */
+ ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+ ret);
+ return 1;
+ }
+
+ if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
+ dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+ return 1;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+ ret = octeon_init_consoles(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+ return 1;
+ }
+ ret = octeon_add_console(octeon_dev, 0);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+ ret = load_firmware(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+ return 1;
+ }
+
+ handshake[octeon_dev->octeon_id].init_ok = 1;
+ complete(&handshake[octeon_dev->octeon_id].init);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
+
+ /* Send Credit for Octeon Output queues. Credits are always sent after
+ * the output queue is enabled.
+ */
+ for (j = 0; j < octeon_dev->num_oqs; j++)
+ writel(octeon_dev->droq[j]->max_count,
+ octeon_dev->droq[j]->pkts_credit_reg);
+
+ /* Packets can start arriving on the output queues from this point. */
+
+ return 0;
+}
+
+/**
+ * \brief Exits the module
+ */
+static void __exit liquidio_exit(void)
+{
+ liquidio_deinit_pci();
+
+ pr_info("LiquidIO network module is now unloaded\n");
+}
+
+module_init(liquidio_init);
+module_exit(liquidio_exit);
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
new file mode 100644
index 000000000..0ac347ccc
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -0,0 +1,673 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file liquidio_common.h
+ * \brief Common: Structures and macros used in PCI-NIC package by core and
+ * host driver.
+ */
+
+#ifndef __LIQUIDIO_COMMON_H__
+#define __LIQUIDIO_COMMON_H__
+
+#include "octeon_config.h"
+
+#define LIQUIDIO_VERSION "1.1.9"
+#define LIQUIDIO_MAJOR_VERSION 1
+#define LIQUIDIO_MINOR_VERSION 1
+#define LIQUIDIO_MICRO_VERSION 9
+
+#define CONTROL_IQ 0
+/** Tag types used by Octeon cores in its work. */
+enum octeon_tag_type {
+ ORDERED_TAG = 0,
+ ATOMIC_TAG = 1,
+ NULL_TAG = 2,
+ NULL_NULL_TAG = 3
+};
+
+/* pre-defined host->NIC tag values */
+#define LIO_CONTROL (0x11111110)
+#define LIO_DATA(i) (0x11111111 + (i))
+
+/* Opcodes used by host driver/apps to perform operations on the core.
+ * These are used to identify the major subsystem that the operation
+ * is for.
+ */
+#define OPCODE_CORE 0 /* used for generic core operations */
+#define OPCODE_NIC 1 /* used for NIC operations */
+#define OPCODE_LAST OPCODE_NIC
+
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to be unique within a given subsystem.
+ */
+#define OPCODE_SUBCODE(op, sub) (((op & 0x0f) << 8) | ((sub) & 0x7f))
+
+/** OPCODE_CORE subcodes. For future use. */
+
+/** OPCODE_NIC subcodes */
+
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define OPCODE_NIC_CORE_DRV_ACTIVE 0x01
+#define OPCODE_NIC_NW_DATA 0x02 /* network packet data */
+#define OPCODE_NIC_CMD 0x03
+#define OPCODE_NIC_INFO 0x04
+#define OPCODE_NIC_PORT_STATS 0x05
+#define OPCODE_NIC_MDIO45 0x06
+#define OPCODE_NIC_TIMESTAMP 0x07
+#define OPCODE_NIC_INTRMOD_CFG 0x08
+#define OPCODE_NIC_IF_CFG 0x09
+
+#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
+
+#define OPCODE_SLOW_PATH(rh) \
+ (OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode) != \
+ OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA))
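+
+/* Example: OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA) evaluates to
+ * ((1 & 0x0f) << 8) | (0x02 & 0x7f) = 0x102; OPCODE_SLOW_PATH() treats any
+ * receive header that does not carry this opcode/subcode pair as slow-path
+ * traffic.
+ */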
+
+/* Application codes advertised by the core driver initialization packet. */
+#define CVM_DRV_APP_START 0x0
+#define CVM_DRV_NO_APP 0
+#define CVM_DRV_APP_COUNT 0x2
+#define CVM_DRV_BASE_APP (CVM_DRV_APP_START + 0x0)
+#define CVM_DRV_NIC_APP (CVM_DRV_APP_START + 0x1)
+#define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2)
+#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1)
+
+/* Macro to increment index.
+ * Index is incremented by count; if the sum reaches or exceeds
+ * max, the index is wrapped around to the start.
+ */
+#define INCR_INDEX(index, count, max) \
+do { \
+ if (((index) + (count)) >= (max)) \
+ index = ((index) + (count)) - (max); \
+ else \
+ index += (count); \
+} while (0)
+
+#define INCR_INDEX_BY1(index, max) \
+do { \
+ if ((++(index)) == (max)) \
+ index = 0; \
+} while (0)
+
+#define DECR_INDEX(index, count, max) \
+do { \
+ if ((count) > (index)) \
+ index = ((max) - ((count - index))); \
+ else \
+ index -= count; \
+} while (0)
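+
+/* Example: with max = 8, INCR_INDEX(index, 3, 8) applied to index = 6 gives
+ * (6 + 3) - 8 = 1, and DECR_INDEX(index, 3, 8) applied to index = 1 wraps
+ * back to 8 - (3 - 1) = 6.
+ */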
+
+#define OCT_BOARD_NAME 32
+#define OCT_SERIAL_LEN 64
+
+/* Structure used by core driver to send indication that the Octeon
+ * application is ready.
+ */
+struct octeon_core_setup {
+ u64 corefreq;
+
+ char boardname[OCT_BOARD_NAME];
+
+ char board_serial_number[OCT_SERIAL_LEN];
+
+ u64 board_rev_major;
+
+ u64 board_rev_minor;
+
+};
+
+/*--------------------------- SCATTER GATHER ENTRY -----------------------*/
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * an Octeon input instruction has this format.
+ */
+struct octeon_sg_entry {
+ /** The first 64 bits give the size of the data in each dptr. */
+ union {
+ u16 size[4];
+ u64 size64;
+ } u;
+
+ /** The 4 dptr pointers for this entry. */
+ u64 ptr[4];
+
+};
+
+#define OCT_SG_ENTRY_SIZE (sizeof(struct octeon_sg_entry))
+
+/* \brief Add size to gather list
+ * @param sg_entry scatter/gather entry
+ * @param size size to add
+ * @param pos position to add it.
+ */
+static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
+ u16 size,
+ u32 pos)
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ sg_entry->u.size[pos] = size;
+#else
+ sg_entry->u.size[3 - pos] = size;
+#endif
+}
+
+/*------------------------- End Scatter/Gather ---------------------------*/
+
+#define OCTNET_FRM_PTP_HEADER_SIZE 8
+#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */
+
+#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE)
+#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE)
+
+#define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE)
+
+/** NIC Commands are sent using this Octeon Input Queue */
+#define OCTNET_CMD_Q 0
+
+/* NIC Command types */
+#define OCTNET_CMD_CHANGE_MTU 0x1
+#define OCTNET_CMD_CHANGE_MACADDR 0x2
+#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3
+#define OCTNET_CMD_RX_CTL 0x4
+
+#define OCTNET_CMD_SET_MULTI_LIST 0x5
+#define OCTNET_CMD_CLEAR_STATS 0x6
+
+/* command for setting the speed, duplex & autoneg */
+#define OCTNET_CMD_SET_SETTINGS 0x7
+#define OCTNET_CMD_SET_FLOW_CTL 0x8
+
+#define OCTNET_CMD_MDIO_READ_WRITE 0x9
+#define OCTNET_CMD_GPIO_ACCESS 0xA
+#define OCTNET_CMD_LRO_ENABLE 0xB
+#define OCTNET_CMD_LRO_DISABLE 0xC
+#define OCTNET_CMD_SET_RSS 0xD
+#define OCTNET_CMD_WRITE_SA 0xE
+#define OCTNET_CMD_DELETE_SA 0xF
+#define OCTNET_CMD_UPDATE_SA 0x12
+
+#define OCTNET_CMD_TNL_RX_CSUM_CTL 0x10
+#define OCTNET_CMD_TNL_TX_CSUM_CTL 0x11
+#define OCTNET_CMD_IPSECV2_AH_ESP_CTL 0x13
+#define OCTNET_CMD_VERBOSE_ENABLE 0x14
+#define OCTNET_CMD_VERBOSE_DISABLE 0x15
+
+/* RX (packets coming from the wire) checksum verification flags */
+/* TCP/UDP csum */
+#define CNNIC_L4SUM_VERIFIED 0x1
+#define CNNIC_IPSUM_VERIFIED 0x2
+#define CNNIC_TUN_CSUM_VERIFIED 0x4
+#define CNNIC_CSUM_VERIFIED (CNNIC_IPSUM_VERIFIED | CNNIC_L4SUM_VERIFIED)
+
+/*LROIPV4 and LROIPV6 Flags*/
+#define OCTNIC_LROIPV4 0x1
+#define OCTNIC_LROIPV6 0x2
+
+/* Interface flags communicated between host driver and core app. */
+enum octnet_ifflags {
+ OCTNET_IFFLAG_PROMISC = 0x01,
+ OCTNET_IFFLAG_ALLMULTI = 0x02,
+ OCTNET_IFFLAG_MULTICAST = 0x04,
+ OCTNET_IFFLAG_BROADCAST = 0x08,
+ OCTNET_IFFLAG_UNICAST = 0x10
+};
+
+/* wqe
+ * --------------- 0
+ * | wqe word0-3 |
+ * --------------- 32
+ * | PCI IH |
+ * --------------- 40
+ * | RPTR |
+ * --------------- 48
+ * | PCI IRH |
+ * --------------- 56
+ * | OCT_NET_CMD |
+ * --------------- 64
+ * | Addtl 8-BData |
+ * | |
+ * ---------------
+ */
+
+union octnet_cmd {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 cmd:5;
+
+ u64 more:6; /* How many udd words follow the command */
+
+ u64 param1:29;
+
+ u64 param2:16;
+
+ u64 param3:8;
+
+#else
+
+ u64 param3:8;
+
+ u64 param2:16;
+
+ u64 param1:29;
+
+ u64 more:6;
+
+ u64 cmd:5;
+
+#endif
+ } s;
+
+};
+
+#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+
+/** Instruction Header */
+struct octeon_instr_ih {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Packet Order / Work Unit selection (1 of 8)*/
+ u64 qos:3;
+
+ /** Core group selection (1 of 16) */
+ u64 grp:4;
+
+ /** Short Raw Packet Indicator 1=short raw pkt */
+ u64 rs:1;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Tag Value */
+ u64 tag:32;
+#else
+ /** Tag Value */
+ u64 tag:32;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Short Raw Packet Indicator 1=short raw pkt */
+ u64 rs:1;
+
+ /** Core group selection (1 of 16) */
+ u64 grp:4;
+
+ /** Packet Order / Work Unit selection (1 of 8)*/
+ u64 qos:3;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+#endif
+};
+
+/** Input Request Header */
+struct octeon_instr_irh {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 opcode:4;
+ u64 rflag:1;
+ u64 subcode:7;
+ u64 len:3;
+ u64 rid:13;
+ u64 reserved:4;
+ u64 ossp:32; /* opcode/subcode specific parameters */
+#else
+ u64 ossp:32; /* opcode/subcode specific parameters */
+ u64 reserved:4;
+ u64 rid:13;
+ u64 len:3;
+ u64 subcode:7;
+ u64 rflag:1;
+ u64 opcode:4;
+#endif
+};
+
+/** Return Data Parameters */
+struct octeon_instr_rdp {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:49;
+ u64 pcie_port:3;
+ u64 rlen:12;
+#else
+ u64 rlen:12;
+ u64 pcie_port:3;
+ u64 reserved:49;
+#endif
+};
+
+/** Receive Header */
+union octeon_rh {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 u64;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 reserved:4;
+ u64 ossp:32; /** opcode/subcode specific parameters */
+ } r;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 extra:24;
+ u64 link:8;
+ u64 csum_verified:3; /** checksum verified. */
+ u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
+ } r_dh;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 num_gmx_ports:8;
+ u64 max_nic_ports:8;
+ u64 app_cap_flags:4;
+ u64 app_mode:16;
+ } r_core_drv_init;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13;
+ u64 reserved:4;
+ u64 extra:25;
+ u64 ifidx:7;
+ } r_nic_info;
+#else
+ u64 u64;
+ struct {
+ u64 ossp:32; /** opcode/subcode specific parameters */
+ u64 reserved:4;
+ u64 rid:13; /** req id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r;
+ struct {
+ u64 has_hwtstamp:1; /** 1 = has hwtstamp */
+ u64 csum_verified:3; /** checksum verified. */
+ u64 link:8;
+ u64 extra:24;
+ u64 rid:13; /** req id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_dh;
+ struct {
+ u64 app_mode:16;
+ u64 app_cap_flags:4;
+ u64 max_nic_ports:8;
+ u64 num_gmx_ports:8;
+ u64 rid:13;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_core_drv_init;
+ struct {
+ u64 ifidx:7;
+ u64 extra:25;
+ u64 reserved:4;
+ u64 rid:13;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_nic_info;
+#endif
+};
+
+#define OCT_RH_SIZE (sizeof(union octeon_rh))
+
+#define OCT_PKT_PARAM_IPV4OPTS 1
+#define OCT_PKT_PARAM_IPV6EXTHDR 2
+
+union octnic_packet_params {
+ u32 u32;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved:6;
+ u32 tnl_csum:1;
+ u32 ip_csum:1;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ipsec_ops:4;
+ u32 tsflag:1;
+ u32 csoffset:9;
+ u32 ifidx:8;
+#else
+ u32 ifidx:8;
+ u32 csoffset:9;
+ u32 tsflag:1;
+ u32 ipsec_ops:4;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ip_csum:1;
+ u32 tnl_csum:1;
+ u32 reserved:6;
+#endif
+ } s;
+};
+
+/** Status of an RGMII link on Octeon as seen by the core driver. */
+union oct_link_status {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 duplex:8;
+ u64 status:8;
+ u64 mtu:16;
+ u64 speed:16;
+ u64 autoneg:1;
+ u64 interface:4;
+ u64 pause:1;
+ u64 reserved:10;
+#else
+ u64 reserved:10;
+ u64 pause:1;
+ u64 interface:4;
+ u64 autoneg:1;
+ u64 speed:16;
+ u64 mtu:16;
+ u64 status:8;
+ u64 duplex:8;
+#endif
+ } s;
+};
+
+/** Information for an OCTEON Ethernet interface shared between core & host. */
+struct oct_link_info {
+ union oct_link_status link;
+ u64 hw_addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gmxport;
+ u8 rsvd[3];
+ u8 num_txpciq;
+ u8 num_rxpciq;
+ u8 ifidx;
+#else
+ u8 ifidx;
+ u8 num_rxpciq;
+ u8 num_txpciq;
+ u8 rsvd[3];
+ u16 gmxport;
+#endif
+
+ u8 txpciq[MAX_IOQS_PER_NICIF];
+ u8 rxpciq[MAX_IOQS_PER_NICIF];
+};
+
+#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
+
+struct liquidio_if_cfg_info {
+ u64 ifidx;
+ u64 iqmask; /** mask for IQs enabled for the port */
+ u64 oqmask; /** mask for OQs enabled for the port */
+ struct oct_link_info linfo; /** initial link information */
+};
+
+/** Stats for each NIC port in RX direction. */
+struct nic_rx_stats {
+ /* link-level stats */
+ u64 total_rcvd;
+ u64 bytes_rcvd;
+ u64 total_bcst;
+ u64 total_mcst;
+ u64 runts;
+ u64 ctl_rcvd;
+ u64 fifo_err; /* Accounts for over/under-run of buffers */
+ u64 dmac_drop;
+ u64 fcs_err;
+ u64 jabber_err;
+ u64 l2_err;
+ u64 frame_err;
+
+ /* firmware stats */
+ u64 fw_total_rcvd;
+ u64 fw_total_fwd;
+ u64 fw_err_pko;
+ u64 fw_err_link;
+ u64 fw_err_drop;
+ u64 fw_lro_pkts; /* Number of packets that are LROed */
+ u64 fw_lro_octs; /* Number of octets that are LROed */
+ u64 fw_total_lro; /* Number of LRO packets formed */
+ u64 fw_lro_aborts; /* Number of times LRO of a packet was aborted */
+ /* intrmod: packet forward rate */
+ u64 fwd_rate;
+};
+
+/** Stats for each NIC port in TX direction. */
+struct nic_tx_stats {
+ /* link-level stats */
+ u64 total_pkts_sent;
+ u64 total_bytes_sent;
+ u64 mcast_pkts_sent;
+ u64 bcast_pkts_sent;
+ u64 ctl_sent;
+ u64 one_collision_sent; /* Packets sent after one collision*/
+ u64 multi_collision_sent; /* Packets sent after multiple collision*/
+ u64 max_collision_fail; /* Packets not sent due to max collisions */
+ u64 max_deferral_fail; /* Packets not sent due to max deferrals */
+ u64 fifo_err; /* Accounts for over/under-run of buffers */
+ u64 runts;
+ u64 total_collisions; /* Total number of collisions detected */
+
+ /* firmware stats */
+ u64 fw_total_sent;
+ u64 fw_total_fwd;
+ u64 fw_err_pko;
+ u64 fw_err_link;
+ u64 fw_err_drop;
+};
+
+struct oct_link_stats {
+ struct nic_rx_stats fromwire;
+ struct nic_tx_stats fromhost;
+
+};
+
+#define LIO68XX_LED_CTRL_ADDR 0x3501
+#define LIO68XX_LED_CTRL_CFGON 0x1f
+#define LIO68XX_LED_CTRL_CFGOFF 0x100
+#define LIO68XX_LED_BEACON_ADDR 0x3508
+#define LIO68XX_LED_BEACON_CFGON 0x47fd
+#define LIO68XX_LED_BEACON_CFGOFF 0x11fc
+#define VITESSE_PHY_GPIO_DRIVEON 0x1
+#define VITESSE_PHY_GPIO_CFG 0x8
+#define VITESSE_PHY_GPIO_DRIVEOFF 0x4
+#define VITESSE_PHY_GPIO_HIGH 0x2
+#define VITESSE_PHY_GPIO_LOW 0x3
+
+struct oct_mdio_cmd {
+ u64 op;
+ u64 mdio_addr;
+ u64 value1;
+ u64 value2;
+ u64 value3;
+};
+
+#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
+
+#define LIO_INTRMOD_CHECK_INTERVAL 1
+#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */
+#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */
+#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */
+#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */
+#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */
+#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */
+
+struct oct_intrmod_cfg {
+ u64 intrmod_enable;
+ u64 intrmod_check_intrvl;
+ u64 intrmod_maxpkt_ratethr;
+ u64 intrmod_minpkt_ratethr;
+ u64 intrmod_maxcnt_trigger;
+ u64 intrmod_maxtmr_trigger;
+ u64 intrmod_mincnt_trigger;
+ u64 intrmod_mintmr_trigger;
+};
+
+#define BASE_QUEUE_NOT_REQUESTED 65535
+
+union oct_nic_if_cfg {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 base_queue:16;
+ u64 num_iqueues:16;
+ u64 num_oqueues:16;
+ u64 gmx_port_id:8;
+ u64 reserved:8;
+#else
+ u64 reserved:8;
+ u64 gmx_port_id:8;
+ u64 num_oqueues:16;
+ u64 num_iqueues:16;
+ u64 base_queue:16;
+#endif
+ } s;
+};
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
new file mode 100644
index 000000000..93819bd86
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
@@ -0,0 +1,57 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#ifndef _LIQUIDIO_IMAGE_H_
+#define _LIQUIDIO_IMAGE_H_
+
+#define LIO_MAX_FW_TYPE_LEN (8)
+#define LIO_MAX_FW_FILENAME_LEN (256)
+#define LIO_FW_DIR "liquidio/"
+#define LIO_FW_BASE_NAME "lio_"
+#define LIO_FW_NAME_SUFFIX ".bin"
+#define LIO_FW_NAME_TYPE_NIC "nic"
+#define LIO_FW_NAME_TYPE_NONE "none"
+#define LIO_MAX_FIRMWARE_VERSION_LEN 16
+
+#define LIO_MAX_BOOTCMD_LEN 1024
+#define LIO_MAX_IMAGES 16
+#define LIO_NIC_MAGIC 0x434E4943 /* "CNIC" */
+struct octeon_firmware_desc {
+ __be64 addr;
+ __be32 len;
+ __be32 crc32; /* crc32 of image */
+};
+
+/* Following the header is a list of 64-bit aligned binary images,
+ * as described by the desc field.
+ * Numeric fields are in network byte order.
+ */
+struct octeon_firmware_file_header {
+ __be32 magic;
+ char version[LIO_MAX_FIRMWARE_VERSION_LEN];
+ char bootcmd[LIO_MAX_BOOTCMD_LEN];
+ __be32 num_images;
+ struct octeon_firmware_desc desc[LIO_MAX_IMAGES];
+ __be32 pad;
+ __be32 crc32; /* header checksum */
+};
+
+#endif /* _LIQUIDIO_IMAGE_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
new file mode 100644
index 000000000..62a8dd5cd
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -0,0 +1,424 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file octeon_config.h
+ * \brief Host Driver: Configuration data structures for the host driver.
+ */
+
+#ifndef __OCTEON_CONFIG_H__
+#define __OCTEON_CONFIG_H__
+
+/*--------------------------CONFIG VALUES------------------------*/
+
+/* The following macros affect the way the driver data structures
+ * are generated for Octeon devices.
+ * They can be modified.
+ */
+
+/* The maximum number of Octeon devices is defined as MAX_OCTEON_NICIF to
+ * support multiple (<= MAX_OCTEON_NICIF) miniports
+ */
+#define MAX_OCTEON_NICIF 32
+#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF
+#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
+#define MAX_OCTEON_MULTICAST_ADDR 32
+
+/* CN6xxx IQ configuration macros */
+#define CN6XXX_MAX_INPUT_QUEUES 32
+#define CN6XXX_MAX_IQ_DESCRIPTORS 2048
+#define CN6XXX_DB_MIN 1
+#define CN6XXX_DB_MAX 8
+#define CN6XXX_DB_TIMEOUT 1
+
+/* CN6xxx OQ configuration macros */
+#define CN6XXX_MAX_OUTPUT_QUEUES 32
+#define CN6XXX_MAX_OQ_DESCRIPTORS 2048
+#define CN6XXX_OQ_BUF_SIZE 1536
+#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+ (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+ (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+
+#define CN6XXX_OQ_INTR_PKT 64
+#define CN6XXX_OQ_INTR_TIME 100
+#define DEFAULT_NUM_NIC_PORTS_66XX 2
+#define DEFAULT_NUM_NIC_PORTS_68XX 4
+#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2
+
+/* common OCTEON configuration macros */
+#define CN6XXX_CFG_IO_QUEUES 32
+#define OCTEON_32BYTE_INSTR 32
+#define OCTEON_64BYTE_INSTR 64
+#define OCTEON_MAX_BASE_IOQ 4
+#define OCTEON_OQ_BUFPTR_MODE 0
+#define OCTEON_OQ_INFOPTR_MODE 1
+
+#define OCTEON_DMA_INTR_PKT 64
+#define OCTEON_DMA_INTR_TIME 1000
+
+#define MAX_TXQS_PER_INTF 8
+#define MAX_RXQS_PER_INTF 8
+#define DEF_TXQS_PER_INTF 4
+#define DEF_RXQS_PER_INTF 4
+
+#define INVALID_IOQ_NO 0xff
+
+#define DEFAULT_POW_GRP 0
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs)
+#define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
+
+#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
+#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
+#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_SET_OQ_INTR_PKT(cfg, val) (cfg)->oq.oq_intr_pkt = val
+#define CFG_SET_OQ_INTR_TIME(cfg, val) (cfg)->oq.oq_intr_time = val
+
+#define CFG_GET_DMA_INTR_PKT(cfg) ((cfg)->dma.dma_intr_pkt)
+#define CFG_GET_DMA_INTR_TIME(cfg) ((cfg)->dma.dma_intr_time)
+#define CFG_GET_NUM_NIC_PORTS(cfg) ((cfg)->num_nic_ports)
+#define CFG_GET_NUM_DEF_TX_DESCS(cfg) ((cfg)->num_def_tx_descs)
+#define CFG_GET_NUM_DEF_RX_DESCS(cfg) ((cfg)->num_def_rx_descs)
+#define CFG_GET_DEF_RX_BUF_SIZE(cfg) ((cfg)->def_rx_buf_size)
+
+#define CFG_GET_MAX_TXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].max_txqs)
+#define CFG_GET_NUM_TXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_txqs)
+#define CFG_GET_MAX_RXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].max_rxqs)
+#define CFG_GET_NUM_RXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_rxqs)
+#define CFG_GET_NUM_RX_DESCS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_rx_descs)
+#define CFG_GET_NUM_TX_DESCS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_tx_descs)
+#define CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].rx_buf_size)
+#define CFG_GET_BASE_QUE_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].base_queue)
+#define CFG_GET_GMXID_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].gmx_port_id)
+
+#define CFG_GET_CTRL_Q_GRP(cfg) ((cfg)->misc.ctrlq_grp)
+#define CFG_GET_HOST_LINK_QUERY_INTERVAL(cfg) \
+ ((cfg)->misc.host_link_query_interval)
+#define CFG_GET_OCT_LINK_QUERY_INTERVAL(cfg) \
+ ((cfg)->misc.oct_link_query_interval)
+#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
+
+/* Max IOQs per OCTEON Link */
+#define MAX_IOQS_PER_NICIF 32
+
+enum lio_card_type {
+ LIO_210SV = 0, /* Two port, 66xx */
+ LIO_210NV, /* Two port, 68xx */
+ LIO_410NV /* Four port, 68xx */
+};
+
+#define LIO_210SV_NAME "210sv"
+#define LIO_210NV_NAME "210nv"
+#define LIO_410NV_NAME "410nv"
+
+/** Structure to define the configuration attributes for each Input queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_iq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:32;
+
+ /** Minimum ticks to wait before checking for pending instructions. */
+ u64 db_timeout:16;
+
+ /** Minimum number of commands pending to be posted to Octeon
+ * before driver hits the Input queue doorbell.
+ */
+ u64 db_min:8;
+
+ /** Command size - 32 or 64 bytes */
+ u64 instr_type:32;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ u64 pending_list_size:32;
+
+ /* Max number of IQs available */
+ u64 max_iqs:8;
+#else
+ /* Max number of IQs available */
+ u64 max_iqs:8;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ u64 pending_list_size:32;
+
+ /** Command size - 32 or 64 bytes */
+ u64 instr_type:32;
+
+ /** Minimum number of commands pending to be posted to Octeon
+ * before driver hits the Input queue doorbell.
+ */
+ u64 db_min:8;
+
+ /** Minimum ticks to wait before checking for pending instructions. */
+ u64 db_timeout:16;
+
+ u64 reserved:32;
+#endif
+};
+
+/** Structure to define the configuration attributes for each Output queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_oq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:16;
+
+ u64 pkts_per_intr:16;
+
+ /** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+ * host if at least one packet was sent in the time interval specified
+ * by this field. The driver uses time interval interrupt coalescing
+ * by default. The time is specified in microseconds.
+ */
+ u64 oq_intr_time:16;
+
+ /** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver
+ * usually does not use packet count interrupt coalescing.
+ */
+ u64 oq_intr_pkt:16;
+
+ /** The number of buffers that were consumed during packet processing by
+ * the driver on this Output queue before the driver attempts to
+ * replenish
+ * the descriptor ring with new buffers.
+ */
+ u64 refill_threshold:16;
+
+ /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ u64 info_ptr:32;
+
+ /* Max number of OQs available */
+ u64 max_oqs:8;
+
+#else
+ /* Max number of OQs available */
+ u64 max_oqs:8;
+
+ /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ u64 info_ptr:32;
+
+ /** The number of buffers that were consumed during packet processing by
+ * the driver on this Output queue before the driver attempts to
+ * replenish
+ * the descriptor ring with new buffers.
+ */
+ u64 refill_threshold:16;
+
+ /** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver
+ * usually does not use packet count interrupt coalescing.
+ */
+ u64 oq_intr_pkt:16;
+
+ /** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+ * host if at least one packet was sent in the time interval specified
+ * by this field. The driver uses time interval interrupt coalescing
+ * by default. The time is specified in microseconds.
+ */
+ u64 oq_intr_time:16;
+
+ u64 pkts_per_intr:16;
+
+ u64 reserved:16;
+#endif
+
+};
+
+/** This structure contains the NIC link configuration attributes,
+ * common to all OCTEON models.
+ */
+struct octeon_nic_if_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:56;
+
+ u64 base_queue:16;
+
+ u64 gmx_port_id:8;
+
+ /* SKB size. We need not change the buffer size even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ u64 rx_buf_size:16;
+
+ /* Num of desc for tx rings */
+ u64 num_tx_descs:16;
+
+ /* Num of desc for rx rings */
+ u64 num_rx_descs:16;
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ u64 num_rxqs:16;
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ u64 max_rxqs:16;
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ u64 num_txqs:16;
+
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ u64 max_txqs:16;
+#else
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ u64 max_txqs:16;
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ u64 num_txqs:16;
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ u64 max_rxqs:16;
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ u64 num_rxqs:16;
+
+ /* Num of desc for rx rings */
+ u64 num_rx_descs:16;
+
+ /* Num of desc for tx rings */
+ u64 num_tx_descs:16;
+
+ /* SKB size. We need not change the buffer size even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ u64 rx_buf_size:16;
+
+ u64 gmx_port_id:8;
+
+ u64 base_queue:16;
+
+ u64 reserved:56;
+#endif
+
+};
+
+/** Structure to define the configuration attributes for meta data.
+ * Applicable to all Octeon processors.
+ */
+
+struct octeon_misc_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /** Host link status polling period */
+ u64 host_link_query_interval:32;
+ /** Oct link status polling period */
+ u64 oct_link_query_interval:32;
+
+ u64 enable_sli_oq_bp:1;
+ /** Control IQ Group */
+ u64 ctrlq_grp:4;
+#else
+ /** Control IQ Group */
+ u64 ctrlq_grp:4;
+ /** BP for SLI OQ */
+ u64 enable_sli_oq_bp:1;
+ /** Oct link status polling period */
+ u64 oct_link_query_interval:32;
+ /** Host link status polling period */
+ u64 host_link_query_interval:32;
+#endif
+};
+
+/** Structure to define the configuration for all OCTEON processors. */
+struct octeon_config {
+ u16 card_type;
+ char *card_name;
+
+ /** Input Queue attributes. */
+ struct octeon_iq_config iq;
+
+ /** Output Queue attributes. */
+ struct octeon_oq_config oq;
+
+ /** NIC Port Configuration */
+ struct octeon_nic_if_config nic_if_cfg[MAX_OCTEON_NICIF];
+
+ /** Miscellaneous attributes */
+ struct octeon_misc_config misc;
+
+ int num_nic_ports;
+
+ int num_def_tx_descs;
+
+ /* Num of desc for rx rings */
+ int num_def_rx_descs;
+
+ int def_rx_buf_size;
+
+};
+
+/* The following config values are fixed and should not be modified. */
+
+/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
+#define MAX_BAR1_MAP_INDEX 2
+#define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024)
+
+/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access.
+ * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access.
+ */
+#define MAX_BAR1_IOREMAP_SIZE ((MAX_BAR1_MAP_INDEX + 1) * \
+ OCTEON_BAR1_ENTRY_SIZE)
+
+/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking.
+ * NoResponse lists are now maintained with each IQ (Dec 2007).
+ */
+#define MAX_RESPONSE_LISTS 4
+
+/* Opcode hash bits. The opcode is hashed on the lower 6 bits to look up the
+ * dispatch table.
+ */
+#define OPCODE_MASK_BITS 6
+
+/* Mask for the 6-bit lookup hash */
+#define OCTEON_OPCODE_MASK 0x3f
+
+/* Size of the dispatch table. The 6-bit hash can index into 2^6 entries */
+#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
+
+/* Maximum number of Octeon Instruction (command) queues */
+#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+
+#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
new file mode 100644
index 000000000..466147e40
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -0,0 +1,723 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/**
+ * @file octeon_console.c
+ */
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void octeon_remote_lock(void);
+static void octeon_remote_unlock(void);
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+ const char *name,
+ u32 flags);
+
+#define MIN(a, b) min((a), (b))
+#define CAST_ULL(v) ((u64)(v))
+
+#define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008
+#define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004
+#define BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR 0x0006c000
+#define BOOTLOADER_PCI_READ_DESC_ADDR 0x0006c100
+#define BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN 248
+
+#define OCTEON_PCI_IO_BUF_OWNER_OCTEON 0x00000001
+#define OCTEON_PCI_IO_BUF_OWNER_HOST 0x00000002
+
+/** Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+
+/** minimum alignment of bootmem allocated blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+
+/** CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0
+
+/* Current versions */
+#define OCTEON_PCI_CONSOLE_MAJOR_VERSION 1
+#define OCTEON_PCI_CONSOLE_MINOR_VERSION 0
+#define OCTEON_PCI_CONSOLE_BLOCK_NAME "__pci_console"
+#define OCTEON_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */
+
+/* First three members of cvmx_bootmem_desc are left in original
+** positions for backwards compatibility.
+** Assumes big endian target
+*/
+struct cvmx_bootmem_desc {
+ /** spinlock to control access to list */
+ u32 lock;
+
+ /** flags for indicating various conditions */
+ u32 flags;
+
+ u64 head_addr;
+
+ /** incremented when incompatible changes are made */
+ u32 major_version;
+
+ /** incremented when compatible changes are made;
+ * reset to zero when the major version is incremented
+ */
+ u32 minor_version;
+
+ u64 app_data_addr;
+ u64 app_data_size;
+
+ /** number of elements in named blocks array */
+ u32 nb_num_blocks;
+
+ /** length of name array in bootmem blocks */
+ u32 named_block_name_len;
+
+ /** address of named memory block descriptors */
+ u64 named_block_array_addr;
+};
+
+/* Structure that defines a single console.
+ *
+ * Note: when read_index == write_index, the buffer is empty.
+ * The actual usable size of each console is console_buf_size - 1.
+ */
+struct octeon_pci_console {
+ u64 input_base_addr;
+ u32 input_read_index;
+ u32 input_write_index;
+ u64 output_base_addr;
+ u32 output_read_index;
+ u32 output_write_index;
+ u32 lock;
+ u32 buf_size;
+};
+
+/* This is the main container structure that contains all the information
+ * about all PCI consoles. The address of this structure is passed to various
+ * routines that operate on PCI consoles.
+ */
+struct octeon_pci_console_desc {
+ u32 major_version;
+ u32 minor_version;
+ u32 lock;
+ u32 flags;
+ u32 num_consoles;
+ u32 pad;
+ /* must be 64 bit aligned here... */
+ /* Array of addresses of octeon_pci_console structures */
+ u64 console_addr_array[0];
+ /* Implicit storage for console_addr_array */
+};
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
+
+/**
+ * This macro returns a member of the cvmx_bootmem_desc
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc to read.
+ * Regardless of the type of the field, the return type is always
+ * a u64.
+ */
+#define CVMX_BOOTMEM_DESC_GET_FIELD(oct, field) \
+ __cvmx_bootmem_desc_get(oct, oct->bootmem_desc_addr, \
+ offsetof(struct cvmx_bootmem_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
+
+#define __cvmx_bootmem_lock(flags)
+#define __cvmx_bootmem_unlock(flags)
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc to read. Regardless of the type
+ * of the field, the return type is always a u64. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(oct, addr, field) \
+ __cvmx_bootmem_desc_get(oct, addr, \
+ offsetof(struct cvmx_bootmem_named_block_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
+ *
+ * @param oct Pointer to current octeon device
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a u64.
+ */
+static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct,
+ u64 base,
+ u32 offset,
+ u32 size)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size) {
+ case 4:
+ return octeon_read_device_mem32(oct, base);
+ case 8:
+ return octeon_read_device_mem64(oct, base);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * This function retrieves the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to receive the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
+ u64 addr,
+ char *str,
+ u32 len)
+{
+ addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
+ octeon_pci_read_core_mem(oct, addr, str, len);
+ str[len] = 0;
+}
+
+/* See header file for descriptions of functions */
+
+/**
+ * Check the version information on the bootmem descriptor
+ *
+ * @param exact_match
+ * Exact major version to check against. A zero means
+ * check that the version supports named blocks.
+ *
+ * @return Zero if the version is correct. Negative if the version is
+ * incorrect. Failures also cause a message to be displayed.
+ */
+static int __cvmx_bootmem_check_version(struct octeon_device *oct,
+ u32 exact_match)
+{
+ u32 major_version;
+ u32 minor_version;
+
+ if (!oct->bootmem_desc_addr)
+ oct->bootmem_desc_addr =
+ octeon_read_device_mem64(oct,
+ BOOTLOADER_PCI_READ_DESC_ADDR);
+ major_version =
+ (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, major_version);
+ minor_version =
+ (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, minor_version);
+ dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__,
+ major_version);
+ if ((major_version > 3) ||
+ (exact_match && major_version != exact_match)) {
+ dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n",
+ major_version, minor_version,
+ CAST_ULL(oct->bootmem_desc_addr));
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static const struct cvmx_bootmem_named_block_desc
+*__cvmx_bootmem_find_named_block_flags(struct octeon_device *oct,
+ const char *name, u32 flags)
+{
+ struct cvmx_bootmem_named_block_desc *desc =
+ &oct->bootmem_named_block_desc;
+ u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags);
+
+ if (named_addr) {
+ desc->base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
+ base_addr);
+ desc->size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, size);
+ strncpy(desc->name, name, sizeof(desc->name));
+ desc->name[sizeof(desc->name) - 1] = 0;
+ return &oct->bootmem_named_block_desc;
+ } else {
+ return NULL;
+ }
+}
+
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+ const char *name,
+ u32 flags)
+{
+ u64 result = 0;
+
+ __cvmx_bootmem_lock(flags);
+ if (!__cvmx_bootmem_check_version(oct, 3)) {
+ u32 i;
+ u64 named_block_array_addr =
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct,
+ named_block_array_addr);
+ u32 num_blocks = (u32)
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct, nb_num_blocks);
+ u32 name_length = (u32)
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct, named_block_name_len);
+ u64 named_addr = named_block_array_addr;
+
+ for (i = 0; i < num_blocks; i++) {
+ u64 named_size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
+ size);
+ if (name && named_size) {
+ char *name_tmp =
+ kmalloc(name_length + 1, GFP_KERNEL);
+ CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
+ name_tmp,
+ name_length);
+ if (!strncmp(name, name_tmp, name_length)) {
+ result = named_addr;
+ kfree(name_tmp);
+ break;
+ }
+ kfree(name_tmp);
+ } else if (!name && !named_size) {
+ result = named_addr;
+ break;
+ }
+
+ named_addr +=
+ sizeof(struct cvmx_bootmem_named_block_desc);
+ }
+ }
+ __cvmx_bootmem_unlock(flags);
+ return result;
+}
+
+/**
+ * Find a named block on the remote Octeon
+ *
+ * @param name Name of block to find
+ * @param base_addr Address the block is at (OUTPUT)
+ * @param size The size of the block (OUTPUT)
+ *
+ * @return Zero on success, One on failure.
+ */
+static int octeon_named_block_find(struct octeon_device *oct, const char *name,
+ u64 *base_addr, u64 *size)
+{
+ const struct cvmx_bootmem_named_block_desc *named_block;
+
+ octeon_remote_lock();
+ named_block = __cvmx_bootmem_find_named_block_flags(oct, name, 0);
+ octeon_remote_unlock();
+ if (named_block) {
+ *base_addr = named_block->base_addr;
+ *size = named_block->size;
+ return 0;
+ }
+ return 1;
+}
+
+static void octeon_remote_lock(void)
+{
+ /* fill this in if any sharing is needed */
+}
+
+static void octeon_remote_unlock(void)
+{
+ /* fill this in if any sharing is needed */
+}
+
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+ u32 wait_hundredths)
+{
+ u32 len = strlen(cmd_str);
+
+ dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
+
+ if (len > BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1) {
+ dev_err(&oct->pci_dev->dev, "Command string too long, max length is: %d\n",
+ BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1);
+ return -1;
+ }
+
+ if (octeon_wait_for_bootloader(oct, wait_hundredths) != 0) {
+ dev_err(&oct->pci_dev->dev, "Bootloader not ready for command.\n");
+ return -1;
+ }
+
+ /* Write command to bootloader */
+ octeon_remote_lock();
+ octeon_pci_write_core_mem(oct, BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR,
+ (u8 *)cmd_str, len);
+ octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR,
+ len);
+ octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR,
+ OCTEON_PCI_IO_BUF_OWNER_OCTEON);
+
+ /* Bootloader should accept command very quickly
+ * if it really was ready
+ */
+ if (octeon_wait_for_bootloader(oct, 200) != 0) {
+ octeon_remote_unlock();
+ dev_err(&oct->pci_dev->dev, "Bootloader did not accept command.\n");
+ return -1;
+ }
+ octeon_remote_unlock();
+ return 0;
+}
+
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+ u32 wait_time_hundredths)
+{
+ dev_dbg(&oct->pci_dev->dev, "waiting %d0 ms for bootloader\n",
+ wait_time_hundredths);
+
+ if (octeon_mem_access_ok(oct))
+ return -1;
+
+ while (wait_time_hundredths > 0 &&
+ octeon_read_device_mem32(oct,
+ BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR)
+ != OCTEON_PCI_IO_BUF_OWNER_HOST) {
+ if (--wait_time_hundredths <= 0)
+ return -1;
+ schedule_timeout_uninterruptible(HZ / 100);
+ }
+ return 0;
+}
+
+static void octeon_console_handle_result(struct octeon_device *oct,
+ size_t console_num,
+ char *buffer, s32 bytes_read)
+{
+ struct octeon_console *console;
+
+ console = &oct->console[console_num];
+
+ console->waiting = 0;
+}
+
+static char console_buffer[OCTEON_CONSOLE_MAX_READ_BYTES];
+
+static void output_console_line(struct octeon_device *oct,
+ struct octeon_console *console,
+ size_t console_num,
+ char *console_buffer,
+ s32 bytes_read)
+{
+ char *line;
+ s32 i;
+
+ line = console_buffer;
+ for (i = 0; i < bytes_read; i++) {
+ /* Output a line at a time, prefixed */
+ if (console_buffer[i] == '\n') {
+ console_buffer[i] = '\0';
+ if (console->leftover[0]) {
+ dev_info(&oct->pci_dev->dev, "%lu: %s%s\n",
+ console_num, console->leftover,
+ line);
+ console->leftover[0] = '\0';
+ } else {
+ dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ console_num, line);
+ }
+ line = &console_buffer[i + 1];
+ }
+ }
+
+ /* Save off any leftovers */
+ if (line != &console_buffer[bytes_read]) {
+ console_buffer[bytes_read] = '\0';
+ strcpy(console->leftover, line);
+ }
+}
+
+static void check_console(struct work_struct *work)
+{
+ s32 bytes_read, tries, total_read;
+ struct octeon_console *console;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ size_t console_num = wk->ctxul;
+ u32 delay;
+
+ console = &oct->console[console_num];
+ tries = 0;
+ total_read = 0;
+
+ do {
+ /* Take console output regardless of whether it will
+ * be logged
+ */
+ bytes_read =
+ octeon_console_read(oct, console_num, console_buffer,
+ sizeof(console_buffer) - 1, 0);
+ if (bytes_read > 0) {
+ total_read += bytes_read;
+ if (console->waiting) {
+ octeon_console_handle_result(oct, console_num,
+ console_buffer,
+ bytes_read);
+ }
+ if (octeon_console_debug_enabled(console_num)) {
+ output_console_line(oct, console, console_num,
+ console_buffer, bytes_read);
+ }
+ } else if (bytes_read < 0) {
+ dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n",
+ console_num, bytes_read);
+ }
+
+ tries++;
+ } while ((bytes_read > 0) && (tries < 16));
+
+ /* If nothing was read after polling the console,
+ * output any leftover data
+ */
+ if (octeon_console_debug_enabled(console_num) &&
+ (total_read == 0) && (console->leftover[0])) {
+ dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ console_num, console->leftover);
+ console->leftover[0] = '\0';
+ }
+
+ delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+
+ schedule_delayed_work(&wk->work, msecs_to_jiffies(delay));
+}
+
+int octeon_init_consoles(struct octeon_device *oct)
+{
+ int ret = 0;
+ u64 addr, size;
+
+ ret = octeon_mem_access_ok(oct);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Memory access not okay'\n");
+ return ret;
+ }
+
+ ret = octeon_named_block_find(oct, OCTEON_PCI_CONSOLE_BLOCK_NAME, &addr,
+ &size);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Could not find console '%s'\n",
+ OCTEON_PCI_CONSOLE_BLOCK_NAME);
+ return ret;
+ }
+
+ /* num_consoles > 0 indicates that the consoles
+ * are accessible
+ */
+ oct->num_consoles = octeon_read_device_mem32(oct,
+ addr + offsetof(struct octeon_pci_console_desc,
+ num_consoles));
+ oct->console_desc_addr = addr;
+
+ dev_dbg(&oct->pci_dev->dev, "Initialized consoles. %d available\n",
+ oct->num_consoles);
+
+ return ret;
+}
+
+int octeon_add_console(struct octeon_device *oct, u32 console_num)
+{
+ int ret = 0;
+ u32 delay;
+ u64 coreaddr;
+ struct delayed_work *work;
+ struct octeon_console *console;
+
+ if (console_num >= oct->num_consoles) {
+ dev_err(&oct->pci_dev->dev,
+ "trying to read from console number %d when only 0 to %d exist\n",
+ console_num, oct->num_consoles);
+ } else {
+ console = &oct->console[console_num];
+
+ console->waiting = 0;
+
+ coreaddr = oct->console_desc_addr + console_num * 8 +
+ offsetof(struct octeon_pci_console_desc,
+ console_addr_array);
+ console->addr = octeon_read_device_mem64(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ buf_size);
+ console->buffer_size = octeon_read_device_mem32(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ input_base_addr);
+ console->input_base_addr =
+ octeon_read_device_mem64(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ output_base_addr);
+ console->output_base_addr =
+ octeon_read_device_mem64(oct, coreaddr);
+ console->leftover[0] = '\0';
+
+ work = &oct->console_poll_work[console_num].work;
+
+ INIT_DELAYED_WORK(work, check_console);
+ oct->console_poll_work[console_num].ctxptr = (void *)oct;
+ oct->console_poll_work[console_num].ctxul = console_num;
+ delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+ schedule_delayed_work(work, msecs_to_jiffies(delay));
+
+ if (octeon_console_debug_enabled(console_num)) {
+ ret = octeon_console_send_cmd(oct,
+ "setenv pci_console_active 1",
+ 2000);
+ }
+
+ console->active = 1;
+ }
+
+ return ret;
+}
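+
+/* Expected call order, sketched from the API above rather than taken from
+ * the probe path: octeon_init_consoles(oct) locates the console descriptor
+ * block and sets oct->num_consoles, octeon_add_console(oct, n) is then
+ * called for each console that should be polled, and
+ * octeon_remove_consoles(oct) undoes it all on teardown.
+ */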
+
+/**
+ * Removes all consoles
+ *
+ * @param oct octeon device
+ */
+void octeon_remove_consoles(struct octeon_device *oct)
+{
+ u32 i;
+ struct octeon_console *console;
+
+ for (i = 0; i < oct->num_consoles; i++) {
+ console = &oct->console[i];
+
+ if (!console->active)
+ continue;
+
+ cancel_delayed_work_sync(&oct->console_poll_work[i].
+ work);
+ console->addr = 0;
+ console->buffer_size = 0;
+ console->input_base_addr = 0;
+ console->output_base_addr = 0;
+ }
+
+ oct->num_consoles = 0;
+}
+
+static inline int octeon_console_free_bytes(u32 buffer_size,
+ u32 wr_idx,
+ u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size;
+}
+
+static inline int octeon_console_avail_bytes(u32 buffer_size,
+ u32 wr_idx,
+ u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return buffer_size - 1 -
+ octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
+}
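+
+/* Worked example for the ring-buffer arithmetic above (illustrative only):
+ * with buffer_size = 1024, rd_idx = 1000 and wr_idx = 10 the data wraps
+ * around the end of the ring, so free bytes = (1023 - (10 - 1000)) % 1024
+ * = 989 and available bytes = 1023 - 989 = 34 (24 bytes at the tail of the
+ * buffer plus 10 at the start). One slot is always left unused so that a
+ * full ring can be told apart from an empty one.
+ */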
+
+int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size, u32 flags)
+{
+ int bytes_to_read;
+ u32 rd_idx, wr_idx;
+ struct octeon_console *console;
+
+ if (console_num >= oct->num_consoles) {
+ dev_err(&oct->pci_dev->dev, "Attempted to read from disabled console %d\n",
+ console_num);
+ return 0;
+ }
+
+ console = &oct->console[console_num];
+
+ /* Check to see if any data is available.
+ * Maybe optimize this with 64-bit read.
+ */
+ rd_idx = octeon_read_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console, output_read_index));
+ wr_idx = octeon_read_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console, output_write_index));
+
+ bytes_to_read = octeon_console_avail_bytes(console->buffer_size,
+ wr_idx, rd_idx);
+ if (bytes_to_read <= 0)
+ return bytes_to_read;
+
+ bytes_to_read = MIN(bytes_to_read, (s32)buf_size);
+
+ /* Check to see if what we want to read is not contiguous, and limit
+ * ourselves to the contiguous block
+ */
+ if (rd_idx + bytes_to_read >= console->buffer_size)
+ bytes_to_read = console->buffer_size - rd_idx;
+
+ octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
+ buffer, bytes_to_read);
+ octeon_write_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console,
+ output_read_index),
+ (rd_idx + bytes_to_read) %
+ console->buffer_size);
+
+ return bytes_to_read;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
new file mode 100644
index 000000000..f67641a2f
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -0,0 +1,1304 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+/** Default configuration
+ * for CN66XX OCTEON Models.
+ */
+static struct octeon_config default_cn66xx_conf = {
+ .card_type = LIO_210SV,
+ .card_name = LIO_210SV_NAME,
+
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ },
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ /* For ethernet interface 0: Port cfg Attributes */
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. The buffer size need not change even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. The buffer size need not change even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ },
+};
+
+/** Default configuration
+ * for the CN68XX OCTEON Model (410NV card).
+ */
+
+static struct octeon_config default_cn68xx_conf = {
+ .card_type = LIO_410NV,
+ .card_name = LIO_410NV_NAME,
+
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ },
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. The buffer size need not change even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. The buffer size need not change even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ .nic_if_cfg[2] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. The buffer size need not change even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 2,
+ },
+
+ .nic_if_cfg[3] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. The buffer size need not change even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 3,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ },
+};
+
+/** Default configuration
+ * for the CN68XX OCTEON Model (210NV card).
+ */
+static struct octeon_config default_cn68xx_210nv_conf = {
+ .card_type = LIO_210NV,
+ .card_name = LIO_210NV_NAME,
+
+ /** IQ attributes */
+
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ },
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. The buffer size need not change even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. The buffer size need not change even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ },
+};
+
+enum {
+ OCTEON_CONFIG_TYPE_DEFAULT = 0,
+ NUM_OCTEON_CONFS,
+};
+
+static struct octeon_config_ptr {
+ u32 conf_type;
+} oct_conf_info[MAX_OCTEON_DEVICES] = {
+ {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ },
+};
+
+static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
+ "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+ "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
+ "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
+ "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
+ "INVALID"
+};
+
+static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
+ "BASE", "NIC", "UNKNOWN"};
+
+static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
+static u32 octeon_device_count;
+
+static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
+
+static void oct_set_config_info(int oct_id, int conf_type)
+{
+ if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
+ conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
+ oct_conf_info[oct_id].conf_type = conf_type;
+}
+
+void octeon_init_device_list(int conf_type)
+{
+ int i;
+
+ memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ oct_set_config_info(i, conf_type);
+}
+
+static void *__retrieve_octeon_config_info(struct octeon_device *oct,
+ u16 card_type)
+{
+ u32 oct_id = oct->octeon_id;
+ void *ret = NULL;
+
+ switch (oct_conf_info[oct_id].conf_type) {
+ case OCTEON_CONFIG_TYPE_DEFAULT:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ ret = (void *)&default_cn66xx_conf;
+ } else if ((oct->chip_id == OCTEON_CN68XX) &&
+ (card_type == LIO_210NV)) {
+ ret = (void *)&default_cn68xx_210nv_conf;
+ } else if ((oct->chip_id == OCTEON_CN68XX) &&
+ (card_type == LIO_410NV)) {
+ ret = (void *)&default_cn68xx_conf;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
+{
+ switch (oct->chip_id) {
+ case OCTEON_CN66XX:
+ case OCTEON_CN68XX:
+ return lio_validate_cn6xxx_config_info(oct, conf);
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
+{
+ void *conf = NULL;
+
+ conf = __retrieve_octeon_config_info(oct, card_type);
+ if (!conf)
+ return NULL;
+
+ if (__verify_octeon_config_info(oct, conf)) {
+ dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
+ return NULL;
+ }
+
+ return conf;
+}
+
+char *lio_get_state_string(atomic_t *state_ptr)
+{
+ s32 istate = (s32)atomic_read(state_ptr);
+
+ if (istate > OCT_DEV_STATES || istate < 0)
+ return oct_dev_state_str[OCT_DEV_STATE_INVALID];
+ return oct_dev_state_str[istate];
+}
+
+static char *get_oct_app_string(u32 app_mode)
+{
+ if (app_mode <= CVM_DRV_APP_END)
+ return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
+ return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
+}
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size)
+{
+ int ret = 0;
+ u8 *p;
+ u8 *buffer;
+ u32 crc32_result;
+ u64 load_addr;
+ u32 image_len;
+ struct octeon_firmware_file_header *h;
+ u32 i;
+
+ if (size < sizeof(struct octeon_firmware_file_header)) {
+ dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+ (u32)size,
+ (u32)sizeof(struct octeon_firmware_file_header));
+ return -EINVAL;
+ }
+
+ h = (struct octeon_firmware_file_header *)data;
+
+ if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+ dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+ return -EINVAL;
+ }
+
+ crc32_result =
+ crc32(~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->crc32)) {
+ dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+ crc32_result, be32_to_cpu(h->crc32));
+ return -EINVAL;
+ }
+
+ if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
+ LIQUIDIO_VERSION, h->version);
+ return -EINVAL;
+ }
+
+ if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+ dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+ be32_to_cpu(h->num_images));
+ return -EINVAL;
+ }
+
+ dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+ h->version);
+
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ memcpy(buffer, data, size);
+
+ p = buffer + sizeof(struct octeon_firmware_file_header);
+
+ /* load all images */
+ for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+ load_addr = be64_to_cpu(h->desc[i].addr);
+ image_len = be32_to_cpu(h->desc[i].len);
+
+ /* validate the image */
+ crc32_result = crc32(~0, p, image_len) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
+ dev_err(&oct->pci_dev->dev,
+ "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
+ i, crc32_result,
+ be32_to_cpu(h->desc[i].crc32));
+ ret = -EINVAL;
+ goto done_downloading;
+ }
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, image_len);
+
+ p += image_len;
+ dev_dbg(&oct->pci_dev->dev,
+ "Downloaded image %d (%d bytes) to address 0x%016llx\n",
+ i, image_len, load_addr);
+ }
+
+ /* Invoke the bootcmd */
+ ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+
+done_downloading:
+ kfree(buffer);
+
+ return ret;
+}
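+
+/* Rough layout of the firmware file as parsed above (for reference only;
+ * the exact field order inside the header is defined by
+ * struct octeon_firmware_file_header):
+ *
+ *   header:  magic, version, bootcmd, num_images,
+ *            per-image {addr, len, crc32} descriptors,
+ *            header crc32 computed over the header minus its last u32
+ *   data:    image 0 payload, image 1 payload, ... packed back to back,
+ *            each CRC-checked and written to desc[i].addr with
+ *            octeon_pci_write_core_mem()
+ */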
+
+void octeon_free_device_mem(struct octeon_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ /* could check mask as well */
+ vfree(oct->droq[i]);
+ }
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ /* could check mask as well */
+ vfree(oct->instr_queue[i]);
+ }
+
+ i = oct->octeon_id;
+ vfree(oct);
+
+ octeon_device[i] = NULL;
+ octeon_device_count--;
+}
+
+static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
+ u32 priv_size)
+{
+ struct octeon_device *oct;
+ u8 *buf = NULL;
+ u32 octdevsize = 0, configsize = 0, size;
+
+ switch (pci_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ configsize = sizeof(struct octeon_cn6xxx);
+ break;
+
+ default:
+ pr_err("%s: Unknown PCI Device: 0x%x\n",
+ __func__,
+ pci_id);
+ return NULL;
+ }
+
+ if (configsize & 0x7)
+ configsize += (8 - (configsize & 0x7));
+
+ octdevsize = sizeof(struct octeon_device);
+ if (octdevsize & 0x7)
+ octdevsize += (8 - (octdevsize & 0x7));
+
+ if (priv_size & 0x7)
+ priv_size += (8 - (priv_size & 0x7));
+
+ size = octdevsize + priv_size + configsize +
+ (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
+
+ buf = vmalloc(size);
+ if (!buf)
+ return NULL;
+
+ memset(buf, 0, size);
+
+ oct = (struct octeon_device *)buf;
+ oct->priv = (void *)(buf + octdevsize);
+ oct->chip = (void *)(buf + octdevsize + priv_size);
+ oct->dispatch.dlist = (struct octeon_dispatch *)
+ (buf + octdevsize + priv_size + configsize);
+
+ return oct;
+}
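+
+/* The single vmalloc()ed block above is carved up as
+ *
+ *   [ struct octeon_device | priv | chip config | dispatch.dlist entries ]
+ *
+ * with each of the first three sizes rounded up to a multiple of 8 bytes
+ * (e.g. a priv_size of 13 is padded to 16) so that every sub-region stays
+ * 8-byte aligned.
+ */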
+
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size)
+{
+ u32 oct_idx = 0;
+ struct octeon_device *oct = NULL;
+
+ for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
+ if (!octeon_device[oct_idx])
+ break;
+
+ if (oct_idx == MAX_OCTEON_DEVICES)
+ return NULL;
+
+ oct = octeon_allocate_device_mem(pci_id, priv_size);
+ if (!oct)
+ return NULL;
+
+ spin_lock_init(&oct->pci_win_lock);
+ spin_lock_init(&oct->mem_access_lock);
+
+ octeon_device_count++;
+ octeon_device[oct_idx] = oct;
+
+ oct->octeon_id = oct_idx;
+ snprintf((oct->device_name), sizeof(oct->device_name),
+ "LiquidIO%d", (oct->octeon_id));
+
+ return oct;
+}
+
+int octeon_setup_instr_queues(struct octeon_device *oct)
+{
+ u32 i, num_iqs = 0;
+ u32 num_descs = 0;
+
+ /* this causes queue 0 to be the default queue */
+ if (OCTEON_CN6XXX(oct)) {
+ num_iqs = 1;
+ num_descs =
+ CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ oct->num_iqs = 0;
+
+ for (i = 0; i < num_iqs; i++) {
+ oct->instr_queue[i] =
+ vmalloc(sizeof(struct octeon_instr_queue));
+ if (!oct->instr_queue[i])
+ return 1;
+
+ memset(oct->instr_queue[i], 0,
+ sizeof(struct octeon_instr_queue));
+
+ oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
+ if (octeon_init_instr_queue(oct, i, num_descs))
+ return 1;
+
+ oct->num_iqs++;
+ }
+
+ return 0;
+}
+
+int octeon_setup_output_queues(struct octeon_device *oct)
+{
+ u32 i, num_oqs = 0;
+ u32 num_descs = 0;
+ u32 desc_size = 0;
+
+ /* this causes queue 0 to be the default queue */
+ if (OCTEON_CN6XXX(oct)) {
+ /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
+ num_oqs = 1;
+ num_descs =
+ CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ desc_size =
+ CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ oct->num_oqs = 0;
+
+ for (i = 0; i < num_oqs; i++) {
+ oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
+ if (!oct->droq[i])
+ return 1;
+
+ memset(oct->droq[i], 0, sizeof(struct octeon_droq));
+
+ if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
+ return 1;
+
+ oct->num_oqs++;
+ }
+
+ return 0;
+}
+
+void octeon_set_io_queues_off(struct octeon_device *oct)
+{
+ /* Disable the i/p and o/p queues for this Octeon. */
+
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+}
+
+void octeon_set_droq_pkt_op(struct octeon_device *oct,
+ u32 q_no,
+ u32 enable)
+{
+ u32 reg_val = 0;
+
+ /* Set or clear the output-queue enable bit for this queue. */
+ reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+
+ if (enable)
+ reg_val = reg_val | (1 << q_no);
+ else
+ reg_val = reg_val & (~(1 << q_no));
+
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+}
+
+int octeon_init_dispatch_list(struct octeon_device *oct)
+{
+ u32 i;
+
+ oct->dispatch.count = 0;
+
+ for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+ oct->dispatch.dlist[i].opcode = 0;
+ INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
+ }
+
+ for (i = 0; i <= REQTYPE_LAST; i++)
+ octeon_register_reqtype_free_fn(oct, i, NULL);
+
+ spin_lock_init(&oct->dispatch.lock);
+
+ return 0;
+}
+
+void octeon_delete_dispatch_list(struct octeon_device *oct)
+{
+ u32 i;
+ struct list_head freelist, *temp, *tmp2;
+
+ INIT_LIST_HEAD(&freelist);
+
+ spin_lock_bh(&oct->dispatch.lock);
+
+ for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+ struct list_head *dispatch;
+
+ dispatch = &oct->dispatch.dlist[i].list;
+ while (dispatch->next != dispatch) {
+ temp = dispatch->next;
+ list_del(temp);
+ list_add_tail(temp, &freelist);
+ }
+
+ oct->dispatch.dlist[i].opcode = 0;
+ }
+
+ oct->dispatch.count = 0;
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ list_for_each_safe(temp, tmp2, &freelist) {
+ list_del(temp);
+ vfree(temp);
+ }
+}
+
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode)
+{
+ u32 idx;
+ struct list_head *dispatch;
+ octeon_dispatch_fn_t fn = NULL;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&octeon_dev->dispatch.lock);
+
+ if (octeon_dev->dispatch.count == 0) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+ fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
+ } else {
+ list_for_each(dispatch,
+ &octeon_dev->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ fn = ((struct octeon_dispatch *)
+ dispatch)->dispatch_fn;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return fn;
+}
+
+/* octeon_register_dispatch_fn
+ * Parameters:
+ * octeon_id - id of the octeon device.
+ * opcode - opcode for which driver should call the registered function
+ * subcode - subcode for which driver should call the registered function
+ * fn - The function to call when a packet with "opcode" arrives in
+ * octeon output queues.
+ * fn_arg - The argument to be passed when calling function "fn".
+ * Description:
+ * Registers a function and its argument to be called when a packet
+ * arrives in Octeon output queues with "opcode".
+ * Returns:
+ * Success: 0
+ * Failure: 1
+ * Locks:
+ * No locks are held.
+ */
+int
+octeon_register_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode,
+ octeon_dispatch_fn_t fn, void *fn_arg)
+{
+ u32 idx;
+ octeon_dispatch_fn_t pfn;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&oct->dispatch.lock);
+ /* Add dispatch function to first level of lookup table */
+ if (oct->dispatch.dlist[idx].opcode == 0) {
+ oct->dispatch.dlist[idx].opcode = combined_opcode;
+ oct->dispatch.dlist[idx].dispatch_fn = fn;
+ oct->dispatch.dlist[idx].arg = fn_arg;
+ oct->dispatch.count++;
+ spin_unlock_bh(&oct->dispatch.lock);
+ return 0;
+ }
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ /* Check if there was a function already registered for this
+ * opcode/subcode.
+ */
+ pfn = octeon_get_dispatch(oct, opcode, subcode);
+ if (!pfn) {
+ struct octeon_dispatch *dispatch;
+
+ dev_dbg(&oct->pci_dev->dev,
+ "Adding opcode to dispatch list linked list\n");
+ dispatch = (struct octeon_dispatch *)
+ vmalloc(sizeof(struct octeon_dispatch));
+ if (!dispatch) {
+ dev_err(&oct->pci_dev->dev,
+ "No memory to add dispatch function\n");
+ return 1;
+ }
+ dispatch->opcode = combined_opcode;
+ dispatch->dispatch_fn = fn;
+ dispatch->arg = fn_arg;
+
+ /* Add dispatch function to linked list of fn ptrs
+ * at the hashed index.
+ */
+ spin_lock_bh(&oct->dispatch.lock);
+ list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
+ oct->dispatch.count++;
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
+ opcode, subcode);
+ return 1;
+ }
+
+ return 0;
+}
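+
+/* A minimal usage sketch for the dispatch table (the opcode, subcode and
+ * handler names here are placeholders, not symbols defined by this driver;
+ * the handler mirrors the shape of octeon_core_drv_init() below):
+ *
+ *   static int my_pkt_handler(struct octeon_recv_info *rinfo, void *arg)
+ *   {
+ *           struct octeon_device *oct = arg;
+ *
+ *           ...consume the packet, free rinfo...
+ *           return 0;
+ *   }
+ *
+ *   if (octeon_register_dispatch_fn(oct, MY_OPCODE, MY_SUBCODE,
+ *                                   my_pkt_handler, oct))
+ *           dev_err(&oct->pci_dev->dev, "could not register dispatch fn\n");
+ */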
+
+/* octeon_unregister_dispatch_fn
+ * Parameters:
+ * oct - octeon device
+ * opcode - driver should unregister the function for this opcode
+ * subcode - driver should unregister the function for this subcode
+ * Description:
+ * Unregister the function set for this opcode+subcode.
+ * Returns:
+ * Success: 0
+ * Failure: 1
+ * Locks:
+ * No locks are held.
+ */
+int
+octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
+ u16 subcode)
+{
+ int retval = 0;
+ u32 idx;
+ struct list_head *dispatch, *dfree = NULL, *tmp2;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&oct->dispatch.lock);
+
+ if (oct->dispatch.count == 0) {
+ spin_unlock_bh(&oct->dispatch.lock);
+ dev_err(&oct->pci_dev->dev,
+ "No dispatch functions registered for this device\n");
+ return 1;
+ }
+
+ if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
+ dispatch = &oct->dispatch.dlist[idx].list;
+ if (dispatch->next != dispatch) {
+ dispatch = dispatch->next;
+ oct->dispatch.dlist[idx].opcode =
+ ((struct octeon_dispatch *)dispatch)->opcode;
+ oct->dispatch.dlist[idx].dispatch_fn =
+ ((struct octeon_dispatch *)
+ dispatch)->dispatch_fn;
+ oct->dispatch.dlist[idx].arg =
+ ((struct octeon_dispatch *)dispatch)->arg;
+ list_del(dispatch);
+ dfree = dispatch;
+ } else {
+ oct->dispatch.dlist[idx].opcode = 0;
+ oct->dispatch.dlist[idx].dispatch_fn = NULL;
+ oct->dispatch.dlist[idx].arg = NULL;
+ }
+ } else {
+ retval = 1;
+ list_for_each_safe(dispatch, tmp2,
+ &(oct->dispatch.dlist[idx].
+ list)) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ list_del(dispatch);
+ dfree = dispatch;
+ retval = 0;
+ }
+ }
+ }
+
+ if (!retval)
+ oct->dispatch.count--;
+
+ spin_unlock_bh(&oct->dispatch.lock);
+ vfree(dfree);
+ return retval;
+}
+
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
+{
+ u32 i;
+ char app_name[16];
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ struct octeon_core_setup *cs = NULL;
+ u32 num_nic_ports = 0;
+
+ if (OCTEON_CN6XXX(oct))
+ num_nic_ports =
+ CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+
+ if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
+ dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
+ atomic_read(&oct->status));
+ goto core_drv_init_err;
+ }
+
+ strncpy(app_name,
+ get_oct_app_string(
+ (u32)recv_pkt->rh.r_core_drv_init.app_mode),
+ sizeof(app_name) - 1);
+ oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+ if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
+ oct->fw_info.max_nic_ports =
+ (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
+ oct->fw_info.num_gmx_ports =
+ (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
+ }
+
+ if (oct->fw_info.max_nic_ports < num_nic_ports) {
+ dev_err(&oct->pci_dev->dev,
+ "Config has more ports than firmware allows (%d > %d).\n",
+ num_nic_ports, oct->fw_info.max_nic_ports);
+ goto core_drv_init_err;
+ }
+ oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
+ oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+
+ atomic_set(&oct->status, OCT_DEV_CORE_OK);
+
+ cs = &core_setup[oct->octeon_id];
+
+ if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
+ dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
+ (u32)sizeof(*cs),
+ recv_pkt->buffer_size[0]);
+ }
+
+ memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
+ strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
+ strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
+ OCT_SERIAL_LEN);
+
+ octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
+
+ oct->boardinfo.major = cs->board_rev_major;
+ oct->boardinfo.minor = cs->board_rev_minor;
+
+ dev_info(&oct->pci_dev->dev,
+ "Running %s (%llu Hz)\n",
+ app_name, CVM_CAST64(cs->corefreq));
+
+core_drv_init_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
+
+{
+ if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
+ (oct->io_qmask.iq & (1UL << q_no)))
+ return oct->instr_queue[q_no]->max_count;
+
+ return -1;
+}
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
+{
+ if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
+ (oct->io_qmask.oq & (1UL << q_no)))
+ return oct->droq[q_no]->max_count;
+ return -1;
+}
+
+/* Returns the host firmware handshake OCTEON specific configuration */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct)
+{
+ struct octeon_config *default_oct_conf = NULL;
+
+ /* check the OCTEON Device model & return the corresponding octeon
+ * configuration
+ */
+
+ if (OCTEON_CN6XXX(oct)) {
+ default_oct_conf =
+ (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ return default_oct_conf;
+}
+
+/* The scratch register address is the same in all OCT-II and CN70XX models */
+#define CNXX_SLI_SCRATCH1 0x3C0
+
+/** Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id)
+{
+ if (octeon_id >= MAX_OCTEON_DEVICES)
+ return NULL;
+ else
+ return octeon_device[octeon_id];
+}
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
+{
+ u64 val64;
+ unsigned long flags;
+ u32 val32, addrhi;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ /* The windowed read happens when the LSB of the addr is written.
+ * So write MSB first
+ */
+ addrhi = (addr >> 32);
+ if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
+ addrhi |= 0x00060000;
+ writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
+
+ /* Read back to preserve ordering of writes */
+ val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
+
+ writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
+ val32 = readl(oct->reg_list.pci_win_rd_addr_lo);
+
+ val64 = readq(oct->reg_list.pci_win_rd_data);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+
+ return val64;
+}
+
+void lio_pci_writeq(struct octeon_device *oct,
+ u64 val,
+ u64 addr)
+{
+ u32 val32;
+ unsigned long flags;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ writeq(addr, oct->reg_list.pci_win_wr_addr);
+
+ /* The write happens when the LSB is written. So write MSB first. */
+ writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
+ /* Read the MSB to ensure ordering of writes. */
+ val32 = readl(oct->reg_list.pci_win_wr_data_hi);
+
+ writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+}
+
+int octeon_mem_access_ok(struct octeon_device *oct)
+{
+ u64 access_okay = 0;
+
+ /* Check to make sure a DDR interface is enabled */
+ u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+
+ access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+
+ return access_okay ? 0 : 1;
+}
+
+int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
+{
+ int ret = 1;
+ u32 ms;
+
+ if (!timeout)
+ return ret;
+
+ while (*timeout == 0)
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
+ ms += HZ / 10) {
+ ret = octeon_mem_access_ok(oct);
+
+ /* wait 100 ms */
+ if (ret)
+ schedule_timeout_uninterruptible(HZ / 10);
+ }
+
+ return ret;
+}
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev)
+{
+ struct octeon_device *octeon_dev = (struct octeon_device *)dev;
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ if (octeon_device[i] == octeon_dev)
+ return octeon_dev->octeon_id;
+ return -1;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
new file mode 100644
index 000000000..36e1f85df
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -0,0 +1,649 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file octeon_device.h
+ * \brief Host Driver: This file defines the octeon device structure.
+ */
+
+#ifndef _OCTEON_DEVICE_H_
+#define _OCTEON_DEVICE_H_
+
+/** PCI VendorId Device Id */
+#define OCTEON_CN68XX_PCIID 0x91177d
+#define OCTEON_CN66XX_PCIID 0x92177d
+
+/** The driver identifies chips by these Ids, formed by combining the
+ * DeviceId and RevisionId; where the Revision Id is not needed to
+ * distinguish between chips, a revision id of 0 is used.
+ */
+#define OCTEON_CN68XX 0x0091
+#define OCTEON_CN66XX 0x0092
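+
+/* Illustrative decoding: OCTEON_CN68XX_PCIID (0x91177d) is
+ * (0x0091 << 16) | 0x177d, i.e. device id 0x0091 paired with Cavium's PCI
+ * vendor id 0x177d, while the chip id OCTEON_CN68XX is just 0x0091 because
+ * no revision bits are needed to tell it apart from other chips.
+ */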
+
+/** Endian-swap modes supported by Octeon. */
+enum octeon_pci_swap_mode {
+ OCTEON_PCI_PASSTHROUGH = 0,
+ OCTEON_PCI_64BIT_SWAP = 1,
+ OCTEON_PCI_32BIT_BYTE_SWAP = 2,
+ OCTEON_PCI_32BIT_LW_SWAP = 3
+};
+
+/*--------------- PCI BAR1 index registers -------------*/
+
+/* BAR1 Mask */
+#define PCI_BAR1_ENABLE_CA 1
+#define PCI_BAR1_ENDIAN_MODE OCTEON_PCI_64BIT_SWAP
+#define PCI_BAR1_ENTRY_VALID 1
+#define PCI_BAR1_MASK ((PCI_BAR1_ENABLE_CA << 3) \
+ | (PCI_BAR1_ENDIAN_MODE << 1) \
+ | PCI_BAR1_ENTRY_VALID)
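+
+/* For reference, with the values above PCI_BAR1_MASK evaluates to
+ * (1 << 3) | (1 << 1) | 1 = 0xB.
+ */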
+
+/** Octeon Device state.
+ * Each octeon device goes through each of these states
+ * as it is initialized.
+ */
+#define OCT_DEV_BEGIN_STATE 0x0
+#define OCT_DEV_PCI_MAP_DONE 0x1
+#define OCT_DEV_DISPATCH_INIT_DONE 0x2
+#define OCT_DEV_INSTR_QUEUE_INIT_DONE 0x3
+#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x4
+#define OCT_DEV_RESP_LIST_INIT_DONE 0x5
+#define OCT_DEV_DROQ_INIT_DONE 0x6
+#define OCT_DEV_IO_QUEUES_DONE 0x7
+#define OCT_DEV_CONSOLE_INIT_DONE 0x8
+#define OCT_DEV_HOST_OK 0x9
+#define OCT_DEV_CORE_OK 0xa
+#define OCT_DEV_RUNNING 0xb
+#define OCT_DEV_IN_RESET 0xc
+#define OCT_DEV_STATE_INVALID 0xd
+
+#define OCT_DEV_STATES OCT_DEV_STATE_INVALID
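+/* lio_get_state_string() in octeon_device.c indexes oct_dev_state_str[],
+ * which has OCT_DEV_STATES + 1 entries, with these values; the two lists
+ * must therefore be kept in step.
+ */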
+
+/** Octeon Device interrupts
+ * These interrupt bits are set in the int_status field of
+ * the octeon_device structure
+ */
+#define OCT_DEV_INTR_DMA0_FORCE 0x01
+#define OCT_DEV_INTR_DMA1_FORCE 0x02
+#define OCT_DEV_INTR_PKT_DATA 0x04
+
+#define LIO_RESET_SECS (3)
+
+/*---------------------------DISPATCH LIST-------------------------------*/
+
+/** The dispatch list entry.
+ * The driver keeps a record of functions registered for each
+ * response header opcode in this structure. Since the opcode is
+ * hashed to index into the driver's list, more than one opcode
+ * can hash to the same entry, in which case the list field points
+ * to a linked list with the other entries.
+ */
+struct octeon_dispatch {
+ /** List head for this entry */
+ struct list_head list;
+
+ /** The opcode for which the dispatch function & arg should be used */
+ u16 opcode;
+
+ /** The function to be called for a packet received by the driver */
+ octeon_dispatch_fn_t dispatch_fn;
+
+ /* The application specified argument to be passed to the above
+ * function along with the received packet
+ */
+ void *arg;
+};
+
+/** The dispatch list structure. */
+struct octeon_dispatch_list {
+ /** access to dispatch list must be atomic */
+ spinlock_t lock;
+
+ /** Count of dispatch functions currently registered */
+ u32 count;
+
+ /** The list of dispatch functions */
+ struct octeon_dispatch *dlist;
+};
+
+/*----------------------- THE OCTEON DEVICE ---------------------------*/
+
+#define OCT_MEM_REGIONS 3
+/** PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to different physical address spaces in
+ * the kernel.
+ */
+struct octeon_mmio {
+ /** PCI address to which the BAR is mapped. */
+ u64 start;
+
+ /** Length of this PCI address space. */
+ u32 len;
+
+ /** Length that has been mapped to phys. address space. */
+ u32 mapped_len;
+
+ /** The physical address to which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /** Flag indicating the mapping was successful. */
+ u32 done;
+};
+
+#define MAX_OCTEON_MAPS 32
+
+struct octeon_io_enable {
+ u32 iq;
+ u32 oq;
+ u32 iq64B;
+};
+
+struct octeon_reg_list {
+ u32 __iomem *pci_win_wr_addr_hi;
+ u32 __iomem *pci_win_wr_addr_lo;
+ u64 __iomem *pci_win_wr_addr;
+
+ u32 __iomem *pci_win_rd_addr_hi;
+ u32 __iomem *pci_win_rd_addr_lo;
+ u64 __iomem *pci_win_rd_addr;
+
+ u32 __iomem *pci_win_wr_data_hi;
+ u32 __iomem *pci_win_wr_data_lo;
+ u64 __iomem *pci_win_wr_data;
+
+ u32 __iomem *pci_win_rd_data_hi;
+ u32 __iomem *pci_win_rd_data_lo;
+ u64 __iomem *pci_win_rd_data;
+};
+
+#define OCTEON_CONSOLE_MAX_READ_BYTES 512
+struct octeon_console {
+ u32 active;
+ u32 waiting;
+ u64 addr;
+ u32 buffer_size;
+ u64 input_base_addr;
+ u64 output_base_addr;
+ char leftover[OCTEON_CONSOLE_MAX_READ_BYTES];
+};
+
+struct octeon_board_info {
+ char name[OCT_BOARD_NAME];
+ char serial_number[OCT_SERIAL_LEN];
+ u64 major;
+ u64 minor;
+};
+
+struct octeon_fn_list {
+ void (*setup_iq_regs)(struct octeon_device *, u32);
+ void (*setup_oq_regs)(struct octeon_device *, u32);
+
+ irqreturn_t (*process_interrupt_regs)(void *);
+ int (*soft_reset)(struct octeon_device *);
+ int (*setup_device_regs)(struct octeon_device *);
+ void (*reinit_regs)(struct octeon_device *);
+ void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
+ void (*bar1_idx_write)(struct octeon_device *, u32, u32);
+ u32 (*bar1_idx_read)(struct octeon_device *, u32);
+ u32 (*update_iq_read_idx)(struct octeon_device *,
+ struct octeon_instr_queue *);
+
+ void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
+ void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
+
+ void (*enable_interrupt)(void *);
+ void (*disable_interrupt)(void *);
+
+ void (*enable_io_queues)(struct octeon_device *);
+ void (*disable_io_queues)(struct octeon_device *);
+};
+
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Structure for named memory blocks
+ * Number of descriptors
+ * available can be changed without affecting compatibility,
+ * but name length changes require a bump in the bootmem
+ * descriptor version
+ * Note: This structure must be naturally 64 bit aligned, as a single
+ * memory image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+ /** Base address of named block */
+ u64 base_addr;
+
+ /** Size actually allocated for named block */
+ u64 size;
+
+ /** name of named block */
+ char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+struct oct_fw_info {
+ u32 max_nic_ports; /** max nic ports for the device */
+ u32 num_gmx_ports; /** num gmx ports */
+ u64 app_cap_flags; /** firmware cap flags */
+
+ /** The core application is running in this mode.
+ * See octeon-drv-opcodes.h for values.
+ */
+ u32 app_mode;
+ char liquidio_firmware_version[32];
+};
+
+/* wrappers around work structs */
+struct cavium_wk {
+ struct delayed_work work;
+ void *ctxptr;
+ size_t ctxul;
+};
+
+struct cavium_wq {
+ struct workqueue_struct *wq;
+ struct cavium_wk wk;
+};
+
+struct octdev_props {
+ /* Each interface in the Octeon device has a network
+ * device pointer (used for OS specific calls).
+ */
+ struct net_device *netdev;
+};
+
+/** The Octeon device.
+ * Each Octeon device has this structure to represent all its
+ * components.
+ */
+struct octeon_device {
+ /** Lock for PCI window configuration accesses */
+ spinlock_t pci_win_lock;
+
+ /** Lock for memory accesses */
+ spinlock_t mem_access_lock;
+
+ /** PCI device pointer */
+ struct pci_dev *pci_dev;
+
+ /** Chip specific information. */
+ void *chip;
+
+ /** Number of interfaces detected in this octeon device. */
+ u32 ifcount;
+
+ struct octdev_props props[MAX_OCTEON_LINKS];
+
+ /** Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /** This device's id - set by the driver. */
+ u32 octeon_id;
+
+ /** This device's PCIe port used for traffic. */
+ u16 pcie_port;
+
+ u16 flags;
+#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1)
+#define LIO_FLAG_MSIX_ENABLED (u32)(1 << 2)
+
+ /** The state of this device */
+ atomic_t status;
+
+ /** memory mapped io range */
+ struct octeon_mmio mmio[OCT_MEM_REGIONS];
+
+ struct octeon_reg_list reg_list;
+
+ struct octeon_fn_list fn_list;
+
+ struct octeon_board_info boardinfo;
+
+ u32 num_iqs;
+
+ /* The pool containing preallocated buffers used for soft commands */
+ struct octeon_sc_buffer_pool sc_buf_pool;
+
+ /** The input instruction queues */
+ struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+
+ /** The doubly-linked list of instruction response */
+ struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
+
+ u32 num_oqs;
+
+ /** The DROQ output queues */
+ struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+
+ struct octeon_io_enable io_qmask;
+
+ /** List of dispatch functions */
+ struct octeon_dispatch_list dispatch;
+
+ /* Interrupt Moderation */
+ struct oct_intrmod_cfg intrmod;
+
+ u32 int_status;
+
+ u64 droq_intr;
+
+ /** Physical location of the cvmx_bootmem_desc_t in octeon memory */
+ u64 bootmem_desc_addr;
+
+ /** Placeholder memory for named blocks.
+ * Assumes single-threaded access
+ */
+ struct cvmx_bootmem_named_block_desc bootmem_named_block_desc;
+
+ /** Address of consoles descriptor */
+ u64 console_desc_addr;
+
+ /** Number of consoles available. 0 means they are inaccessible */
+ u32 num_consoles;
+
+ /* Console caches */
+ struct octeon_console console[MAX_OCTEON_MAPS];
+
+ /* Coprocessor clock rate. */
+ u64 coproc_clock_rate;
+
+ /** The core application is running in this mode. See liquidio_common.h
+ * for values.
+ */
+ u32 app_mode;
+
+ struct oct_fw_info fw_info;
+
+ /** The name given to this device. */
+ char device_name[32];
+
+ /** Application Context */
+ void *app_ctx;
+
+ struct cavium_wq dma_comp_wq;
+
+ struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+
+ struct cavium_wk nic_poll_work;
+
+ struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
+
+ void *priv;
+};
+
+#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
+ (oct->chip_id == OCTEON_CN68XX))
+#define CHIP_FIELD(oct, TYPE, field) \
+ (((struct octeon_ ## TYPE *)(oct->chip))->field)
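+/* For example, CHIP_FIELD(oct, cn6xxx, conf) expands to
+ * ((struct octeon_cn6xxx *)(oct->chip))->conf, the chip pointer having been
+ * set up in octeon_allocate_device_mem().
+ */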
+
+struct oct_intrmod_cmd {
+ struct octeon_device *oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_cfg *cfg;
+};
+
+/*------------------ Function Prototypes ----------------------*/
+
+/** Initialize device list memory */
+void octeon_init_device_list(int conf_type);
+
+/** Free memory for Input and Output queue structures for a octeon device */
+void octeon_free_device_mem(struct octeon_device *);
+
+/* Look up a free entry in the octeon_device table and allocate resources
+ * for the octeon_device structure for an octeon device. Called at init
+ * time.
+ */
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size);
+
+/** Initialize the driver's dispatch list which is a mix of a hash table
+ * and a linked list. This is done at driver load time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return 0 on success, else -ve error value
+ */
+int octeon_init_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Delete the driver's dispatch list and all registered entries.
+ * This is done at driver unload time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ */
+void octeon_delete_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Initialize the core device fields with the info returned by the FW.
+ * @param recv_info - Receive info structure
+ * @param buf - Receive buffer
+ */
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf);
+
+/** Gets the dispatch function registered to receive packets with a
+ * given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch function
+ * is to checked.
+ * @param subcode - the subcode for which the dispatch function
+ * is to checked.
+ *
+ * @return Success: octeon_dispatch_fn_t (dispatch function pointer)
+ * @return Failure: NULL
+ *
+ * Looks up the dispatch list to get the dispatch function for a
+ * given opcode.
+ */
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode);
+
+/** Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id);
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev);
+
+static inline u16 OCTEON_MAJOR_REV(struct octeon_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEON_MINOR_REV(struct octeon_device *oct)
+{
+ return oct->rev_id & 0x3;
+}
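+
+/* Example: a rev_id of 0x6 decodes to major (0x6 & 0xC) >> 2 = 1 and minor
+ * 0x6 & 0x3 = 2, i.e. a "1.2" part; a major field of 0 is reported as 1,
+ * presumably so that pass-1.x silicon never shows up as revision 0.
+ */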
+
+/** Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr);
+
+/** Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param val - Value to write
+ * @param addr - Address of the register to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr);
+
+/* Routines for reading and writing CSRs */
+#define octeon_write_csr(oct_dev, reg_off, value) \
+ writel(value, oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_write_csr64(oct_dev, reg_off, val64) \
+ writeq(val64, oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_read_csr(oct_dev, reg_off) \
+ readl(oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_read_csr64(oct_dev, reg_off) \
+ readq(oct_dev->mmio[0].hw_addr + reg_off)
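+
+/* These expand to plain readl/writel/readq/writeq accesses against the
+ * BAR0 mapping; for example octeon_set_io_queues_off() in octeon_device.c
+ * simply does octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0) to clear
+ * the instruction-queue enable register.
+ */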
+
+/**
+ * Checks if memory access is okay
+ *
+ * @param oct which octeon to send to
+ * @return Zero on success, non-zero on failure.
+ */
+int octeon_mem_access_ok(struct octeon_device *oct);
+
+/**
+ * Waits for DDR initialization.
+ *
+ * @param oct which octeon to send to
+ * @param timeout_in_ms pointer to how long to wait until DDR is initialized
+ * in ms.
+ * If contents are 0, it waits until contents are non-zero
+ * before starting to check.
+ * @return Zero on success, non-zero on failure.
+ */
+int octeon_wait_for_ddr_init(struct octeon_device *oct,
+ u32 *timeout_in_ms);
+
+/**
+ * Wait for u-boot to boot and be waiting for a command.
+ *
+ * @param wait_time_hundredths
+ * Maximum time to wait
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+ u32 wait_time_hundredths);
+
+/**
+ * Initialize console access
+ *
+ * @param oct which octeon initialize
+ * @return Zero on success, negative on failure.
+ */
+int octeon_init_consoles(struct octeon_device *oct);
+
+/**
+ * Adds access to a console to the device.
+ *
+ * @param oct which octeon to add to
+ * @param console_num which console
+ * @return Zero on success, negative on failure.
+ */
+int octeon_add_console(struct octeon_device *oct, u32 console_num);
+
+/** write or read from a console */
+int octeon_console_write(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 write_request_size, u32 flags);
+int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
+int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size, u32 flags);
+int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
+
+/** Removes all attached consoles. */
+void octeon_remove_consoles(struct octeon_device *oct);
+
+/**
+ * Send a string to u-boot on console 0 as a command.
+ *
+ * @param oct which octeon to send to
+ * @param cmd_str String to send
+ * @param wait_hundredths Time to wait for u-boot to accept the command.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+ u32 wait_hundredths);
+
+/** Parses, validates, and downloads firmware, then boots associated cores.
+ * @param oct which octeon to download firmware to
+ * @param data - The complete firmware file image
+ * @param size - The size of the data
+ *
+ * @return 0 if success.
+ * -EINVAL if file is incompatible or badly formatted.
+ * -ENODEV if no handler was found for the application type or an
+ * invalid octeon id was passed.
+ */
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size);
+
+char *lio_get_state_string(atomic_t *state_ptr);
+
+/** Sets up instruction queues for the device
+ * @param oct which octeon to setup
+ *
+ * @return 0 if success. 1 if fails
+ */
+int octeon_setup_instr_queues(struct octeon_device *oct);
+
+/** Sets up output queues for the device
+ * @param oct which octeon to setup
+ *
+ * @return 0 if success. 1 if fails
+ */
+int octeon_setup_output_queues(struct octeon_device *oct);
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
+
+/** Turns off the input and output queues for the device
+ * @param oct which octeon to disable
+ */
+void octeon_set_io_queues_off(struct octeon_device *oct);
+
+/** Turns on or off the given output queue for the device
+ * @param oct which octeon to change
+ * @param q_no which queue
+ * @param enable 1 to enable, 0 to disable
+ */
+void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
+
+/** Retrieve the config for the device
+ * @param oct which octeon
+ * @param card_type type of card
+ *
+ * @returns pointer to configuration
+ */
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
+
+/** Gets the octeon device configuration
+ * @return - pointer to the octeon configuration structure
+ */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
new file mode 100644
index 000000000..4dba86eaa
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -0,0 +1,987 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+/* #define CAVIUM_ONLY_PERF_MODE */
+
+#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
+#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
+
+struct niclist {
+ struct list_head list;
+ void *ptr;
+};
+
+struct __dispatch {
+ struct list_head list;
+ struct octeon_recv_info *rinfo;
+ octeon_dispatch_fn_t disp_fn;
+};
+
+/** Get the argument that the user set when registering dispatch
+ * function for a given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch argument
+ * is to be checked.
+ * @param subcode - the subcode for which the dispatch argument
+ * is to be checked.
+ * @return Success: void * (argument to the dispatch function)
+ * @return Failure: NULL
+ *
+ */
+static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
+ u16 opcode, u16 subcode)
+{
+ int idx;
+ struct list_head *dispatch;
+ void *fn_arg = NULL;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
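+	/* The masked opcode indexes the dispatch table directly; entries that
+	 * collide on the same index are chained on the per-slot list below.
+	 */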
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&octeon_dev->dispatch.lock);
+
+ if (octeon_dev->dispatch.count == 0) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+ fn_arg = octeon_dev->dispatch.dlist[idx].arg;
+ } else {
+ list_for_each(dispatch,
+ &octeon_dev->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ fn_arg = ((struct octeon_dispatch *)
+ dispatch)->arg;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return fn_arg;
+}
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 pkt_count = 0;
+
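+	/* Read the number of packets the hardware has posted since the last
+	 * check and write the same value back to acknowledge them.
+	 */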
+ pkt_count = readl(droq->pkts_sent_reg);
+ if (pkt_count) {
+ atomic_add(pkt_count, &droq->pkts_pending);
+ writel(pkt_count, droq->pkts_sent_reg);
+ }
+
+ return pkt_count;
+}
+
+static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
+{
+ u32 count = 0;
+
+ /* max_empty_descs is the max. no. of descs that can have no buffers.
+ * If the empty desc count goes beyond this value, we cannot safely
+ * read in a 64K packet sent by Octeon
+ * (64K is max pkt size from Octeon)
+ */
+ droq->max_empty_descs = 0;
+
+ do {
+ droq->max_empty_descs++;
+ count += droq->buffer_size;
+ } while (count < (64 * 1024));
+
+ droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
+}
+
+static void octeon_droq_reset_indices(struct octeon_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ atomic_set(&droq->pkts_pending, 0);
+}
+
+static void
+octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 i;
+
+ for (i = 0; i < droq->max_count; i++) {
+ if (droq->recv_buf_list[i].buffer) {
+ if (droq->desc_ring) {
+ lio_unmap_ring_info(oct->pci_dev,
+ (u64)droq->
+ desc_ring[i].info_ptr,
+ OCT_DROQ_INFO_SIZE);
+ lio_unmap_ring(oct->pci_dev,
+ (u64)droq->desc_ring[i].
+ buffer_ptr,
+ droq->buffer_size);
+ }
+ recv_buffer_free(droq->recv_buf_list[i].buffer);
+ droq->recv_buf_list[i].buffer = NULL;
+ }
+ }
+
+ octeon_droq_reset_indices(droq);
+}
+
+static int
+octeon_droq_setup_ring_buffers(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 i;
+ void *buf;
+ struct octeon_droq_desc *desc_ring = droq->desc_ring;
+
+ for (i = 0; i < droq->max_count; i++) {
+ buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);
+
+ if (!buf) {
+ dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[i].buffer = buf;
+ droq->recv_buf_list[i].data = get_rbd(buf);
+
+ droq->info_list[i].length = 0;
+
+ /* map ring buffers into memory */
+ desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].buffer_ptr =
+ lio_map_ring(oct->pci_dev,
+ droq->recv_buf_list[i].buffer,
+ droq->buffer_size);
+ }
+
+ octeon_droq_reset_indices(droq);
+
+ octeon_droq_compute_max_packet_bufs(droq);
+
+ return 0;
+}
+
+int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
+{
+ struct octeon_droq *droq = oct->droq[q_no];
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+ octeon_droq_destroy_ring_buffers(oct, droq);
+ vfree(droq->recv_buf_list);
+
+ if (droq->info_base_addr)
+ cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
+ droq->info_alloc_size,
+ droq->info_base_addr,
+ droq->info_list_dma);
+
+ if (droq->desc_ring)
+ lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+ droq->desc_ring, droq->desc_ring_dma);
+
+ memset(droq, 0, OCT_DROQ_SIZE);
+
+ return 0;
+}
+
+int octeon_init_droq(struct octeon_device *oct,
+ u32 q_no,
+ u32 num_descs,
+ u32 desc_size,
+ void *app_ctx)
+{
+ struct octeon_droq *droq;
+ u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
+ u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+ droq = oct->droq[q_no];
+ memset(droq, 0, OCT_DROQ_SIZE);
+
+ droq->oct_dev = oct;
+ droq->q_no = q_no;
+ if (app_ctx)
+ droq->app_ctx = app_ctx;
+ else
+ droq->app_ctx = (void *)(size_t)q_no;
+
+ c_num_descs = num_descs;
+ c_buf_size = desc_size;
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ }
+
+ droq->max_count = c_num_descs;
+ droq->buffer_size = c_buf_size;
+
+ desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+ droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+ (dma_addr_t *)&droq->desc_ring_dma);
+
+ if (!droq->desc_ring) {
+ dev_err(&oct->pci_dev->dev,
+ "Output queue %d ring alloc failed\n", q_no);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+ q_no, droq->desc_ring, droq->desc_ring_dma);
+ dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
+ droq->max_count);
+
+ droq->info_list =
+ cnnic_alloc_aligned_dma(oct->pci_dev,
+ (droq->max_count * OCT_DROQ_INFO_SIZE),
+ &droq->info_alloc_size,
+ &droq->info_base_addr,
+ &droq->info_list_dma);
+
+ if (!droq->info_list) {
+ dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
+ lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+ droq->desc_ring, droq->desc_ring_dma);
+ return 1;
+ }
+
+ droq->recv_buf_list = (struct octeon_recv_buffer *)
+ vmalloc(droq->max_count *
+ OCT_DROQ_RECVBUF_SIZE);
+ if (!droq->recv_buf_list) {
+ dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
+ goto init_droq_fail;
+ }
+
+ if (octeon_droq_setup_ring_buffers(oct, droq))
+ goto init_droq_fail;
+
+ droq->pkts_per_intr = c_pkts_per_intr;
+ droq->refill_threshold = c_refill_threshold;
+
+ dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
+ droq->max_empty_descs);
+
+ spin_lock_init(&droq->lock);
+
+ INIT_LIST_HEAD(&droq->dispatch_list);
+
+ /* For 56xx Pass1, this function won't be called, so no checks. */
+ oct->fn_list.setup_oq_regs(oct, q_no);
+
+ oct->io_qmask.oq |= (1 << q_no);
+
+ return 0;
+
+init_droq_fail:
+ octeon_delete_droq(oct, q_no);
+ return 1;
+}
+
+/* octeon_create_recv_info
+ * Parameters:
+ * octeon_dev - pointer to the octeon device structure
+ * droq - droq in which the packet arrived.
+ * buf_cnt - no. of buffers used by the packet.
+ * idx - index in the descriptor for the first buffer in the packet.
+ * Description:
+ *  Allocates an octeon_recv_info structure and copies the buffer addresses
+ *  for packet data into the recv_pkt space, which starts at an 8B offset
+ *  from the recv_info. Flags the descriptors for refill later. If available
+ *  descriptors go below the threshold needed to receive a 64K pkt, new
+ *  buffers are first allocated before the recv_pkt is created.
+ * This routine will be called in interrupt context.
+ * Returns:
+ * Success: Pointer to recv_info_t
+ * Failure: NULL.
+ * Locks:
+ * The droq->lock is held when this routine is called.
+ */
+static inline struct octeon_recv_info *octeon_create_recv_info(
+ struct octeon_device *octeon_dev,
+ struct octeon_droq *droq,
+ u32 buf_cnt,
+ u32 idx)
+{
+ struct octeon_droq_info *info;
+ struct octeon_recv_pkt *recv_pkt;
+ struct octeon_recv_info *recv_info;
+ u32 i, bytes_left;
+
+ info = &droq->info_list[idx];
+
+ recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
+ if (!recv_info)
+ return NULL;
+
+ recv_pkt = recv_info->recv_pkt;
+ recv_pkt->rh = info->rh;
+ recv_pkt->length = (u32)info->length;
+ recv_pkt->buffer_count = (u16)buf_cnt;
+ recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;
+
+ i = 0;
+ bytes_left = (u32)info->length;
+
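+	/* Unmap each descriptor buffer used by this packet and record its
+	 * virtual address in the recv_pkt; only the last buffer may hold
+	 * fewer than buffer_size bytes.
+	 */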
+ while (buf_cnt) {
+ lio_unmap_ring(octeon_dev->pci_dev,
+ (u64)droq->desc_ring[idx].buffer_ptr,
+ droq->buffer_size);
+
+ recv_pkt->buffer_size[i] =
+ (bytes_left >=
+ droq->buffer_size) ? droq->buffer_size : bytes_left;
+
+ recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
+ droq->recv_buf_list[idx].buffer = NULL;
+
+ INCR_INDEX_BY1(idx, droq->max_count);
+ bytes_left -= droq->buffer_size;
+ i++;
+ buf_cnt--;
+ }
+
+ return recv_info;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline u32
+octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
+ struct octeon_droq_desc *desc_ring)
+{
+ u32 desc_refilled = 0;
+
+ u32 refill_index = droq->refill_idx;
+
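+	/* Scan from the refill index up to the read index; any buffer that
+	 * was not dispatched is moved back to the current refill slot so the
+	 * refilled region of the ring stays contiguous.
+	 */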
+ while (refill_index != droq->read_idx) {
+ if (droq->recv_buf_list[refill_index].buffer) {
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ droq->recv_buf_list[refill_index].buffer;
+ droq->recv_buf_list[droq->refill_idx].data =
+ droq->recv_buf_list[refill_index].data;
+ desc_ring[droq->refill_idx].buffer_ptr =
+ desc_ring[refill_index].buffer_ptr;
+ droq->recv_buf_list[refill_index].buffer = NULL;
+ desc_ring[refill_index].buffer_ptr = 0;
+ do {
+ INCR_INDEX_BY1(droq->refill_idx,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ } while (droq->recv_buf_list[droq->refill_idx].
+ buffer);
+ }
+ INCR_INDEX_BY1(refill_index, droq->max_count);
+ } /* while */
+ return desc_refilled;
+}
+
+/* octeon_droq_refill
+ * Parameters:
+ * droq - droq in which descriptors require new buffers.
+ * Description:
+ * Called during normal DROQ processing in interrupt mode or by the poll
+ * thread to refill the descriptors from which buffers were dispatched
+ * to upper layers. Attempts to allocate new buffers. If that fails, moves
+ * up buffers (that were not dispatched) to form a contiguous ring.
+ * Returns:
+ * No of descriptors refilled.
+ * Locks:
+ * This routine is called with droq->lock held.
+ */
+static u32
+octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
+{
+ struct octeon_droq_desc *desc_ring;
+ void *buf = NULL;
+ u8 *data;
+ u32 desc_refilled = 0;
+
+ desc_ring = droq->desc_ring;
+
+ while (droq->refill_count && (desc_refilled < droq->max_count)) {
+		/* If a valid buffer exists (happens if there is no dispatch),
+		 * reuse the buffer, else allocate.
+		 */
+ if (!droq->recv_buf_list[droq->refill_idx].buffer) {
+ buf = recv_buffer_alloc(octeon_dev, droq->q_no,
+ droq->buffer_size);
+ /* If a buffer could not be allocated, no point in
+ * continuing
+ */
+ if (!buf)
+ break;
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ buf;
+ data = get_rbd(buf);
+ } else {
+ data = get_rbd(droq->recv_buf_list
+ [droq->refill_idx].buffer);
+ }
+
+ droq->recv_buf_list[droq->refill_idx].data = data;
+
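+		/* Map the (new or reused) buffer so the hardware can DMA the
+		 * next packet into it.
+		 */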
+ desc_ring[droq->refill_idx].buffer_ptr =
+ lio_map_ring(octeon_dev->pci_dev,
+ droq->recv_buf_list[droq->
+ refill_idx].buffer,
+ droq->buffer_size);
+
+ /* Reset any previous values in the length field. */
+ droq->info_list[droq->refill_idx].length = 0;
+
+ INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ }
+
+ if (droq->refill_count)
+ desc_refilled +=
+ octeon_droq_refill_pullup_descs(droq, desc_ring);
+
+	/* If droq->refill_count is still non-zero here, it is unchanged by
+	 * pass two above: we only moved buffers to close the gap in the ring,
+	 * so the same number of buffers still needs to be refilled.
+	 */
+ return desc_refilled;
+}
+
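+/* Number of ring buffers needed to hold a packet of total_len bytes. */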
+static inline u32
+octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
+{
+ u32 buf_cnt = 0;
+
+ while (total_len > (buf_size * buf_cnt))
+ buf_cnt++;
+ return buf_cnt;
+}
+
+static int
+octeon_droq_dispatch_pkt(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ union octeon_rh *rh,
+ struct octeon_droq_info *info)
+{
+ u32 cnt;
+ octeon_dispatch_fn_t disp_fn;
+ struct octeon_recv_info *rinfo;
+
+ cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);
+
+ disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
+ (u16)rh->r.subcode);
+ if (disp_fn) {
+ rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
+ if (rinfo) {
+ struct __dispatch *rdisp = rinfo->rsvd;
+
+ rdisp->rinfo = rinfo;
+ rdisp->disp_fn = disp_fn;
+ rinfo->recv_pkt->rh = *rh;
+ list_add_tail(&rdisp->list,
+ &droq->dispatch_list);
+ } else {
+ droq->stats.dropped_nomem++;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
+ droq->stats.dropped_nodispatch++;
+ } /* else (dispatch_fn ... */
+
+ return cnt;
+}
+
+static inline void octeon_droq_drop_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 cnt)
+{
+ u32 i = 0, buf_cnt;
+ struct octeon_droq_info *info;
+
+ for (i = 0; i < cnt; i++) {
+ info = &droq->info_list[droq->read_idx];
+ octeon_swap_8B_data((u64 *)info, 2);
+
+ if (info->length) {
+ info->length -= OCT_RH_SIZE;
+ droq->stats.bytes_received += info->length;
+ buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
+ (u32)info->length);
+ } else {
+ dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
+ buf_cnt = 1;
+ }
+
+ INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+ droq->refill_count += buf_cnt;
+ }
+}
+
+static u32
+octeon_droq_fast_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 pkts_to_process)
+{
+ struct octeon_droq_info *info;
+ union octeon_rh *rh;
+ u32 pkt, total_len = 0, pkt_count;
+
+ pkt_count = pkts_to_process;
+
+ for (pkt = 0; pkt < pkt_count; pkt++) {
+ u32 pkt_len = 0;
+ struct sk_buff *nicbuf = NULL;
+
+ info = &droq->info_list[droq->read_idx];
+ octeon_swap_8B_data((u64 *)info, 2);
+
+ if (!info->length) {
+ dev_err(&oct->pci_dev->dev,
+ "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
+ droq->q_no, droq->read_idx, pkt_count);
+ print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
+ (u8 *)info,
+ OCT_DROQ_INFO_SIZE);
+ break;
+ }
+
+		/* Len of resp hdr is included in the received data len. */
+ info->length -= OCT_RH_SIZE;
+ rh = &info->rh;
+
+ total_len += (u32)info->length;
+
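+		/* Slow-path packets are queued for the registered dispatch
+		 * functions; fast-path packets are handed to the stack via
+		 * the droq ops fptr below.
+		 */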
+ if (OPCODE_SLOW_PATH(rh)) {
+ u32 buf_cnt;
+
+ buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
+ INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+ droq->refill_count += buf_cnt;
+ } else {
+ if (info->length <= droq->buffer_size) {
+ lio_unmap_ring(oct->pci_dev,
+ (u64)droq->desc_ring[
+ droq->read_idx].buffer_ptr,
+ droq->buffer_size);
+ pkt_len = (u32)info->length;
+ nicbuf = droq->recv_buf_list[
+ droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer =
+ NULL;
+ INCR_INDEX_BY1(droq->read_idx, droq->max_count);
+ skb_put(nicbuf, pkt_len);
+ droq->refill_count++;
+ } else {
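+				/* Packet spans multiple ring buffers: allocate
+				 * one receive buffer and gather each fragment
+				 * into it, unmapping the descriptors as we go.
+				 */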
+ nicbuf = octeon_fast_packet_alloc(oct, droq,
+ droq->q_no,
+ (u32)
+ info->length);
+ pkt_len = 0;
+ /* nicbuf allocation can fail. We'll handle it
+ * inside the loop.
+ */
+ while (pkt_len < info->length) {
+ int cpy_len;
+
+ cpy_len = ((pkt_len +
+ droq->buffer_size) >
+ info->length) ?
+ ((u32)info->length - pkt_len) :
+ droq->buffer_size;
+
+ if (nicbuf) {
+ lio_unmap_ring(oct->pci_dev,
+ (u64)
+ droq->desc_ring
+ [droq->read_idx].
+ buffer_ptr,
+ droq->
+ buffer_size);
+ octeon_fast_packet_next(droq,
+ nicbuf,
+ cpy_len,
+ droq->
+ read_idx
+ );
+ }
+
+ pkt_len += cpy_len;
+ INCR_INDEX_BY1(droq->read_idx,
+ droq->max_count);
+ droq->refill_count++;
+ }
+ }
+
+ if (nicbuf) {
+ if (droq->ops.fptr)
+ droq->ops.fptr(oct->octeon_id,
+ nicbuf, pkt_len,
+ rh, &droq->napi);
+ else
+ recv_buffer_free(nicbuf);
+ }
+ }
+
+ if (droq->refill_count >= droq->refill_threshold) {
+ int desc_refilled = octeon_droq_refill(oct, droq);
+
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
+ wmb();
+ writel((desc_refilled), droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
+ }
+
+ } /* for ( each packet )... */
+
+	/* Update the receive stats for the packets and bytes processed above. */
+ droq->stats.pkts_received += pkt;
+ droq->stats.bytes_received += total_len;
+
+ if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
+ octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
+
+ droq->stats.dropped_toomany += (pkts_to_process - pkt);
+ return pkts_to_process;
+ }
+
+ return pkt;
+}
+
+int
+octeon_droq_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 budget)
+{
+ u32 pkt_count = 0, pkts_processed = 0;
+ struct list_head *tmp, *tmp2;
+
+ pkt_count = atomic_read(&droq->pkts_pending);
+ if (!pkt_count)
+ return 0;
+
+ if (pkt_count > budget)
+ pkt_count = budget;
+
+ /* Grab the lock */
+ spin_lock(&droq->lock);
+
+ pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
+
+ atomic_sub(pkts_processed, &droq->pkts_pending);
+
+ /* Release the spin lock */
+ spin_unlock(&droq->lock);
+
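+	/* Run the dispatch functions for any slow-path packets collected
+	 * above, outside the droq lock.
+	 */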
+ list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+ struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+ list_del(tmp);
+ rdisp->disp_fn(rdisp->rinfo,
+ octeon_get_dispatch_arg
+ (oct,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+ }
+
+	/* If there are packets still pending, schedule the tasklet again */
+ if (atomic_read(&droq->pkts_pending))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Utility function to poll for packets. octeon_droq_check_hw_for_pkts()
+ * must be called before calling this routine.
+ */
+
+static int
+octeon_droq_process_poll_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq, u32 budget)
+{
+ struct list_head *tmp, *tmp2;
+ u32 pkts_available = 0, pkts_processed = 0;
+ u32 total_pkts_processed = 0;
+
+ if (budget > droq->max_count)
+ budget = droq->max_count;
+
+ spin_lock(&droq->lock);
+
+ while (total_pkts_processed < budget) {
+ pkts_available =
+ CVM_MIN((budget - total_pkts_processed),
+ (u32)(atomic_read(&droq->pkts_pending)));
+
+ if (pkts_available == 0)
+ break;
+
+ pkts_processed =
+ octeon_droq_fast_process_packets(oct, droq,
+ pkts_available);
+
+ atomic_sub(pkts_processed, &droq->pkts_pending);
+
+ total_pkts_processed += pkts_processed;
+
+ octeon_droq_check_hw_for_pkts(oct, droq);
+ }
+
+ spin_unlock(&droq->lock);
+
+ list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+ struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+ list_del(tmp);
+ rdisp->disp_fn(rdisp->rinfo,
+ octeon_get_dispatch_arg
+ (oct,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+ }
+
+ return total_pkts_processed;
+}
+
+int
+octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
+ u32 arg)
+{
+ struct octeon_droq *droq;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, (oct->num_oqs - 1));
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ if (cmd == POLL_EVENT_PROCESS_PKTS)
+ return octeon_droq_process_poll_pkts(oct, droq, arg);
+
+ if (cmd == POLL_EVENT_PENDING_PKTS) {
+ u32 pkt_cnt = atomic_read(&droq->pkts_pending);
+
+ return octeon_droq_process_packets(oct, droq, pkt_cnt);
+ }
+
+ if (cmd == POLL_EVENT_ENABLE_INTR) {
+ u32 value;
+ unsigned long flags;
+
+ /* Enable Pkt Interrupt */
+ switch (oct->chip_id) {
+ case OCTEON_CN66XX:
+ case OCTEON_CN68XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+ spin_lock_irqsave
+ (&cn6xxx->lock_for_droq_int_enb_reg, flags);
+ value =
+ octeon_read_csr(oct,
+ CN6XXX_SLI_PKT_TIME_INT_ENB);
+ value |= (1 << q_no);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_PKT_TIME_INT_ENB,
+ value);
+ value =
+ octeon_read_csr(oct,
+ CN6XXX_SLI_PKT_CNT_INT_ENB);
+ value |= (1 << q_no);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_PKT_CNT_INT_ENB,
+ value);
+
+ /* don't bother flushing the enables */
+
+ spin_unlock_irqrestore
+ (&cn6xxx->lock_for_droq_int_enb_reg, flags);
+ return 0;
+ }
+ break;
+ }
+
+ return 0;
+ }
+
+ dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
+ return -EINVAL;
+}
+
+int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
+ struct octeon_droq_ops *ops)
+{
+ struct octeon_droq *droq;
+ unsigned long flags;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (!(ops)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, (oct->num_oqs - 1));
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ spin_lock_irqsave(&droq->lock, flags);
+
+ memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));
+
+ spin_unlock_irqrestore(&droq->lock, flags);
+
+ return 0;
+}
+
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
+{
+ unsigned long flags;
+ struct octeon_droq *droq;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, oct->num_oqs - 1);
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ if (!droq) {
+ dev_info(&oct->pci_dev->dev,
+ "Droq id (%d) not available.\n", q_no);
+ return 0;
+ }
+
+ spin_lock_irqsave(&droq->lock, flags);
+
+ droq->ops.fptr = NULL;
+ droq->ops.drop_on_max = 0;
+
+ spin_unlock_irqrestore(&droq->lock, flags);
+
+ return 0;
+}
+
+int octeon_create_droq(struct octeon_device *oct,
+ u32 q_no, u32 num_descs,
+ u32 desc_size, void *app_ctx)
+{
+ struct octeon_droq *droq;
+
+ if (oct->droq[q_no]) {
+ dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
+ q_no);
+ return 1;
+ }
+
+	/* Allocate the data structure for the new droq. */
+ droq = vmalloc(sizeof(*droq));
+ if (!droq)
+ goto create_droq_fail;
+ memset(droq, 0, sizeof(struct octeon_droq));
+
+ /*Disable the pkt o/p for this Q */
+	/* Disable packet output for this queue */
+ oct->droq[q_no] = droq;
+
+ /* Initialize the Droq */
+ octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx);
+
+ oct->num_oqs++;
+
+ dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
+ oct->num_oqs);
+
+ /* Global Droq register settings */
+
+	/* As of now not required, as settings are done for all 32 Droqs at
+	 * the same time.
+	 */
+ return 0;
+
+create_droq_fail:
+ octeon_delete_droq(oct, q_no);
+ return -1;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
new file mode 100644
index 000000000..7940ccee1
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -0,0 +1,426 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_droq.h
+ * \brief Implementation of Octeon Output queues. "Output" is with
+ * respect to the Octeon device on the NIC. From this driver's point of
+ * view they are ingress queues.
+ */
+
+#ifndef __OCTEON_DROQ_H__
+#define __OCTEON_DROQ_H__
+
+/* Default number of packets that will be processed in one iteration. */
+#define MAX_PACKET_BUDGET 0xFFFFFFFF
+
+/** Octeon descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of an octeon_droq_info structure.
+ * The Octeon device DMAs incoming packets and their information to the
+ * addresses given by these descriptor fields.
+ */
+struct octeon_droq_desc {
+ /** The buffer pointer */
+ u64 buffer_ptr;
+
+ /** The Info pointer */
+ u64 info_ptr;
+};
+
+#define OCT_DROQ_DESC_SIZE (sizeof(struct octeon_droq_desc))
+
+/** Information about packet DMA'ed by Octeon.
+ * The format of the information available at Info Pointer after Octeon
+ * has posted a packet. Not all descriptors have valid information. Only
+ * the Info field of the first descriptor for a packet has information
+ * about the packet.
+ */
+struct octeon_droq_info {
+ /** The Output Receive Header. */
+ union octeon_rh rh;
+
+ /** The Length of the packet. */
+ u64 length;
+};
+
+#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
+
+/** Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers.
+*/
+struct octeon_recv_buffer {
+ /** Packet buffer, including metadata. */
+ void *buffer;
+
+ /** Data in the packet buffer. */
+ u8 *data;
+};
+
+#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
+
+/** Output Queue statistics. Each output queue has its own set of stats fields. */
+struct oct_droq_stats {
+ /** Number of packets received in this queue. */
+ u64 pkts_received;
+
+ /** Bytes received by this queue. */
+ u64 bytes_received;
+
+ /** Packets dropped due to no dispatch function. */
+ u64 dropped_nodispatch;
+
+ /** Packets dropped due to no memory available. */
+ u64 dropped_nomem;
+
+ /** Packets dropped due to large number of pkts to process. */
+ u64 dropped_toomany;
+
+ /** Number of packets sent to stack from this queue. */
+ u64 rx_pkts_received;
+
+ /** Number of Bytes sent to stack from this queue. */
+ u64 rx_bytes_received;
+
+ /** Num of Packets dropped due to receive path failures. */
+ u64 rx_dropped;
+};
+
+#define POLL_EVENT_INTR_ARRIVED 1
+#define POLL_EVENT_PROCESS_PKTS 2
+#define POLL_EVENT_PENDING_PKTS 3
+#define POLL_EVENT_ENABLE_INTR 4
+
+/* The maximum number of buffers that can be dispatched from the
+ * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that
+ * max packet size from DROQ is 64K.
+ */
+#define MAX_RECV_BUFS 64
+
+/** Receive Packet format used when dispatching output queue packets
+ * with non-raw opcodes.
+ * The received packet will be sent to the upper layers using this
+ * structure which is passed as a parameter to the dispatch function
+ */
+struct octeon_recv_pkt {
+ /** Number of buffers in this received packet */
+ u16 buffer_count;
+
+ /** Id of the device that is sending the packet up */
+ u16 octeon_id;
+
+ /** Length of data in the packet buffer */
+ u32 length;
+
+ /** The receive header */
+ union octeon_rh rh;
+
+ /** Pointer to the OS-specific packet buffer */
+ void *buffer_ptr[MAX_RECV_BUFS];
+
+ /** Size of the buffers pointed to by ptr's in buffer_ptr */
+ u32 buffer_size[MAX_RECV_BUFS];
+};
+
+#define OCT_RECV_PKT_SIZE (sizeof(struct octeon_recv_pkt))
+
+/** The first parameter of a dispatch function.
+ * For a raw mode opcode, the driver dispatches with the device
+ * pointer in this structure.
+ * For non-raw mode opcode, the driver dispatches the recv_pkt
+ * created to contain the buffers with data received from Octeon.
+ * ---------------------
+ * | *recv_pkt ----|---
+ * |-------------------| |
+ * | 0 or more bytes | |
+ * | reserved by driver| |
+ * |-------------------|<-/
+ * | octeon_recv_pkt |
+ * | |
+ * |___________________|
+ */
+struct octeon_recv_info {
+ void *rsvd;
+ struct octeon_recv_pkt *recv_pkt;
+};
+
+#define OCT_RECV_INFO_SIZE (sizeof(struct octeon_recv_info))
+
+/** Allocate a recv_info structure. The recv_pkt pointer in the recv_info
+ * structure is filled in before this call returns.
+ * @param extra_bytes - extra bytes to be allocated at the end of the recv info
+ * structure.
+ * @return - pointer to a newly allocated recv_info structure.
+ */
+static inline struct octeon_recv_info *octeon_alloc_recv_info(int extra_bytes)
+{
+ struct octeon_recv_info *recv_info;
+ u8 *buf;
+
+ buf = kmalloc(OCT_RECV_PKT_SIZE + OCT_RECV_INFO_SIZE +
+ extra_bytes, GFP_ATOMIC);
+ if (!buf)
+ return NULL;
+
+ recv_info = (struct octeon_recv_info *)buf;
+ recv_info->recv_pkt =
+ (struct octeon_recv_pkt *)(buf + OCT_RECV_INFO_SIZE);
+ recv_info->rsvd = NULL;
+ if (extra_bytes)
+ recv_info->rsvd = buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE;
+
+ return recv_info;
+}
+
+/** Free a recv_info structure.
+ * @param recv_info - Pointer to receive_info to be freed
+ */
+static inline void octeon_free_recv_info(struct octeon_recv_info *recv_info)
+{
+ kfree(recv_info);
+}
+
+typedef int (*octeon_dispatch_fn_t)(struct octeon_recv_info *, void *);
+
+/** Used by NIC module to register packet handler and to get device
+ * information for each octeon device.
+ */
+struct octeon_droq_ops {
+ /** This registered function will be called by the driver with
+ * the octeon id, pointer to buffer from droq and length of
+ * data in the buffer. The receive header gives the port
+ * number to the caller. Function pointer is set by caller.
+ */
+ void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+
+	/* This function will be called by the driver for all NAPI related
+	 * events. It takes a single driver-supplied context argument.
+	 */
+ void (*napi_fn)(void *);
+
+ u32 poll_mode;
+
+ /** Flag indicating if the DROQ handler should drop packets that
+ * it cannot handle in one iteration. Set by caller.
+ */
+ u32 drop_on_max;
+};
+
+/** The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * Octeon DROQ.
+ */
+struct octeon_droq {
+ /** A spinlock to protect access to this ring. */
+ spinlock_t lock;
+
+ u32 q_no;
+
+ struct octeon_droq_ops ops;
+
+ struct octeon_device *oct_dev;
+
+ /** The 8B aligned descriptor ring starts at this address. */
+ struct octeon_droq_desc *desc_ring;
+
+ /** Index in the ring where the driver should read the next packet */
+ u32 read_idx;
+
+ /** Index in the ring where Octeon will write the next packet */
+ u32 write_idx;
+
+ /** Index in the ring where the driver will refill the descriptor's
+ * buffer
+ */
+ u32 refill_idx;
+
+ /** Packets pending to be processed */
+ atomic_t pkts_pending;
+
+ /** Number of descriptors in this ring. */
+ u32 max_count;
+
+ /** The number of descriptors pending refill. */
+ u32 refill_count;
+
+ u32 pkts_per_intr;
+ u32 refill_threshold;
+
+ /** The max number of descriptors in DROQ without a buffer.
+ * This field is used to keep track of empty space threshold. If the
+ * refill_count reaches this value, the DROQ cannot accept a max-sized
+ * (64K) packet.
+ */
+ u32 max_empty_descs;
+
+ /** The 8B aligned info ptrs begin from this address. */
+ struct octeon_droq_info *info_list;
+
+ /** The receive buffer list. This list has the virtual addresses of the
+ * buffers.
+ */
+ struct octeon_recv_buffer *recv_buf_list;
+
+ /** The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+
+ /** Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ void __iomem *pkts_credit_reg;
+
+ /** Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ void __iomem *pkts_sent_reg;
+
+ struct list_head dispatch_list;
+
+ /** Statistics for this DROQ. */
+ struct oct_droq_stats stats;
+
+ /** DMA mapped address of the DROQ descriptor ring. */
+ size_t desc_ring_dma;
+
+ /** Info ptr list are allocated at this virtual address. */
+ size_t info_base_addr;
+
+ /** DMA mapped address of the info list */
+ size_t info_list_dma;
+
+ /** Allocated size of info list. */
+ u32 info_alloc_size;
+
+ /** application context */
+ void *app_ctx;
+
+ struct napi_struct napi;
+
+ u32 cpu_id;
+
+ struct call_single_data csd;
+};
+
+#define OCT_DROQ_SIZE (sizeof(struct octeon_droq))
+
+/**
+ * Allocates space for the descriptor ring for the droq and sets the
+ * base addr, num desc etc in Octeon registers.
+ *
+ * @param oct_dev - pointer to the octeon device structure
+ * @param q_no - droq no. ranges from 0 - 3.
+ * @param app_ctx - pointer to application context
+ * @return Success: 0 Failure: 1
+*/
+int octeon_init_droq(struct octeon_device *oct_dev,
+ u32 q_no,
+ u32 num_descs,
+ u32 desc_size,
+ void *app_ctx);
+
+/**
+ * Frees the space for descriptor ring for the droq.
+ *
+ * @param oct_dev - pointer to the octeon device structure
+ * @param q_no - droq no. ranges from 0 - 3.
+ * @return: Success: 0 Failure: 1
+*/
+int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
+
+/** Register a change in droq operations. The ops field has a pointer to a
+ * function which will be called by the DROQ handler for all packets arriving
+ * on output queues given by q_no irrespective of the type of packet.
+ * The ops field also has a flag which if set tells the DROQ handler to
+ * drop packets if it receives more than what it can process in one
+ * invocation of the handler.
+ * @param oct - octeon device
+ * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @param ops - the droq_ops settings for this queue
+ * @return - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int
+octeon_register_droq_ops(struct octeon_device *oct,
+ u32 q_no,
+ struct octeon_droq_ops *ops);
+
+/** Resets the function pointer and flag settings made by
+ * octeon_register_droq_ops(). After this routine is called, the DROQ handler
+ * will look up the dispatch function for each arriving packet on the output queue
+ * given by q_no.
+ * @param oct - octeon device
+ * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @return - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
+
+/** Register a dispatch function for an opcode/subcode. The driver will call
+ * this dispatch function when it receives a packet with the given
+ * opcode/subcode in its output queues along with the user specified
+ * argument.
+ * @param oct - the octeon device to register with.
+ * @param opcode - the opcode for which the dispatch will be registered.
+ * @param subcode - the subcode for which the dispatch will be registered
+ * @param fn - the dispatch function.
+ * @param fn_arg - user-specified argument that will be passed to the
+ * dispatch function by the driver.
+ * @return Success: 0; Failure: 1
+ */
+int octeon_register_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode,
+ octeon_dispatch_fn_t fn, void *fn_arg);
+
+/** Remove registration for an opcode/subcode. This will delete the mapping for
+ * an opcode/subcode. The dispatch function will be unregistered and will no
+ * longer be called if a packet with the opcode/subcode arrives in the driver
+ * output queues.
+ * @param oct - the octeon device to unregister from.
+ * @param opcode - the opcode to be unregistered.
+ * @param subcode - the subcode to be unregistered.
+ *
+ * @return Success: 0; Failure: 1
+ */
+int octeon_unregister_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode);
+
+void octeon_droq_print_stats(void);
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq);
+
+int octeon_create_droq(struct octeon_device *oct, u32 q_no,
+ u32 num_descs, u32 desc_size, void *app_ctx);
+
+int octeon_droq_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 budget);
+
+int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
+ int cmd, u32 arg);
+
+#endif /*__OCTEON_DROQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
new file mode 100644
index 000000000..592fe49b5
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -0,0 +1,319 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_iq.h
+ * \brief Host Driver: Implementation of Octeon input queues. "Input" is
+ * with respect to the Octeon device on the NIC. From this driver's
+ * point of view they are egress queues.
+ */
+
+#ifndef __OCTEON_IQ_H__
+#define __OCTEON_IQ_H__
+
+#define IQ_STATUS_RUNNING 1
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+/*------------------------- INSTRUCTION QUEUE --------------------------*/
+
+/* \cond */
+
+#define REQTYPE_NONE 0
+#define REQTYPE_NORESP_NET 1
+#define REQTYPE_NORESP_NET_SG 2
+#define REQTYPE_RESP_NET 3
+#define REQTYPE_RESP_NET_SG 4
+#define REQTYPE_SOFT_COMMAND 5
+#define REQTYPE_LAST 5
+
+struct octeon_request_list {
+ u32 reqtype;
+ void *buf;
+};
+
+/* \endcond */
+
+/** Input Queue statistics. Each input queue has its own set of stats fields. */
+struct oct_iq_stats {
+ u64 instr_posted; /**< Instructions posted to this queue. */
+ u64 instr_processed; /**< Instructions processed in this queue. */
+ u64 instr_dropped; /**< Instructions that could not be processed */
+ u64 bytes_sent; /**< Bytes sent through this queue. */
+ u64 sgentry_sent;/**< Gather entries sent through this queue. */
+ u64 tx_done;/**< Num of packets sent to network. */
+	u64 tx_iq_busy;/**< Num of times this iq was found to be full. */
+	u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */
+	u64 tx_tot_bytes;/**< Total count of bytes sent to the network. */
+};
+
+#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) for
+ * an Octeon device has one such structure to represent it.
+*/
+struct octeon_instr_queue {
+ /** A spinlock to protect access to the input ring. */
+ spinlock_t lock;
+
+ /** Flag that indicates if the queue uses 64 byte commands. */
+ u32 iqcmd_64B:1;
+
+ /** Queue Number. */
+ u32 iq_no:5;
+
+ u32 rsvd:17;
+
+ /* Controls the periodic flushing of iq */
+ u32 do_auto_flush:1;
+
+ u32 status:8;
+
+ /** Maximum no. of instructions in this queue. */
+ u32 max_count;
+
+ /** Index in input ring where the driver should write the next packet */
+ u32 host_write_index;
+
+ /** Index in input ring where Octeon is expected to read the next
+ * packet.
+ */
+ u32 octeon_read_index;
+
+ /** This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u32 flush_index;
+
+ /** This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ u32 reset_instr_cnt;
+
+ /** Pointer to the Virtual Base addr of the input ring. */
+ u8 *base_addr;
+
+ struct octeon_request_list *request_list;
+
+ /** Octeon doorbell register for the ring. */
+ void __iomem *doorbell_reg;
+
+ /** Octeon instruction count register for this ring. */
+ void __iomem *inst_cnt_reg;
+
+ /** Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /** The max. number of instructions that can be held pending by the
+ * driver.
+ */
+ u32 fill_threshold;
+
+ /** The last time that the doorbell was rung. */
+ u64 last_db_time;
+
+ /** The doorbell timeout. If the doorbell was not rung for this time and
+ * fill_cnt is non-zero, ring the doorbell again.
+ */
+ u32 db_timeout;
+
+ /** Statistics for this input queue. */
+ struct oct_iq_stats stats;
+
+ /** DMA mapped base address of the input descriptor ring. */
+ u64 base_addr_dma;
+
+ /** Application context */
+ void *app_ctx;
+};
+
+/*---------------------- INSTRUCTION FORMAT ----------------------------*/
+
+/** 32-byte instruction format.
+ * Format of instruction for a 32-byte mode input queue.
+ */
+struct octeon_instr_32B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /** Input Request Header. Additional info about the input. */
+ u64 irh;
+
+};
+
+#define OCT_32B_INSTR_SIZE (sizeof(struct octeon_instr_32B))
+
+/** 64-byte instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ */
+struct octeon_instr_64B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih;
+
+ /** Input Request Header. */
+ u64 irh;
+
+ /** opcode/subcode specific parameters */
+ u64 ossp[2];
+
+ /** Return Data Parameters */
+ u64 rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ u64 reserved;
+
+};
+
+#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B))
+
+/** The size of each buffer in soft command buffer pool
+ */
+#define SOFT_COMMAND_BUFFER_SIZE 1024
+
+struct octeon_soft_command {
+ /** Soft command buffer info. */
+ struct list_head node;
+ u64 dma_addr;
+ u32 size;
+
+ /** Command and return status */
+ struct octeon_instr_64B cmd;
+#define COMPLETION_WORD_INIT 0xffffffffffffffffULL
+ u64 *status_word;
+
+ /** Data buffer info */
+ void *virtdptr;
+ u64 dmadptr;
+ u32 datasize;
+
+ /** Return buffer info */
+ void *virtrptr;
+ u64 dmarptr;
+ u32 rdatasize;
+
+ /** Context buffer info */
+ void *ctxptr;
+ u32 ctxsize;
+
+ /** Time out and callback */
+ size_t wait_time;
+ size_t timeout;
+ u32 iq_no;
+ void (*callback)(struct octeon_device *, u32, void *);
+ void *callback_arg;
+};
+
+/** Maximum number of buffers to allocate into soft command buffer pool
+ */
+#define MAX_SOFT_COMMAND_BUFFERS 16
+
+/** Head of a soft command buffer pool.
+ */
+struct octeon_sc_buffer_pool {
+	/** List structure to add/delete pending entries to */
+ struct list_head head;
+
+ /** A lock for this response list */
+ spinlock_t lock;
+
+ atomic_t alloc_buf_count;
+};
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
+int octeon_free_sc_buffer_pool(struct octeon_device *oct);
+struct octeon_soft_command *
+ octeon_alloc_soft_command(struct octeon_device *oct,
+ u32 datasize, u32 rdatasize,
+ u32 ctxsize);
+void octeon_free_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
+/**
+ * octeon_init_instr_queue()
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param iq_no - queue to be initialized (0 <= iq_no <= 3).
+ *
+ * Called at driver init time for each input queue. iq_conf has the
+ * configuration parameters for the queue.
+ *
+ * @return Success: 0 Failure: 1
+ */
+int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+ u32 num_descs);
+
+/**
+ * octeon_delete_instr_queue()
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param iq_no - queue to be deleted (0 <= iq_no <= 3).
+ *
+ * Called at driver unload time for each input queue. Deletes all
+ * allocated resources for the input queue.
+ *
+ * @return Success: 0 Failure: 1
+ */
+int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no);
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct);
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+ void (*fn)(void *));
+
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+ struct octeon_instr_queue *iq);
+
+int octeon_send_command(struct octeon_device *oct, u32 iq_no,
+ u32 force_db, void *cmd, void *buf,
+ u32 datasize, u32 reqtype);
+
+void octeon_prepare_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc,
+ u8 opcode, u8 subcode,
+ u32 irh_ossp, u64 ossp0,
+ u64 ossp1);
+
+int octeon_send_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
+int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
+ u32 num_descs, void *app_ctx);
+
+#endif /* __OCTEON_IQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
new file mode 100644
index 000000000..cbd081981
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -0,0 +1,237 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_main.h
+ * \brief Host Driver: This file is included by all host driver source files
+ * to include common definitions.
+ */
+
+#ifndef _OCTEON_MAIN_H_
+#define _OCTEON_MAIN_H_
+
+#if BITS_PER_LONG == 32
+#define CVM_CAST64(v) ((long long)(v))
+#elif BITS_PER_LONG == 64
+#define CVM_CAST64(v) ((long long)(long)(v))
+#else
+#error "Unknown system architecture"
+#endif
+
+#define DRV_NAME "LiquidIO"
+
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns 1 = enabled. 0 otherwise
+ */
+int octeon_console_debug_enabled(u32 console);
+
+/* BQL-related functions */
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl);
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl);
+
+/** Swap 8B blocks */
+static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
+{
+ while (blocks) {
+ cpu_to_be64s(data);
+ blocks--;
+ data++;
+ }
+}
+
+/**
+ * \brief unmaps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ */
+static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
+{
+ dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
+ baridx);
+
+ if (oct->mmio[baridx].done)
+ iounmap(oct->mmio[baridx].hw_addr);
+
+ if (oct->mmio[baridx].start)
+ pci_release_region(oct->pci_dev, baridx * 2);
+}
+
+/**
+ * \brief maps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ * @param max_map_len maximum length of mapped memory
+ */
+static inline int octeon_map_pci_barx(struct octeon_device *oct,
+ int baridx, int max_map_len)
+{
+ u32 mapped_len = 0;
+
+ if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
+ dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
+ baridx);
+ return 1;
+ }
+
+ oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
+ oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);
+
+ mapped_len = oct->mmio[baridx].len;
+ if (!mapped_len)
+ return 1;
+
+ if (max_map_len && (mapped_len > max_map_len))
+ mapped_len = max_map_len;
+
+ oct->mmio[baridx].hw_addr =
+ ioremap(oct->mmio[baridx].start, mapped_len);
+ oct->mmio[baridx].mapped_len = mapped_len;
+
+ dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
+ baridx, oct->mmio[baridx].start, mapped_len,
+ oct->mmio[baridx].len);
+
+ if (!oct->mmio[baridx].hw_addr) {
+ dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
+ baridx);
+ return 1;
+ }
+ oct->mmio[baridx].done = 1;
+
+ return 0;
+}
+
+static inline void *
+cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
+ u32 size,
+ u32 *alloc_size,
+ size_t *orig_ptr,
+ size_t *dma_addr __attribute__((unused)))
+{
+ int retries = 0;
+ void *ptr = NULL;
+
+#define OCTEON_MAX_ALLOC_RETRIES 1
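+	/* Page allocations are expected to be 8-byte aligned; if not, retry
+	 * once with 7 extra bytes so the returned pointer can be rounded up.
+	 */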
+ do {
+ ptr =
+ (void *)__get_free_pages(GFP_KERNEL,
+ get_order(size));
+ if ((unsigned long)ptr & 0x07) {
+ free_pages((unsigned long)ptr, get_order(size));
+ ptr = NULL;
+ /* Increment the size required if the first
+ * attempt failed.
+ */
+ if (!retries)
+ size += 7;
+ }
+ retries++;
+ } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
+
+ *alloc_size = size;
+ *orig_ptr = (unsigned long)ptr;
+ if ((unsigned long)ptr & 0x07)
+ ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
+ return ptr;
+}
+
+#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
+ free_pages(orig_ptr, get_order(size))
+
+static inline void
+sleep_cond(wait_queue_head_t *wait_queue, int *condition)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(wait_queue, &we);
+ while (!(ACCESS_ONCE(*condition))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ goto out;
+ schedule();
+ }
+out:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(wait_queue, &we);
+}
+
+static inline void
+sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(waitq, &we);
+ while (!atomic_read(pcond)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ goto out;
+ schedule();
+ }
+out:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(waitq, &we);
+}
+
+/* Give up the CPU for a timeout period.
+ * Sleep for the timeout only if the condition is not already true.
+ */
+static inline void
+sleep_timeout_cond(wait_queue_head_t *wait_queue,
+ int *condition,
+ int timeout)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(wait_queue, &we);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!(*condition))
+ schedule_timeout(timeout);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(wait_queue, &we);
+}
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#ifndef ROUNDUP8
+#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
+#endif
+
+#ifndef ROUNDUP16
+#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
+#endif
+
+#ifndef ROUNDUP128
+#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
+#endif
+
+#endif /* _OCTEON_MAIN_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
new file mode 100644
index 000000000..5aecef870
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -0,0 +1,199 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
+
+static inline void
+octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
+ u32 idx __attribute__((unused)))
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 mask;
+
+ mask = oct->fn_list.bar1_idx_read(oct, idx);
+ mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
+ oct->fn_list.bar1_idx_write(oct, idx, mask);
+#endif
+}
+
+static void
+octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
+ u8 *hostbuf, u32 len)
+{
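+	/* Write unaligned leading bytes one at a time, switch the BAR1 swap
+	 * mode and copy 64 bits at a time, then toggle the swap mode back and
+	 * finish with any trailing bytes.
+	 */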
+ while ((len) && ((unsigned long)mapped_addr) & 7) {
+ writeb(*(hostbuf++), mapped_addr++);
+ len--;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len >= 8) {
+ writeq(*((u64 *)hostbuf), mapped_addr);
+ mapped_addr += 8;
+ hostbuf += 8;
+ len -= 8;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len--)
+ writeb(*(hostbuf++), mapped_addr++);
+}
+
+static void
+octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
+ u8 *hostbuf, u32 len)
+{
+ while ((len) && ((unsigned long)mapped_addr) & 7) {
+ *(hostbuf++) = readb(mapped_addr++);
+ len--;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len >= 8) {
+ *((u64 *)hostbuf) = readq(mapped_addr);
+ mapped_addr += 8;
+ hostbuf += 8;
+ len -= 8;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len--)
+ *(hostbuf++) = readb(mapped_addr++);
+}
+
+/* Core mem read/write with temporary bar1 settings. */
+/* op = 1 to read, op = 0 to write. */
+static void
+__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
+ u8 *hostbuf, u32 len, u32 op)
+{
+ u32 copy_len = 0, index_reg_val = 0;
+ unsigned long flags;
+ u8 __iomem *mapped_addr;
+
+ spin_lock_irqsave(&oct->mem_access_lock, flags);
+
+ /* Save the original index reg value. */
+ index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
+ do {
+ oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
+ mapped_addr = oct->mmio[1].hw_addr
+ + (MEMOPS_IDX << 22) + (addr & 0x3fffff);
+
+ /* If the operation crosses a 4MB boundary, split the transfer
+ * at the 4MB boundary.
+ */
+ if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
+ copy_len = (u32)(((addr & ~(0x3fffff)) +
+ (MEMOPS_IDX << 22)) - addr);
+ } else {
+ copy_len = len;
+ }
+
+ if (op) { /* read from core */
+ octeon_pci_fastread(oct, mapped_addr, hostbuf,
+ copy_len);
+ } else {
+ octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
+ copy_len);
+ }
+
+ len -= copy_len;
+ addr += copy_len;
+ hostbuf += copy_len;
+
+ } while (len);
+
+ oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);
+
+ spin_unlock_irqrestore(&oct->mem_access_lock, flags);
+}
+
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len)
+{
+ __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
+}
+
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len)
+{
+ __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
+}
+
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
+{
+ __be64 ret;
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);
+
+ return be64_to_cpu(ret);
+}
+
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
+{
+ __be32 ret;
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);
+
+ return be32_to_cpu(ret);
+}
+
+void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
+ u32 val)
+{
+ __be32 t = cpu_to_be32(val);
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
new file mode 100644
index 000000000..11b183377
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
@@ -0,0 +1,75 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_mem_ops.h
+ * \brief Host Driver: Routines used to read/write Octeon memory.
+ */
+
+#ifndef __OCTEON_MEM_OPS_H__
+#define __OCTEON_MEM_OPS_H__
+
+/** Read a 64-bit value from a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to read from.
+ *
+ * The BAR1 index register used internally selects the range of addresses
+ * in which core_addr is mapped.
+ *
+ * @return 64-bit value read from Core memory
+ */
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 core_addr);
+
+/** Read a 32-bit value from a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to read from.
+ *
+ * @return 32-bit value read from Core memory
+ */
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 core_addr);
+
+/** Write a 32-bit value to a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to write to.
+ * @param val - 32-bit value to write.
+ */
+void
+octeon_write_device_mem32(struct octeon_device *oct,
+ u64 core_addr,
+ u32 val);
+
+/** Read multiple bytes from Octeon memory.
+ */
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len);
+
+/** Write multiple bytes into Octeon memory.
+ */
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
new file mode 100644
index 000000000..b3abe5818
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -0,0 +1,224 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_network.h
+ * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
+ */
+
+#ifndef __OCTEON_NETWORK_H__
+#define __OCTEON_NETWORK_H__
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/ptp_clock_kernel.h>
+
+/** LiquidIO per-interface network private data */
+struct lio {
+ /** State of the interface. Rx/Tx happens only in the RUNNING state. */
+ atomic_t ifstate;
+
+ /** Octeon Interface index number. This device will be represented as
+ * oct<ifidx> in the system.
+ */
+ int ifidx;
+
+ /** Octeon Input queue to use to transmit for this network interface. */
+ int txq;
+
+ /** Octeon Output queue from which pkts arrive
+ * for this network interface.
+ */
+ int rxq;
+
+ /** Guards the glist */
+ spinlock_t lock;
+
+ /** Linked list of gather components */
+ struct list_head glist;
+
+ /** Pointer to the NIC properties for the Octeon device this network
+ * interface is associated with.
+ */
+ struct octdev_props *octprops;
+
+ /** Pointer to the octeon device structure. */
+ struct octeon_device *oct_dev;
+
+ struct net_device *netdev;
+
+ /** Link information sent by the core application for this interface. */
+ struct oct_link_info linfo;
+
+ /** Size of Tx queue for this octeon device. */
+ u32 tx_qsize;
+
+ /** Size of Rx queue for this octeon device. */
+ u32 rx_qsize;
+
+ /** MTU of this octeon device. */
+ u32 mtu;
+
+ /** msg level flag per interface. */
+ u32 msg_enable;
+
+ /** Copy of interface capabilities: TSO, TSO6, LRO, checksums. */
+ u64 dev_capability;
+
+ /** Copy of beacon reg in phy */
+ u32 phy_beacon_val;
+
+ /** Copy of ctrl reg in phy */
+ u32 led_ctrl_val;
+
+ /* PTP clock information */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ s64 ptp_adjust;
+
+ /* for atomic access to Octeon PTP reg and data struct */
+ spinlock_t ptp_lock;
+
+ /* Interface info */
+ u32 intf_open;
+
+ /* work queue for txq status */
+ struct cavium_wq txq_status_wq;
+
+};
+
+#define LIO_SIZE (sizeof(struct lio))
+#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
+
+/**
+ * \brief Enable or disable feature
+ * @param netdev pointer to network device
+ * @param cmd Command that just requires acknowledgment
+ */
+int liquidio_set_feature(struct net_device *netdev, int cmd);
+
+/**
+ * \brief Link control command completion callback
+ * @param nctrl_ptr pointer to control packet structure
+ *
+ * This routine is called by the callback function when a ctrl pkt sent to
+ * core app completes. The nctrl_ptr contains a copy of the command type
+ * and data sent to the core app. This routine is only called if the ctrl
+ * pkt was sent successfully to the core app.
+ */
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
+
+/**
+ * \brief Register ethtool operations
+ * @param netdev pointer to network device
+ */
+void liquidio_set_ethtool_ops(struct net_device *netdev);
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
+ u32 q_no __attribute__((unused)), u32 size)
+{
+#define SKB_ADJ_MASK 0x3F
+#define SKB_ADJ (SKB_ADJ_MASK + 1)
+
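+ /* Over-allocate so skb->data can be pushed up to the next 64-byte
+ * boundary below.
+ */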
+ struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+
+ if (!skb)
+ return NULL;
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ return (void *)skb;
+}
+
+static inline void recv_buffer_free(void *buffer)
+{
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+#define lio_dma_alloc(oct, size, dma_addr) \
+ dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL)
+#define lio_dma_free(oct, size, virt_addr, dma_addr) \
+ dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
+
+#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data)
+
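+/* DMA-map the info structure of ring entry i and return its bus address. */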
+static inline u64
+lio_map_ring_info(struct octeon_droq *droq, u32 i)
+{
+ dma_addr_t dma_addr;
+ struct octeon_device *oct = droq->oct_dev;
+
+ dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
+ OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
+
+ BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
+
+ return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring_info(struct pci_dev *pci_dev,
+ u64 info_ptr, u32 size)
+{
+ dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+}
+
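+/* DMA-map a receive buffer's data area so the device can fill it. */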
+static inline u64
+lio_map_ring(struct pci_dev *pci_dev,
+ void *buf, u32 size)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
+ DMA_FROM_DEVICE);
+
+ BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+
+ return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring(struct pci_dev *pci_dev,
+ u64 buf_ptr, u32 size)
+{
+ dma_unmap_single(&pci_dev->dev,
+ buf_ptr, size,
+ DMA_FROM_DEVICE);
+}
+
+static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 q_no, u32 size)
+{
+ return recv_buffer_alloc(oct, q_no, size);
+}
+
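+/* Append copy_len bytes from the receive buffer at index idx to the skb. */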
+static inline void octeon_fast_packet_next(struct octeon_droq *droq,
+ struct sk_buff *nicbuf,
+ int copy_len,
+ int idx)
+{
+ memcpy(skb_put(nicbuf, copy_len),
+ get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
+}
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
new file mode 100644
index 000000000..1a0191549
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -0,0 +1,189 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+ struct octeon_instr_64B *cmd,
+ size_t rdatasize)
+{
+ struct octeon_soft_command *sc;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, 0, rdatasize, 0);
+
+ if (!sc)
+ return NULL;
+
+ /* Copy existing command structure into the soft command */
+ memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+
+ /* Add in the response related fields. Opcode and Param are already
+ * there.
+ */
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh->rflag = 1; /* a response is required */
+ irh->len = 4; /* means four 64-bit words immediately follow irh */
+
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = rdatasize;
+
+ *sc->status_word = COMPLETION_WORD_INIT;
+
+ sc->wait_time = 1000;
+ sc->timeout = jiffies + sc->wait_time;
+
+ return sc;
+}
+
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ u32 xmit_more)
+{
+ int ring_doorbell;
+
+ ring_doorbell = !xmit_more;
+
+ return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
+ ndata->buf, ndata->datasize,
+ ndata->reqtype);
+}
+
+static void octnet_link_ctrl_callback(struct octeon_device *oct,
+ u32 status,
+ void *sc_ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr;
+ struct octnic_ctrl_pkt *nctrl;
+
+ nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr;
+
+ /* Call the callback function if status is OK.
+ * Status is OK only if a response was expected and core returned
+ * success.
+ * If no response was expected, status is OK if the command was posted
+ * successfully.
+ */
+ if (!status && nctrl->cb_fn)
+ nctrl->cb_fn(nctrl);
+
+ octeon_free_soft_command(oct, sc);
+}
+
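+/* Build a soft command that carries a NIC control command (plus any user
+ * data), byte-swap the command words for the device and hook up the
+ * link-control completion callback.
+ */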
+static inline struct octeon_soft_command
+*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams)
+{
+ struct octeon_soft_command *sc = NULL;
+ u8 *data;
+ size_t rdatasize;
+ u32 uddsize = 0, datasize = 0;
+
+ uddsize = (u32)(nctrl->ncmd.s.more * 8);
+
+ datasize = OCTNET_CMD_SIZE + uddsize;
+ rdatasize = (nctrl->wait_time) ? 16 : 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, datasize, rdatasize,
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return NULL;
+
+ memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt));
+
+ data = (u8 *)sc->virtdptr;
+
+ memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+
+ octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
+
+ if (uddsize) {
+ /* Endian-Swap for UDD should have been done by caller. */
+ memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
+ }
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
+ 0, 0, 0);
+
+ sc->callback = octnet_link_ctrl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = nctrl->wait_time;
+
+ return sc;
+}
+
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams)
+{
+ int retval;
+ struct octeon_soft_command *sc = NULL;
+
+ sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
+ __func__);
+ return -1;
+ }
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval) {
+ octeon_free_soft_command(oct, sc);
+ dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
+ __func__, retval);
+ return -1;
+ }
+
+ return retval;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
new file mode 100644
index 000000000..0238857c8
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -0,0 +1,227 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_nic.h
+ * \brief Host NIC Driver: Routine to send network data &
+ * control packet to Octeon.
+ */
+
+#ifndef __OCTEON_NIC_H__
+#define __OCTEON_NIC_H__
+
+/* Maximum number of 8-byte words that can be sent in a NIC control message.
+ */
+#define MAX_NCTRL_UDD 32
+
+typedef void (*octnic_ctrl_pkt_cb_fn_t) (void *);
+
+/* Structure of control information passed by the NIC module to the OSI
+ * layer when sending control commands to Octeon device software.
+ */
+struct octnic_ctrl_pkt {
+ /** Command to be passed to the Octeon device software. */
+ union octnet_cmd ncmd;
+
+ /** Send buffer */
+ void *data;
+ u64 dmadata;
+
+ /** Response buffer */
+ void *rdata;
+ u64 dmardata;
+
+ /** Additional data that may be needed by some commands. */
+ u64 udd[MAX_NCTRL_UDD];
+
+ /** Time to wait for Octeon software to respond to this control command.
+ * If wait_time is 0, OSI assumes no response is expected.
+ */
+ size_t wait_time;
+
+ /** The network device that issued the control command. */
+ u64 netpndev;
+
+ /** Callback function called when the command has been fetched */
+ octnic_ctrl_pkt_cb_fn_t cb_fn;
+};
+
+#define MAX_UDD_SIZE(nctrl) (sizeof(nctrl->udd))
+
+/** Structure of data information passed by the NIC module to the OSI
+ * layer when forwarding data to Octeon device software.
+ */
+struct octnic_data_pkt {
+ /** Pointer to information maintained by NIC module for this packet. The
+ * OSI layer passes this as-is to the driver.
+ */
+ void *buf;
+
+ /** Type of buffer passed in "buf" above. */
+ u32 reqtype;
+
+ /** Total data bytes to be transferred in this command. */
+ u32 datasize;
+
+ /** Command to be passed to the Octeon device software. */
+ struct octeon_instr_64B cmd;
+
+ /** Input queue to use to send this command. */
+ u32 q_no;
+
+};
+
+/** Structure passed by NIC module to OSI layer to prepare a command to send
+ * network data to Octeon.
+ */
+union octnic_cmd_setup {
+ struct {
+ u32 ifidx:8;
+ u32 cksum_offset:7;
+ u32 gather:1;
+ u32 timestamp:1;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ip_csum:1;
+ u32 tnl_csum:1;
+
+ u32 rsvd:11;
+ union {
+ u32 datasize;
+ u32 gatherptrs;
+ } u;
+ } s;
+
+ u64 u64;
+
+};
+
+struct octnic_ctrl_params {
+ u32 resp_order;
+};
+
+static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
+{
+ return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
+ >= (oct->instr_queue[q_no]->max_count - 2));
+}
+
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param tag - tag to use for the command; 0 selects a default tag based on the interface index
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ union octnic_packet_params packet_params;
+
+ memset(cmd, 0, sizeof(struct octeon_instr_64B));
+
+ ih = (struct octeon_instr_ih *)&cmd->ih;
+
+ /* Assume that rflag is cleared, so the front data will only have
+ * irh, ossp[0] and ossp[1] for a total of 24 bytes.
+ */
+ ih->fsz = 24;
+
+ ih->tagtype = ORDERED_TAG;
+ ih->grp = DEFAULT_POW_GRP;
+
+ if (tag)
+ ih->tag = tag;
+ else
+ ih->tag = LIO_DATA(setup->s.ifidx);
+
+ ih->raw = 1;
+ ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */
+
+ if (!setup->s.gather) {
+ ih->dlengsz = setup->s.u.datasize;
+ } else {
+ ih->gather = 1;
+ ih->dlengsz = setup->s.u.gatherptrs;
+ }
+
+ irh = (struct octeon_instr_irh *)&cmd->irh;
+
+ irh->opcode = OPCODE_NIC;
+ irh->subcode = OPCODE_NIC_NW_DATA;
+
+ packet_params.u32 = 0;
+
+ if (setup->s.cksum_offset) {
+ packet_params.s.csoffset = setup->s.cksum_offset;
+ packet_params.s.ipv4opts_ipv6exthdr =
+ setup->s.ipv4opts_ipv6exthdr;
+ }
+
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.ifidx = setup->s.ifidx;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.u32;
+}
+
+/** Allocate a soft command with space for a response immediately following
+ * the command.
+ * @param oct - octeon device pointer
+ * @param cmd - pointer to the command structure, pre-filled for everything
+ * except the response.
+ * @param rdatasize - size in bytes of the response.
+ *
+ * @returns pointer to allocated buffer with command copied into it, and
+ * response space immediately following.
+ */
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+ struct octeon_instr_64B *cmd,
+ size_t rdatasize);
+
+/** Send a NIC data packet to the device
+ * @param oct - octeon device pointer
+ * @param ndata - control structure with queueing, and buffer information
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue, IQ_STOP if the
+ * queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata, u32 xmit_more);
+
+/** Send a NIC control packet to the device
+ * @param oct - octeon device pointer
+ * @param nctrl - control structure with command, timeout, and callback info
+ * @param nparams - response control structure
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue, IQ_STOP if the
+ * queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
new file mode 100644
index 000000000..a2a24652c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -0,0 +1,765 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
+ (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
+struct iq_post_status {
+ int status;
+ int index;
+};
+
+static void check_db_timeout(struct work_struct *work);
+static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
+
+static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
+
+static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
+{
+ struct octeon_instr_queue *iq =
+ (struct octeon_instr_queue *)oct->instr_queue[iq_no];
+ return iq->iqcmd_64B;
+}
+
+#define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no))
+
+/* Define this to return the request status compatible with old code */
+/*#define OCTEON_USE_OLD_REQ_STATUS*/
+
+/* Return 0 on success, 1 on failure */
+int octeon_init_instr_queue(struct octeon_device *oct,
+ u32 iq_no, u32 num_descs)
+{
+ struct octeon_instr_queue *iq;
+ struct octeon_iq_config *conf = NULL;
+ u32 q_size;
+ struct cavium_wq *db_wq;
+
+ if (OCTEON_CN6XXX(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
+
+ if (!conf) {
+ dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
+ oct->chip_id);
+ return 1;
+ }
+
+ if (num_descs & (num_descs - 1)) {
+ dev_err(&oct->pci_dev->dev,
+ "Number of descriptors for instr queue %d not in power of 2.\n",
+ iq_no);
+ return 1;
+ }
+
+ q_size = (u32)conf->instr_type * num_descs;
+
+ iq = oct->instr_queue[iq_no];
+
+ iq->base_addr = lio_dma_alloc(oct, q_size,
+ (dma_addr_t *)&iq->base_addr_dma);
+ if (!iq->base_addr) {
+ dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
+ iq_no);
+ return 1;
+ }
+
+ iq->max_count = num_descs;
+
+ /* Initialize a list to hold requests that have been posted to Octeon
+ * but have not yet been fetched by Octeon.
+ */
+ iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+ if (!iq->request_list) {
+ lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+ dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
+ iq_no);
+ return 1;
+ }
+
+ memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
+
+ dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
+ iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+
+ iq->iq_no = iq_no;
+ iq->fill_threshold = (u32)conf->db_min;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octeon_read_index = 0;
+ iq->flush_index = 0;
+ iq->last_db_time = 0;
+ iq->do_auto_flush = 1;
+ iq->db_timeout = (u32)conf->db_timeout;
+ atomic_set(&iq->instr_pending, 0);
+
+ /* Initialize the spinlock for this instruction queue */
+ spin_lock_init(&iq->lock);
+
+ oct->io_qmask.iq |= (1 << iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+
+ oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
+ if (!oct->check_db_wq[iq_no].wq) {
+ lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+ dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
+ iq_no);
+ return 1;
+ }
+
+ db_wq = &oct->check_db_wq[iq_no];
+
+ INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
+ db_wq->wk.ctxptr = oct;
+ db_wq->wk.ctxul = iq_no;
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+
+ return 0;
+}
+
+int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
+{
+ u64 desc_size = 0, q_size;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
+ flush_workqueue(oct->check_db_wq[iq_no].wq);
+ destroy_workqueue(oct->check_db_wq[iq_no].wq);
+
+ if (OCTEON_CN6XXX(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
+
+ vfree(iq->request_list);
+
+ if (iq->base_addr) {
+ q_size = iq->max_count * desc_size;
+ lio_dma_free(oct, (u32)q_size, iq->base_addr,
+ iq->base_addr_dma);
+ return 0;
+ }
+ return 1;
+}
+
+/* Return 0 on success, 1 on failure */
+int octeon_setup_iq(struct octeon_device *oct,
+ u32 iq_no,
+ u32 num_descs,
+ void *app_ctx)
+{
+ if (oct->instr_queue[iq_no]) {
+ dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
+ iq_no);
+ oct->instr_queue[iq_no]->app_ctx = app_ctx;
+ return 0;
+ }
+ oct->instr_queue[iq_no] =
+ vmalloc(sizeof(struct octeon_instr_queue));
+ if (!oct->instr_queue[iq_no])
+ return 1;
+
+ memset(oct->instr_queue[iq_no], 0,
+ sizeof(struct octeon_instr_queue));
+
+ oct->instr_queue[iq_no]->app_ctx = app_ctx;
+ if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
+ vfree(oct->instr_queue[iq_no]);
+ oct->instr_queue[iq_no] = NULL;
+ return 1;
+ }
+
+ oct->num_iqs++;
+ oct->fn_list.enable_io_queues(oct);
+ return 0;
+}
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct)
+{
+ int i, retry = 1000, pending, instr_cnt = 0;
+
+ do {
+ instr_cnt = 0;
+
+ /*for (i = 0; i < oct->num_iqs; i++) {*/
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ pending =
+ atomic_read(&oct->instr_queue[i]->instr_pending);
+ if (pending)
+ __check_db_timeout(oct, i);
+ instr_cnt += pending;
+ }
+
+ if (instr_cnt == 0)
+ break;
+
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && instr_cnt);
+
+ return instr_cnt;
+}
+
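+/* Write the accumulated fill count to the queue's doorbell register and
+ * reset it; only done while the device is in the RUNNING state.
+ */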
+static inline void
+ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+ if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ /* make sure doorbell write goes through */
+ mmiowb();
+ iq->fill_cnt = 0;
+ iq->last_db_time = jiffies;
+ return;
+ }
+}
+
+static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
+ u8 *cmd)
+{
+ u8 *iqptr, cmdsize;
+
+ cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+ iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+ memcpy(iqptr, cmd, cmdsize);
+}
+
+static inline int
+__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
+ struct octeon_instr_queue *iq,
+ u32 force_db __attribute__((unused)), u8 *cmd)
+{
+ u32 index = -1;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if queue gets full before Octeon could fetch any instr.
+ */
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
+ return -1;
+
+ __copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ index = iq->host_write_index;
+ INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ wmb();
+
+ atomic_inc(&iq->instr_pending);
+
+ return index;
+}
+
+static inline struct iq_post_status
+__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
+ struct octeon_instr_queue *iq,
+ u32 force_db __attribute__((unused)), u8 *cmd)
+{
+ struct iq_post_status st;
+
+ st.status = IQ_SEND_OK;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if queue gets full before Octeon could fetch any instr.
+ */
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
+ st.status = IQ_SEND_FAILED;
+ st.index = -1;
+ return st;
+ }
+
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
+ st.status = IQ_SEND_STOP;
+
+ __copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ st.index = iq->host_write_index;
+ INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ wmb();
+
+ atomic_inc(&iq->instr_pending);
+
+ return st;
+}
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+ void (*fn)(void *))
+{
+ if (reqtype > REQTYPE_LAST) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
+ __func__, reqtype);
+ return -EINVAL;
+ }
+
+ reqtype_free_fn[oct->octeon_id][reqtype] = fn;
+
+ return 0;
+}
+
+static inline void
+__add_to_request_list(struct octeon_instr_queue *iq,
+ int idx, void *buf, int reqtype)
+{
+ iq->request_list[idx].buf = buf;
+ iq->request_list[idx].reqtype = reqtype;
+}
+
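+/* Walk the request list from flush_index up to the device's read index,
+ * freeing completed network buffers and dispatching soft commands to the
+ * ordered response list or their callbacks; returns the number of entries
+ * processed.
+ */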
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+ struct octeon_instr_queue *iq)
+{
+ int reqtype;
+ void *buf;
+ u32 old = iq->flush_index;
+ u32 inst_count = 0;
+ unsigned pkts_compl = 0, bytes_compl = 0;
+ struct octeon_soft_command *sc;
+ struct octeon_instr_irh *irh;
+
+ while (old != iq->octeon_read_index) {
+ reqtype = iq->request_list[old].reqtype;
+ buf = iq->request_list[old].buf;
+
+ if (reqtype == REQTYPE_NONE)
+ goto skip_this;
+
+ octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
+ &bytes_compl);
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ case REQTYPE_RESP_NET_SG:
+ reqtype_free_fn[oct->octeon_id][reqtype](buf);
+ break;
+ case REQTYPE_RESP_NET:
+ case REQTYPE_SOFT_COMMAND:
+ sc = buf;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ if (irh->rflag) {
+ /* We're expecting a response from Octeon.
+ * It's up to lio_process_ordered_list() to
+ * process sc. Add sc to the ordered soft
+ * command response list because we expect
+ * a response from Octeon.
+ */
+ spin_lock_bh(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock);
+ atomic_inc(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].
+ pending_req_count);
+ list_add_tail(&sc->node, &oct->response_list
+ [OCTEON_ORDERED_SC_LIST].head);
+ spin_unlock_bh(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock);
+ } else {
+ if (sc->callback) {
+ sc->callback(oct, OCTEON_REQUEST_DONE,
+ sc->callback_arg);
+ }
+ }
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev,
+ "%s Unknown reqtype: %d buf: %p at idx %d\n",
+ __func__, reqtype, buf, old);
+ }
+
+ iq->request_list[old].buf = NULL;
+ iq->request_list[old].reqtype = 0;
+
+ skip_this:
+ inst_count++;
+ INCR_INDEX_BY1(old, iq->max_count);
+ }
+ if (bytes_compl)
+ octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
+ bytes_compl);
+ iq->flush_index = old;
+
+ return inst_count;
+}
+
+static inline void
+update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+ u32 inst_processed = 0;
+
+ /* Calculate how many commands Octeon has read and move the read index
+ * accordingly.
+ */
+ iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
+
+ /* Move the NORESPONSE requests to the per-device completion list. */
+ if (iq->flush_index != iq->octeon_read_index)
+ inst_processed = lio_process_iq_request_list(oct, iq);
+
+ if (inst_processed) {
+ atomic_sub(inst_processed, &iq->instr_pending);
+ iq->stats.instr_processed += inst_processed;
+ }
+}
+
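+/* If at least pending_thresh instructions are outstanding, refresh the
+ * device read index and complete any requests the device has fetched.
+ */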
+static void
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh)
+{
+ if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
+ spin_lock_bh(&iq->lock);
+ update_iq_indices(oct, iq);
+ spin_unlock_bh(&iq->lock);
+ }
+}
+
+static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
+{
+ struct octeon_instr_queue *iq;
+ u64 next_time;
+
+ if (!oct)
+ return;
+ iq = oct->instr_queue[iq_no];
+ if (!iq)
+ return;
+
+ /* If jiffies - last_db_time < db_timeout do nothing */
+ next_time = iq->last_db_time + iq->db_timeout;
+ if (!time_after(jiffies, (unsigned long)next_time))
+ return;
+ iq->last_db_time = jiffies;
+
+ /* Get the lock and prevent tasklets. This routine gets called from
+ * the poll thread; instructions can now be posted in tasklet context.
+ */
+ spin_lock_bh(&iq->lock);
+ if (iq->fill_cnt != 0)
+ ring_doorbell(oct, iq);
+
+ spin_unlock_bh(&iq->lock);
+
+ /* Flush the instruction queue */
+ if (iq->do_auto_flush)
+ octeon_flush_iq(oct, iq, 1);
+}
+
+/* Called by the Poll thread at regular intervals to check the instruction
+ * queue for commands to be posted and for commands that were fetched by Octeon.
+ */
+static void check_db_timeout(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ unsigned long iq_no = wk->ctxul;
+ struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+
+ __check_db_timeout(oct, iq_no);
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+}
+
+int
+octeon_send_command(struct octeon_device *oct, u32 iq_no,
+ u32 force_db, void *cmd, void *buf,
+ u32 datasize, u32 reqtype)
+{
+ struct iq_post_status st;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ spin_lock_bh(&iq->lock);
+
+ st = __post_command2(oct, iq, force_db, cmd);
+
+ if (st.status != IQ_SEND_FAILED) {
+ octeon_report_sent_bytes_to_bql(buf, reqtype);
+ __add_to_request_list(iq, st.index, buf, reqtype);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
+
+ if (iq->fill_cnt >= iq->fill_threshold || force_db)
+ ring_doorbell(oct, iq);
+ } else {
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
+ }
+
+ spin_unlock_bh(&iq->lock);
+
+ if (iq->do_auto_flush)
+ octeon_flush_iq(oct, iq, 2);
+
+ return st.status;
+}
+
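+/* Fill in the IH and IRH fields of a soft command (and the RDP when a
+ * response buffer is attached), then advance sc->iq_no to an enabled
+ * input queue.
+ */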
+void
+octeon_prepare_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc,
+ u8 opcode,
+ u8 subcode,
+ u32 irh_ossp,
+ u64 ossp0,
+ u64 ossp1)
+{
+ struct octeon_config *oct_cfg;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ BUG_ON(opcode > 15);
+ BUG_ON(subcode > 127);
+
+ oct_cfg = octeon_get_conf(oct);
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ ih->tagtype = ATOMIC_TAG;
+ ih->tag = LIO_CONTROL;
+ ih->raw = 1;
+ ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+
+ if (sc->datasize) {
+ ih->dlengsz = sc->datasize;
+ ih->rs = 1;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.ossp[0] = ossp0;
+ sc->cmd.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ irh->len = 4;
+ ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ } else {
+ irh->rflag = 0;
+ irh->len = 2;
+ ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ }
+
+ while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
+ sc->iq_no++;
+}
+
+int octeon_send_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc)
+{
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ if (ih->dlengsz) {
+ BUG_ON(!sc->dmadptr);
+ sc->cmd.dptr = sc->dmadptr;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ if (irh->rflag) {
+ BUG_ON(!sc->dmarptr);
+ BUG_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+ sc->cmd.rptr = sc->dmarptr;
+ }
+
+ if (sc->wait_time)
+ sc->timeout = jiffies + sc->wait_time;
+
+ return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+ (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
+}
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
+{
+ int i;
+ u64 dma_addr;
+ struct octeon_soft_command *sc;
+
+ INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+ spin_lock_init(&oct->sc_buf_pool.lock);
+ atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);
+
+ for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
+ sc = (struct octeon_soft_command *)
+ lio_dma_alloc(oct,
+ SOFT_COMMAND_BUFFER_SIZE,
+ (dma_addr_t *)&dma_addr);
+ if (!sc)
+ return 1;
+
+ sc->dma_addr = dma_addr;
+ sc->size = SOFT_COMMAND_BUFFER_SIZE;
+
+ list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+ }
+
+ return 0;
+}
+
+int octeon_free_sc_buffer_pool(struct octeon_device *oct)
+{
+ struct list_head *tmp, *tmp2;
+ struct octeon_soft_command *sc;
+
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
+ list_del(tmp);
+
+ sc = (struct octeon_soft_command *)tmp;
+
+ lio_dma_free(oct, sc->size, sc, sc->dma_addr);
+ }
+
+ INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+
+ return 0;
+}
+
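+/* Take a buffer from the soft-command pool and lay out the context area
+ * plus 128-byte-aligned data and response areas within it.
+ */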
+struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
+ u32 datasize,
+ u32 rdatasize,
+ u32 ctxsize)
+{
+ u64 dma_addr;
+ u32 size;
+ u32 offset = sizeof(struct octeon_soft_command);
+ struct octeon_soft_command *sc = NULL;
+ struct list_head *tmp;
+
+ BUG_ON((offset + datasize + rdatasize + ctxsize) >
+ SOFT_COMMAND_BUFFER_SIZE);
+
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ if (list_empty(&oct->sc_buf_pool.head)) {
+ spin_unlock(&oct->sc_buf_pool.lock);
+ return NULL;
+ }
+
+ list_for_each(tmp, &oct->sc_buf_pool.head)
+ break;
+
+ list_del(tmp);
+
+ atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+
+ sc = (struct octeon_soft_command *)tmp;
+
+ dma_addr = sc->dma_addr;
+ size = sc->size;
+
+ memset(sc, 0, sc->size);
+
+ sc->dma_addr = dma_addr;
+ sc->size = size;
+
+ if (ctxsize) {
+ sc->ctxptr = (u8 *)sc + offset;
+ sc->ctxsize = ctxsize;
+ }
+
+ /* Start data at 128 byte boundary */
+ offset = (offset + ctxsize + 127) & 0xffffff80;
+
+ if (datasize) {
+ sc->virtdptr = (u8 *)sc + offset;
+ sc->dmadptr = dma_addr + offset;
+ sc->datasize = datasize;
+ }
+
+ /* Start rdata at 128 byte boundary */
+ offset = (offset + datasize + 127) & 0xffffff80;
+
+ if (rdatasize) {
+ BUG_ON(rdatasize < 16);
+ sc->virtrptr = (u8 *)sc + offset;
+ sc->dmarptr = dma_addr + offset;
+ sc->rdatasize = rdatasize;
+ sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
+ }
+
+ return sc;
+}
+
+void octeon_free_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc)
+{
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+
+ atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
new file mode 100644
index 000000000..091f537a9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -0,0 +1,178 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+static void oct_poll_req_completion(struct work_struct *work);
+
+int octeon_setup_response_list(struct octeon_device *oct)
+{
+ int i, ret = 0;
+ struct cavium_wq *cwq;
+
+ for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
+ INIT_LIST_HEAD(&oct->response_list[i].head);
+ spin_lock_init(&oct->response_list[i].lock);
+ atomic_set(&oct->response_list[i].pending_req_count, 0);
+ }
+
+ oct->dma_comp_wq.wq = create_workqueue("dma-comp");
+ if (!oct->dma_comp_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
+ return -ENOMEM;
+ }
+
+ cwq = &oct->dma_comp_wq;
+ INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
+ cwq->wk.ctxptr = oct;
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+
+ return ret;
+}
+
+void octeon_delete_response_list(struct octeon_device *oct)
+{
+ cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
+ flush_workqueue(oct->dma_comp_wq.wq);
+ destroy_workqueue(oct->dma_comp_wq.wq);
+}
+
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+ u32 force_quit)
+{
+ struct octeon_response_list *ordered_sc_list;
+ struct octeon_soft_command *sc;
+ int request_complete = 0;
+ int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
+ u32 status;
+ u64 status64;
+ struct octeon_instr_rdp *rdp;
+
+ ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
+
+ do {
+ spin_lock_bh(&ordered_sc_list->lock);
+
+ if (ordered_sc_list->head.next == &ordered_sc_list->head) {
+ /* ordered_sc_list is empty; there is
+ * nothing to process
+ */
+ spin_unlock_bh(&ordered_sc_list->lock);
+ return 1;
+ }
+
+ sc = (struct octeon_soft_command *)ordered_sc_list->head.next;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+ status = OCTEON_REQUEST_PENDING;
+
+ /* check if octeon has finished DMA'ing a response
+ * to where rptr is pointing
+ */
+ dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
+ sc->cmd.rptr, rdp->rlen,
+ DMA_FROM_DEVICE);
+ status64 = *sc->status_word;
+
+ if (status64 != COMPLETION_WORD_INIT) {
+ if ((status64 & 0xff) != 0xff) {
+ octeon_swap_8B_data(&status64, 1);
+ if (((status64 & 0xff) != 0xff)) {
+ status = (u32)(status64 &
+ 0xffffffffULL);
+ }
+ }
+ } else if (force_quit || (sc->timeout &&
+ time_after(jiffies, (unsigned long)sc->timeout))) {
+ status = OCTEON_REQUEST_TIMEOUT;
+ }
+
+ if (status != OCTEON_REQUEST_PENDING) {
+ /* we have received a response or we have timed out */
+ /* remove node from linked list */
+ list_del(&sc->node);
+ atomic_dec(&octeon_dev->response_list
+ [OCTEON_ORDERED_SC_LIST].
+ pending_req_count);
+ spin_unlock_bh(&ordered_sc_list->lock);
+
+ if (sc->callback)
+ sc->callback(octeon_dev, status,
+ sc->callback_arg);
+
+ request_complete++;
+
+ } else {
+ /* no response yet */
+ request_complete = 0;
+ spin_unlock_bh(&ordered_sc_list->lock);
+ }
+
+ /* If we hit the max ordered requests to process in every loop,
+ * we quit and let this function be invoked the next time the
+ * poll thread runs, to process the remaining requests. This
+ * function can take up the entire CPU if there is no upper limit
+ * on the requests processed.
+ */
+ if (request_complete >= resp_to_process)
+ break;
+ } while (request_complete);
+
+ return 0;
+}
+
+static void oct_poll_req_completion(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ struct cavium_wq *cwq = &oct->dma_comp_wq;
+
+ lio_process_ordered_list(oct, 0);
+
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
new file mode 100644
index 000000000..7a48752dc
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -0,0 +1,140 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file response_manager.h
+ * \brief Host Driver: Response queues for host instructions.
+ */
+
+#ifndef __RESPONSE_MANAGER_H__
+#define __RESPONSE_MANAGER_H__
+
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function could run forever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define MAX_ORD_REQS_TO_PROCESS 4096
+
+/** Head of a response list. There are several response lists in the
+ * system: one for each response order (unordered, ordered) and one for
+ * noresponse entries on each instruction queue.
+ */
+struct octeon_response_list {
+ /** List structure to add/delete pending entries to */
+ struct list_head head;
+
+ /** A lock for this response list */
+ spinlock_t lock;
+
+ atomic_t pending_req_count;
+};
+
+/** The type of response list.
+ */
+enum {
+ OCTEON_ORDERED_LIST = 0,
+ OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
+ OCTEON_UNORDERED_BLOCKING_LIST = 2,
+ OCTEON_ORDERED_SC_LIST = 3
+};
+
+/** Response Order values for an Octeon Request. */
+enum {
+ OCTEON_RESP_ORDERED = 0,
+ OCTEON_RESP_UNORDERED = 1,
+ OCTEON_RESP_NORESPONSE = 2
+};
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ * 31 16 15 0
+ * ---------------------------------
+ * | | |
+ * ---------------------------------
+ * Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
+ * are reserved to identify the group to which the error code belongs. The
+ * lower 16-bits, called Minor Error Number, carry the actual code.
+ *
+ * So error codes are (MAJOR NUMBER << 16)| MINOR_NUMBER.
+ */
+
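+/* For example, DRIVER_ERROR_REQ_TIMEOUT below is (0x0000 << 16) | 0x0003,
+ * i.e. major number 0 (host driver) with minor number 3.
+ */
+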
+/*------------ Error codes used by host driver -----------------*/
+#define DRIVER_MAJOR_ERROR_CODE 0x0000
+
+/** A value of 0x00000000 indicates no error i.e. success */
+#define DRIVER_ERROR_NONE 0x00000000
+
+/** (Major number: 0x0000; Minor Number: 0x0001) */
+#define DRIVER_ERROR_REQ_PENDING 0x00000001
+#define DRIVER_ERROR_REQ_TIMEOUT 0x00000003
+#define DRIVER_ERROR_REQ_EINTR 0x00000004
+#define DRIVER_ERROR_REQ_ENXIO 0x00000006
+#define DRIVER_ERROR_REQ_ENOMEM 0x0000000C
+#define DRIVER_ERROR_REQ_EINVAL 0x00000016
+#define DRIVER_ERROR_REQ_FAILED 0x000000ff
+
+/** Status for a request.
+ * If a request is not queued to Octeon by the driver, the driver returns
+ * an error condition that is described by one of the OCTEON_REQ_ERR_*
+ * values below. If the request is successfully queued, the driver will
+ * return an OCTEON_REQUEST_PENDING status. OCTEON_REQUEST_TIMEOUT and
+ * OCTEON_REQUEST_INTERRUPTED are only returned by the driver if the
+ * response for the request failed to arrive before a time-out period or if
+ * the request processing got interrupted by a signal, respectively.
+ */
+enum {
+ OCTEON_REQUEST_DONE = (DRIVER_ERROR_NONE),
+ OCTEON_REQUEST_PENDING = (DRIVER_ERROR_REQ_PENDING),
+ OCTEON_REQUEST_TIMEOUT = (DRIVER_ERROR_REQ_TIMEOUT),
+ OCTEON_REQUEST_INTERRUPTED = (DRIVER_ERROR_REQ_EINTR),
+ OCTEON_REQUEST_NO_DEVICE = (0x00000021),
+ OCTEON_REQUEST_NOT_RUNNING,
+ OCTEON_REQUEST_INVALID_IQ,
+ OCTEON_REQUEST_INVALID_BUFCNT,
+ OCTEON_REQUEST_INVALID_RESP_ORDER,
+ OCTEON_REQUEST_NO_MEMORY,
+ OCTEON_REQUEST_INVALID_BUFSIZE,
+ OCTEON_REQUEST_NO_PENDING_ENTRY,
+ OCTEON_REQUEST_NO_IQ_SPACE = (0x7FFFFFFF)
+
+};
+
+/** Initialize the response lists and start the completion poll work.
+ * @param octeon_dev - the octeon device structure.
+ */
+int octeon_setup_response_list(struct octeon_device *octeon_dev);
+
+void octeon_delete_response_list(struct octeon_device *octeon_dev);
+
+/** Check the status of the first entry in the ordered list. If the
+ * instruction at that entry has finished processing or has timed out, the
+ * entry is cleaned.
+ * @param octeon_dev - the octeon device structure.
+ * @param force_quit - the request is forced to timeout if this is 1
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+ u32 force_quit);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
new file mode 100644
index 000000000..5c4615cca
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's Thunder ethernet device
+#
+
+obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
+obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
+obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+
+nicpf-y := nic_main.o
+nicvf-y := nicvf_main.o nicvf_queues.o
+nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
new file mode 100644
index 000000000..8aee25090
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_H
+#define NIC_H
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include "thunder_bgx.h"
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
+#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
+#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+#define PCI_MSIX_REG_BAR_NUM 4
+
+/* NIC SRIOV VF count */
+#define MAX_NUM_VFS_SUPPORTED 128
+#define DEFAULT_NUM_VF_ENABLED 8
+
+#define NIC_TNS_BYPASS_MODE 0
+#define NIC_TNS_MODE 1
+
+/* NIC priv flags */
+#define NIC_SRIOV_ENABLED BIT(0)
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS 64
+#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define NIC_MAX_PKIND 16
+
+/* Rx Channels */
+/* Receive channel configuration in TNS bypass mode
+ * BGX0-LMAC0-CHAN0 - VNIC CHAN0
+ * BGX0-LMAC1-CHAN0 - VNIC CHAN16
+ * ...
+ * BGX1-LMAC0-CHAN0 - VNIC CHAN128
+ * ...
+ * BGX1-LMAC3-CHAN0 - VNIC CHAN174
+ */
+#define NIC_INTF_COUNT 2 /* Interfaces between VNIC and TNS/BGX */
+#define NIC_CHANS_PER_INF 128
+#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+#define NIC_CPI_COUNT 2048 /* No of channel parse indices */
+
+/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
+#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
+#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
+#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
+#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
+
+/* Tx scheduling */
+#define NIC_MAX_TL4 1024
+#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
+#define NIC_MAX_TL3 256
+#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
+#define NIC_MAX_TL2 64
+#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
+#define NIC_MAX_TL1 2
+
+/* TNS bypass mode */
+#define NIC_TL2_PER_BGX 32
+#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
+#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+
+/* NIC VF Interrupts */
+#define NICVF_INTR_CQ 0
+#define NICVF_INTR_SQ 1
+#define NICVF_INTR_RBDR 2
+#define NICVF_INTR_PKT_DROP 3
+#define NICVF_INTR_TCP_TIMER 4
+#define NICVF_INTR_MBOX 5
+#define NICVF_INTR_QS_ERR 6
+
+#define NICVF_INTR_CQ_SHIFT 0
+#define NICVF_INTR_SQ_SHIFT 8
+#define NICVF_INTR_RBDR_SHIFT 16
+#define NICVF_INTR_PKT_DROP_SHIFT 20
+#define NICVF_INTR_TCP_TIMER_SHIFT 21
+#define NICVF_INTR_MBOX_SHIFT 22
+#define NICVF_INTR_QS_ERR_SHIFT 23
+
+#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
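+
+/* For example, the interrupt bit for completion queue 'n' (n < 8) is
+ * BIT(NICVF_INTR_CQ_SHIFT + n), which falls within NICVF_INTR_CQ_MASK.
+ */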
+
+/* MSI-X interrupts */
+#define NIC_PF_MSIX_VECTORS 10
+#define NIC_VF_MSIX_VECTORS 20
+
+#define NIC_PF_INTR_ID_ECC0_SBE 0
+#define NIC_PF_INTR_ID_ECC0_DBE 1
+#define NIC_PF_INTR_ID_ECC1_SBE 2
+#define NIC_PF_INTR_ID_ECC1_DBE 3
+#define NIC_PF_INTR_ID_ECC2_SBE 4
+#define NIC_PF_INTR_ID_ECC2_DBE 5
+#define NIC_PF_INTR_ID_ECC3_SBE 6
+#define NIC_PF_INTR_ID_ECC3_DBE 7
+#define NIC_PF_INTR_ID_MBOX0 8
+#define NIC_PF_INTR_ID_MBOX1 9
+
+/* Global timer for CQ timer thresh interrupts
+ * Calculated for an SCLK of 700MHz;
+ * the value written should be 1/16th of what is expected.
+ *
+ * 1 tick per 0.05usec = value of 2.2
+ * This 10% would be covered in CQ timer thresh value
+ */
+#define NICPF_CLK_PER_INT_TICK 2
+
+/* Time to wait before we decide that a SQ is stuck.
+ *
+ * Since both packet Rx and Tx notifications are done with the same CQ,
+ * when packets are being received at a very high rate (e.g. L2 forwarding)
+ * the freeing of transmitted skbs will be delayed and the watchdog
+ * will kick in, resetting the interface. Hence keep this value high.
+ */
+#define NICVF_TX_TIMEOUT (50 * HZ)
+
+struct nicvf_cq_poll {
+ u8 cq_idx; /* Completion queue index */
+ struct napi_struct napi;
+};
+
+#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
+#define NIC_MAX_RSS_HASH_BITS 8
+#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
+
+struct nicvf_rss_info {
+ bool enable;
+#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
+#define RSS_IP_HASH_ENA BIT(1)
+#define RSS_TCP_HASH_ENA BIT(2)
+#define RSS_TCP_SYN_DIS BIT(3)
+#define RSS_UDP_HASH_ENA BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
+#define RSS_ROCE_ENA BIT(6)
+#define RSS_L3_BI_DIRECTION_ENA BIT(7)
+#define RSS_L4_BI_DIRECTION_ENA BIT(8)
+ u64 cfg;
+ u8 hash_bits;
+ u16 rss_size;
+ u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ u64 key[RSS_HASH_KEY_SIZE];
+} ____cacheline_aligned_in_smp;
+
+enum rx_stats_reg_offset {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_RED = 0x4,
+ RX_RED_OCTS = 0x5,
+ RX_ORUN = 0x6,
+ RX_ORUN_OCTS = 0x7,
+ RX_FCS = 0x8,
+ RX_L2ERR = 0x9,
+ RX_DRP_BCAST = 0xa,
+ RX_DRP_MCAST = 0xb,
+ RX_DRP_L3BCAST = 0xc,
+ RX_DRP_L3MCAST = 0xd,
+ RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+ TX_OCTS = 0x0,
+ TX_UCAST = 0x1,
+ TX_BCAST = 0x2,
+ TX_MCAST = 0x3,
+ TX_DROP = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+ u64 rx_bytes_ok;
+ u64 rx_ucast_frames_ok;
+ u64 rx_bcast_frames_ok;
+ u64 rx_mcast_frames_ok;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+ u64 rx_drop_red;
+ u64 rx_drop_red_bytes;
+ u64 rx_drop_overrun;
+ u64 rx_drop_overrun_bytes;
+ u64 rx_drop_bcast;
+ u64 rx_drop_mcast;
+ u64 rx_drop_l3_bcast;
+ u64 rx_drop_l3_mcast;
+ u64 tx_bytes_ok;
+ u64 tx_ucast_frames_ok;
+ u64 tx_bcast_frames_ok;
+ u64 tx_mcast_frames_ok;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* Rx */
+ u64 rx_frames_ok;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_jumbo;
+ u64 rx_drops;
+ /* Tx */
+ u64 tx_frames_ok;
+ u64 tx_drops;
+ u64 tx_tso;
+ u64 txq_stop;
+ u64 txq_wake;
+};
+
+struct nicvf {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ u8 vf_id;
+ u8 node;
+ u8 tns_mode;
+ u16 mtu;
+ struct queue_set *qs;
+ void __iomem *reg_base;
+ bool link_up;
+ u8 duplex;
+ u32 speed;
+ struct page *rb_page;
+ u32 rb_page_offset;
+ bool rb_alloc_fail;
+ bool rb_work_scheduled;
+ struct delayed_work rbdr_work;
+ struct tasklet_struct rbdr_task;
+ struct tasklet_struct qs_err_task;
+ struct tasklet_struct cq_task;
+ struct nicvf_cq_poll *napi[8];
+ struct nicvf_rss_info rss_info;
+ u8 cpi_alg;
+ /* Interrupt coalescing settings */
+ u32 cq_coalesce_usecs;
+
+ u32 msg_enable;
+ struct nicvf_hw_stats stats;
+ struct nicvf_drv_stats drv_stats;
+ struct bgx_stats bgx_stats;
+ struct work_struct reset_task;
+
+ /* MSI-X */
+ bool msix_enabled;
+ u8 num_vec;
+ struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
+ char irq_name[NIC_VF_MSIX_VECTORS][20];
+ bool irq_allocated[NIC_VF_MSIX_VECTORS];
+
+ bool pf_ready_to_rcv_msg;
+ bool pf_acked;
+ bool pf_nacked;
+ bool bgx_stats_acked;
+ bool set_mac_pending;
+} ____cacheline_aligned_in_smp;
+
+/* PF <--> VF Mailbox communication
+ * Eight 64-bit registers are shared between PF and VF, with a separate
+ * set for each VF; writing '1' into the last register (mbx7) marks the
+ * end of a message. This driver uses only the first two registers
+ * (128 bits) per VF, see NIC_PF_VF_MAILBOX_SIZE and union nic_mbx below.
+ */
+
+#define NIC_PF_VF_MAILBOX_SIZE 2
+#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
+
+/* Mailbox message types */
+#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
+#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
+#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
+#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
+#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
+#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
+#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
+#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
+#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
+#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
+#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
+#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
+#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
+#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
+#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
+#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
+#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */
+
+struct nic_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 tns_mode;
+ u8 node_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+ u8 msg;
+ u8 num;
+ u64 cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 rq_num;
+ u64 cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 sq_num;
+ u64 cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rq_cnt;
+ u8 cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 hash_bits;
+ u8 tbl_len;
+ u8 tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
+ u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rx;
+ u8 idx;
+ u64 stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+ u8 msg;
+ u8 link_up;
+ u8 duplex;
+ u32 speed;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+ struct { u8 msg; } msg;
+ struct nic_cfg_msg nic_cfg;
+ struct qs_cfg_msg qs;
+ struct rq_cfg_msg rq;
+ struct sq_cfg_msg sq;
+ struct set_mac_msg mac;
+ struct set_frs_msg frs;
+ struct cpi_cfg_msg cpi_cfg;
+ struct rss_sz_msg rss_size;
+ struct rss_cfg_msg rss_cfg;
+ struct bgx_stats_msg bgx_stats;
+ struct bgx_link_status link_status;
+};
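+
+/* For example, a VF might compose a SET_MAC request like this, using
+ * nicvf_send_msg_to_pf() declared below:
+ *
+ *	union nic_mbx mbx = {};
+ *
+ *	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+ *	mbx.mac.vf_id = nic->vf_id;
+ *	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
+ *	nicvf_send_msg_to_pf(nic, &mbx);
+ */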
+
+#define NIC_NODE_ID_MASK 0x03
+#define NIC_NODE_ID_SHIFT 44
+
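+/* The node a NIC belongs to is encoded in bits 45:44 of its BAR0
+ * physical address (NIC_NODE_ID_SHIFT/NIC_NODE_ID_MASK above).
+ */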
+static inline int nic_get_node_id(struct pci_dev *pdev)
+{
+ u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
+ return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+int nicvf_open(struct net_device *netdev);
+int nicvf_stop(struct net_device *netdev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_config_rss(struct nicvf *nic);
+void nicvf_set_rss_key(struct nicvf *nic);
+void nicvf_set_ethtool_ops(struct net_device *netdev);
+void nicvf_update_stats(struct nicvf *nic);
+void nicvf_update_lmac_stats(struct nicvf *nic);
+
+#endif /* NIC_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
new file mode 100644
index 000000000..6e0c03169
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -0,0 +1,932 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nic"
+#define DRV_VERSION "1.0"
+
+struct nicpf {
+ struct pci_dev *pdev;
+ u8 rev_id;
+ u8 node;
+ unsigned int flags;
+ u8 num_vf_en; /* Number of VFs enabled */
+ bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
+ void __iomem *reg_base; /* Register start address */
+ struct pkind_cfg pkind;
+#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
+#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
+#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
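+/* For example, NIC_SET_VF_LMAC_MAP(1, 2) yields 0x12, from which
+ * NIC_GET_BGX_FROM_VF_LMAC_MAP() recovers BGX 1 and
+ * NIC_GET_LMAC_FROM_VF_LMAC_MAP() recovers LMAC 2.
+ */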
+ u8 vf_lmac_map[MAX_LMAC];
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+ u8 link[MAX_LMAC];
+ u8 duplex[MAX_LMAC];
+ u32 speed[MAX_LMAC];
+ u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
+ u16 rss_ind_tbl_size;
+ bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
+
+ /* MSI-X */
+ bool msix_enabled;
+ u8 num_vec;
+ struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS];
+ bool irq_allocated[NIC_PF_MSIX_VECTORS];
+};
+
+/* Supported devices */
+static const struct pci_device_id nic_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nic_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
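+
+/* Qset- and queue-indexed PF registers are addressed by OR-ing the indices
+ * into the block offset, for example:
+ *
+ *	nic_reg_write(nic, NIC_PF_QSET_0_127_RQ_0_7_CFG |
+ *		      (qs << NIC_QS_ID_SHIFT) | (rq << NIC_Q_NUM_SHIFT), cfg);
+ *
+ * as done in nic_handle_mbx_intr() below.
+ */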
+
+/* PF -> VF mailbox communication APIs */
+static void nic_enable_mbx_intr(struct nicpf *nic)
+{
+ /* Enable mailbox interrupt for all 128 VFs */
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+}
+
+static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
+{
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+ return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
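+
+/* With NIC_VF_NUM_SHIFT = 21, consecutive VFs' mailbox registers are 2MB
+ * apart in register space: VF0 at 0x20002030, VF1 at 0x20202030, and so on.
+ */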
+
+/* Send a mailbox message to VF
+ * @vf: VF to which this message is to be sent
+ * @mbx: Message to be sent
+ */
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+ void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
+ u64 *msg = (u64 *)mbx;
+
+ /* In the first revision of the HW, the mbox interrupt is triggered
+ * when the PF writes to MBOX(1); in later revisions it is triggered
+ * when the PF writes to MBOX(0).
+ */
+ if (nic->rev_id == 0) {
+ /* see the comment for nic_reg_write()/nic_reg_read()
+ * functions above
+ */
+ writeq_relaxed(msg[0], mbx_addr);
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ } else {
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ writeq_relaxed(msg[0], mbx_addr);
+ }
+}
+
+/* Responds to VF's READY message with VF's
+ * ID, node, MAC address etc.
+ * @vf: VF which sent READY message
+ */
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ int bgx_idx, lmac;
+ const char *mac;
+
+ mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+ mbx.nic_cfg.vf_id = vf;
+
+ mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+ if (mac)
+ ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+
+ mbx.nic_cfg.node_id = nic->node;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which the ACK is to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_ACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message when the PF is not able to
+ * complete the requested action
+ * @vf: VF to which the NACK is to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_NACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Flush all in-flight receive packets to memory and
+ * bring down an active RQ
+ */
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+ u16 timeout = ~0x00;
+
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+ /* Wait till sync cycle is finished */
+ while (timeout) {
+ if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+ break;
+ timeout--;
+ }
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+ if (!timeout) {
+ dev_err(&nic->pdev->dev, "Receive queue software sync failed");
+ return 1;
+ }
+ return 0;
+}
+
+/* Get BGX Rx/Tx stats and respond to VF's request */
+static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
+{
+ int bgx_idx, lmac;
+ union nic_mbx mbx = {};
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = bgx->vf_id;
+ mbx.bgx_stats.rx = bgx->rx;
+ mbx.bgx_stats.idx = bgx->idx;
+ if (bgx->rx)
+ mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ else
+ mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
+}
+
+/* Update hardware min/max frame size */
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+ if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
+ dev_err(&nic->pdev->dev,
+ "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
+ vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+ return 1;
+ }
+ new_frs += ETH_HLEN;
+ if (new_frs <= nic->pkind.maxlen)
+ return 0;
+
+ nic->pkind.maxlen = new_frs;
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+ return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+ int lmac;
+ u64 lmac_cfg;
+
+ /* Max value that can be set is 60 */
+ if (size > 60)
+ size = 60;
+
+ for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+ lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+ lmac_cfg &= ~(0xF << 2);
+ lmac_cfg |= ((size / 4) << 2);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+ }
+}
+
+/* Function to check number of LMACs present and set VF::LMAC mapping.
+ * Mapping will be used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+ unsigned int bgx_map = bgx_get_map(nic->node);
+ int bgx, next_bgx_lmac = 0;
+ int lmac, lmac_cnt = 0;
+ u64 lmac_credit;
+
+ nic->num_vf_en = 0;
+
+ for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+ if (!(bgx_map & (1 << bgx)))
+ continue;
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ for (lmac = 0; lmac < lmac_cnt; lmac++)
+ nic->vf_lmac_map[next_bgx_lmac++] =
+ NIC_SET_VF_LMAC_MAP(bgx, lmac);
+ nic->num_vf_en += lmac_cnt;
+
+ /* Program LMAC credits */
+ lmac_credit = (1ull << 1); /* channel credit enable */
+ lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
+ /* 48KB BGX Tx buffer size, each unit is of size 16bytes */
+ lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+ NIC_HW_MAX_FRS) / 16) << 12);
+ lmac = bgx * MAX_LMAC_PER_BGX;
+ for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+ lmac_credit);
+ }
+}
+
+#define BGX0_BLOCK 8
+#define BGX1_BLOCK 9
+
+static void nic_init_hw(struct nicpf *nic)
+{
+ int i;
+
+ /* Reset NIC, in case the driver is repeatedly inserted and removed */
+ nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
+
+ /* Enable NIC HW block */
+ nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+ /* Enable backpressure */
+ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+
+ /* Disable TNS mode on both interfaces */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+ (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+ (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
+ (1ULL << 63) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+ (1ULL << 63) | BGX1_BLOCK);
+
+ /* PKIND configuration */
+ nic->pkind.minlen = 0;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+ nic->pkind.lenerr_en = 1;
+ nic->pkind.rx_hdr = 0;
+ nic->pkind.hdr_sl = 0;
+
+ for (i = 0; i < NIC_MAX_PKIND; i++)
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
+ *(u64 *)&nic->pkind);
+
+ nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+ /* Timer config */
+ nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+ u32 vnic, bgx, lmac, chan;
+ u32 padd, cpi_count = 0;
+ u64 cpi_base, cpi, rssi_base, rssi;
+ u8 qset, rq_idx = 0;
+
+ vnic = cfg->vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+ chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+ cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
+ rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+
+ /* Rx channel configuration */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+ (1ull << 63) | (vnic << 0));
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+ ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+ if (cfg->cpi_alg == CPI_ALG_NONE)
+ cpi_count = 1;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+ cpi_count = 8;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+ cpi_count = 16;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
+ cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+ /* RSS Qset, Qidx mapping */
+ qset = cfg->vf_id;
+ rssi = rssi_base;
+ for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | rq_idx);
+ rq_idx++;
+ }
+
+ rssi = 0;
+ cpi = cpi_base;
+ for (; cpi < (cpi_base + cpi_count); cpi++) {
+ /* Determine port to channel adder */
+ if (cfg->cpi_alg != CPI_ALG_DIFF)
+ padd = cpi % cpi_count;
+ else
+ padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
+
+ /* Leave RSS_SIZE as '0' to disable RSS */
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+
+ if ((rssi + 1) >= cfg->rq_cnt)
+ continue;
+
+ if (cfg->cpi_alg == CPI_ALG_VLAN)
+ rssi++;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16)
+ rssi = ((cpi - cpi_base) & 0xe) >> 1;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF)
+ rssi = ((cpi - cpi_base) & 0x38) >> 3;
+ }
+ nic->cpi_base[cfg->vf_id] = cpi_base;
+}
+
+/* Responds to VF with its RSS indirection table size */
+static void nic_send_rss_size(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Receive side scaling configuration
+ * Configures:
+ * - RSS index
+ * - indirection table, i.e. hash::RQ mapping
+ * - number of hash bits to consider
+ */
+static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
+{
+ u8 qset, idx = 0;
+ u64 cpi_cfg, cpi_base, rssi_base, rssi;
+
+ cpi_base = nic->cpi_base[cfg->vf_id];
+ cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
+ rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+
+ rssi = rssi_base;
+ qset = cfg->vf_id;
+
+ for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
+ idx++;
+ }
+
+ cpi_cfg &= ~(0xFULL << 20);
+ cpi_cfg |= (cfg->hash_bits << 20);
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+}
+
+/* 4 level transmit side scheduler configuration
+ * for TNS bypass mode
+ *
+ * Sample configuration for SQ0
+ * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
+ * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+{
+ u32 bgx, lmac, chan;
+ u32 tl2, tl3, tl4;
+ u32 rr_quantum;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ /* 24 bytes for FCS, IPG and preamble */
+ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ tl4 += sq_idx;
+ tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+ nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+ ((u64)vnic << NIC_QS_ID_SHIFT) |
+ ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+ nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+ ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+ chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ /* Enable backpressure on the channel */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
+ tl2 = tl3 >> 2;
+ nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+ nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+ /* No priorities as of now */
+ nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ u64 reg_addr;
+ int bgx, lmac;
+ int i;
+ int ret = 0;
+
+ nic->mbx_lock[vf] = true;
+
+ mbx_addr = nic_get_mbx_addr(vf);
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nic_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+ __func__, mbx.msg.msg, vf);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic_mbx_send_ready(nic, vf);
+ nic->link[vf] = 0;
+ nic->duplex[vf] = 0;
+ nic->speed[vf] = 0;
+ ret = 1;
+ break;
+ case NIC_MBOX_MSG_QS_CFG:
+ reg_addr = NIC_PF_QSET_0_127_CFG |
+ (mbx.qs.num << NIC_QS_ID_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_BP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_SW_SYNC:
+ ret = nic_rcv_queue_sw_sync(nic);
+ break;
+ case NIC_MBOX_MSG_RQ_DROP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_SQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+ (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+ nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+ break;
+ case NIC_MBOX_MSG_SET_MAC:
+ lmac = mbx.mac.vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+ break;
+ case NIC_MBOX_MSG_SET_MAX_FRS:
+ ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+ mbx.frs.vf_id);
+ break;
+ case NIC_MBOX_MSG_CPI_CFG:
+ nic_config_cpi(nic, &mbx.cpi_cfg);
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic_send_rss_size(nic, vf);
+ goto unlock;
+ case NIC_MBOX_MSG_RSS_CFG:
+ case NIC_MBOX_MSG_RSS_CFG_CONT:
+ nic_config_rss(nic, &mbx.rss_cfg);
+ break;
+ case NIC_MBOX_MSG_CFG_DONE:
+ /* Last message of VF config msg sequence */
+ nic->vf_enabled[vf] = true;
+ goto unlock;
+ case NIC_MBOX_MSG_SHUTDOWN:
+ /* First msg in VF teardown sequence */
+ nic->vf_enabled[vf] = false;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nic_get_bgx_stats(nic, &mbx.bgx_stats);
+ goto unlock;
+ default:
+ dev_err(&nic->pdev->dev,
+ "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+ break;
+ }
+
+ if (!ret)
+ nic_mbx_send_ack(nic, vf);
+ else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+ nic_mbx_send_nack(nic, vf);
+unlock:
+ nic->mbx_lock[vf] = false;
+}
+
+static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
+{
+ u64 intr;
+ u8 vf, vf_per_mbx_reg = 64;
+
+ intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
+ dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
+ for (vf = 0; vf < vf_per_mbx_reg; vf++) {
+ if (intr & (1ULL << vf)) {
+ dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
+ vf + (mbx * vf_per_mbx_reg));
+ if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en)
+ break;
+ nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
+ nic_clear_mbx_intr(nic, vf, mbx);
+ }
+ }
+}
+
+static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+
+ nic_mbx_intr_handler(nic, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+
+ nic_mbx_intr_handler(nic, 1);
+
+ return IRQ_HANDLED;
+}
+
+static int nic_enable_msix(struct nicpf *nic)
+{
+ int i, ret;
+
+ nic->num_vec = NIC_PF_MSIX_VECTORS;
+
+ for (i = 0; i < nic->num_vec; i++)
+ nic->msix_entries[i].entry = i;
+
+ ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+ if (ret) {
+ dev_err(&nic->pdev->dev,
+ "Request for #%d msix vectors failed\n",
+ nic->num_vec);
+ return ret;
+ }
+
+ nic->msix_enabled = 1;
+ return 0;
+}
+
+static void nic_disable_msix(struct nicpf *nic)
+{
+ if (nic->msix_enabled) {
+ pci_disable_msix(nic->pdev);
+ nic->msix_enabled = 0;
+ nic->num_vec = 0;
+ }
+}
+
+static void nic_free_all_interrupts(struct nicpf *nic)
+{
+ int irq;
+
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->msix_entries[irq].vector, nic);
+ nic->irq_allocated[irq] = false;
+ }
+}
+
+static int nic_register_interrupts(struct nicpf *nic)
+{
+ int ret;
+
+ /* Enable MSI-X */
+ ret = nic_enable_msix(nic);
+ if (ret)
+ return ret;
+
+ /* Register mailbox interrupt handlers */
+ ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
+ nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+
+ ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
+ nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+
+ /* Enable mailbox interrupt */
+ nic_enable_mbx_intr(nic);
+ return 0;
+
+fail:
+ dev_err(&nic->pdev->dev, "Request irq failed\n");
+ nic_free_all_interrupts(nic);
+ return ret;
+}
+
+static void nic_unregister_interrupts(struct nicpf *nic)
+{
+ nic_free_all_interrupts(nic);
+ nic_disable_msix(nic);
+}
+
+static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
+{
+ int pos = 0;
+ int err;
+ u16 total_vf_cnt;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
+ return -ENODEV;
+ }
+
+ pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
+ if (total_vf_cnt < nic->num_vf_en)
+ nic->num_vf_en = total_vf_cnt;
+
+ if (!total_vf_cnt)
+ return 0;
+
+ err = pci_enable_sriov(pdev, nic->num_vf_en);
+ if (err) {
+ dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
+ nic->num_vf_en);
+ nic->num_vf_en = 0;
+ return err;
+ }
+
+ dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
+ nic->num_vf_en);
+
+ nic->flags |= NIC_SRIOV_ENABLED;
+ return 0;
+}
+
+/* Poll for BGX LMAC link status and update the corresponding VF
+ * if there is a change. Valid only if the internal L2 switch is not
+ * present; otherwise the VF link is always treated as up
+ */
+static void nic_poll_for_link(struct work_struct *work)
+{
+ union nic_mbx mbx = {};
+ struct nicpf *nic;
+ struct bgx_link_status link;
+ u8 vf, bgx, lmac;
+
+ nic = container_of(work, struct nicpf, dwork.work);
+
+ mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+ for (vf = 0; vf < nic->num_vf_en; vf++) {
+ /* Poll only if VF is UP */
+ if (!nic->vf_enabled[vf])
+ continue;
+
+ /* Get BGX, LMAC indices for the VF */
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ /* Get interface link status */
+ bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+ /* Inform VF only if link status changed */
+ if (nic->link[vf] == link.link_up)
+ continue;
+
+ if (!nic->mbx_lock[vf]) {
+ nic->link[vf] = link.link_up;
+ nic->duplex[vf] = link.duplex;
+ nic->speed[vf] = link.speed;
+
+ /* Send a mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+ }
+ }
+ queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
+}
+
+static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct nicpf *nic;
+ int err;
+
+ BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
+
+ nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
+ if (!nic)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, nic);
+
+ nic->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ /* MAP PF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
+
+ nic->node = nic_get_node_id(pdev);
+
+ nic_set_lmac_vf_mapping(nic);
+
+ /* Initialize hardware */
+ nic_init_hw(nic);
+
+ /* Set RSS TBL size for each VF */
+ nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+
+ /* Register interrupts */
+ err = nic_register_interrupts(nic);
+ if (err)
+ goto err_release_regions;
+
+ /* Configure SRIOV */
+ err = nic_sriov_init(pdev, nic);
+ if (err)
+ goto err_unregister_interrupts;
+
+ /* Register a physical link status poll fn() */
+ nic->check_link = alloc_workqueue("check_link_status",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!nic->check_link) {
+ err = -ENOMEM;
+ goto err_disable_sriov;
+ }
+
+ INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
+ queue_delayed_work(nic->check_link, &nic->dwork, 0);
+
+ return 0;
+
+err_disable_sriov:
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+err_unregister_interrupts:
+ nic_unregister_interrupts(nic);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void nic_remove(struct pci_dev *pdev)
+{
+ struct nicpf *nic = pci_get_drvdata(pdev);
+
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+
+ if (nic->check_link) {
+ /* Destroy work Queue */
+ cancel_delayed_work(&nic->dwork);
+ flush_workqueue(nic->check_link);
+ destroy_workqueue(nic->check_link);
+ }
+
+ nic_unregister_interrupts(nic);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver nic_driver = {
+ .name = DRV_NAME,
+ .id_table = nic_id_table,
+ .probe = nic_probe,
+ .remove = nic_remove,
+};
+
+static int __init nic_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nic_driver);
+}
+
+static void __exit nic_cleanup_module(void)
+{
+ pci_unregister_driver(&nic_driver);
+}
+
+module_init(nic_init_module);
+module_exit(nic_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
new file mode 100644
index 000000000..58197bb2f
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define NIC_PF_REG_COUNT 29573
+#define NIC_VF_REG_COUNT 249
+
+/* Physical function register offsets */
+#define NIC_PF_CFG (0x0000)
+#define NIC_PF_STATUS (0x0010)
+#define NIC_PF_INTR_TIMER_CFG (0x0030)
+#define NIC_PF_BIST_STATUS (0x0040)
+#define NIC_PF_SOFT_RESET (0x0050)
+#define NIC_PF_TCP_TIMER (0x0060)
+#define NIC_PF_BP_CFG (0x0080)
+#define NIC_PF_RRM_CFG (0x0088)
+#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CNM_CF (0x00A8)
+#define NIC_PF_CNM_STATUS (0x00B0)
+#define NIC_PF_CQ_AVG_CFG (0x00C0)
+#define NIC_PF_RRM_AVG_CFG (0x00C8)
+#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
+#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
+#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
+#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
+#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
+#define NIC_PF_MAILBOX_INT (0x0410)
+#define NIC_PF_MAILBOX_INT_W1S (0x0430)
+#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
+#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
+#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_PKIND_0_15_CFG (0x0600)
+#define NIC_PF_ECC0_FLIP0 (0x1000)
+#define NIC_PF_ECC1_FLIP0 (0x1008)
+#define NIC_PF_ECC2_FLIP0 (0x1010)
+#define NIC_PF_ECC3_FLIP0 (0x1018)
+#define NIC_PF_ECC0_FLIP1 (0x1080)
+#define NIC_PF_ECC1_FLIP1 (0x1088)
+#define NIC_PF_ECC2_FLIP1 (0x1090)
+#define NIC_PF_ECC3_FLIP1 (0x1098)
+#define NIC_PF_ECC0_CDIS (0x1100)
+#define NIC_PF_ECC1_CDIS (0x1108)
+#define NIC_PF_ECC2_CDIS (0x1110)
+#define NIC_PF_ECC3_CDIS (0x1118)
+#define NIC_PF_BIST0_STATUS (0x1280)
+#define NIC_PF_BIST1_STATUS (0x1288)
+#define NIC_PF_BIST2_STATUS (0x1290)
+#define NIC_PF_BIST3_STATUS (0x1298)
+#define NIC_PF_ECC0_SBE_INT (0x2000)
+#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
+#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
+#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
+#define NIC_PF_ECC0_DBE_INT (0x2100)
+#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
+#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
+#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
+#define NIC_PF_ECC1_SBE_INT (0x2200)
+#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
+#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
+#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
+#define NIC_PF_ECC1_DBE_INT (0x2300)
+#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
+#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
+#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
+#define NIC_PF_ECC2_SBE_INT (0x2400)
+#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
+#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
+#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
+#define NIC_PF_ECC2_DBE_INT (0x2500)
+#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
+#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
+#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
+#define NIC_PF_ECC3_SBE_INT (0x2600)
+#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
+#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
+#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
+#define NIC_PF_ECC3_DBE_INT (0x2700)
+#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
+#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
+#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_CPI_0_2047_CFG (0x200000)
+#define NIC_PF_RSSI_0_4097_RQ (0x220000)
+#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
+#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
+#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
+#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
+#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
+#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
+#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
+#define NIC_PF_SW_SYNC_RX (0x490000)
+#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
+#define NIC_PF_TL2_0_63_CFG (0x500000)
+#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
+#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
+#define NIC_PF_TL3_0_255_CFG (0x600000)
+#define NIC_PF_TL3_0_255_CHAN (0x620000)
+#define NIC_PF_TL3_0_255_PIR (0x640000)
+#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
+#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
+#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
+#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
+#define NIC_PF_TL4_0_1023_CFG (0x800000)
+#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
+#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
+#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
+#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
+#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
+#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
+#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
+#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
+#define NIC_PF_QSET_0_127_CFG (0x20010000)
+#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
+#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
+#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
+#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
+#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
+
+#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
+#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
+#define NIC_PF_MSIX_PBA_0 (0x0F0000)
+
+/* Virtual function register offsets */
+#define NIC_VNIC_CFG (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
+#define NIC_VF_INT (0x000200)
+#define NIC_VF_INT_W1S (0x000220)
+#define NIC_VF_ENA_W1C (0x000240)
+#define NIC_VF_ENA_W1S (0x000260)
+
+#define NIC_VNIC_RSS_CFG (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
+#define NIC_VNIC_TX_STAT_0_4 (0x004000)
+#define NIC_VNIC_RX_STAT_0_13 (0x004100)
+#define NIC_QSET_RQ_GEN_CFG (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH (0x010410)
+#define NIC_QSET_CQ_0_7_BASE (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG (0x010600)
+#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
+
+#define NIC_QSET_SQ_0_7_CFG (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH (0x010810)
+#define NIC_QSET_SQ_0_7_BASE (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
+#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
+
+#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
+#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
+
+#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
+#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
+#define NIC_VF_MSIX_PBA (0x0F0000)
+
+/* Offsets within registers */
+#define NIC_MSIX_VEC_SHIFT 4
+#define NIC_Q_NUM_SHIFT 18
+#define NIC_QS_ID_SHIFT 21
+#define NIC_VF_NUM_SHIFT 21
+
+/* Port kind configuration register */
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63:22;
+ u64 hdr_sl:5; /* Header skip length */
+ u64 rx_hdr:3; /* TNS Receive header present */
+ u64 lenerr_en:1;/* L2 length error check enable */
+ u64 reserved_32_32:1;
+ u64 maxlen:16; /* Max frame size */
+ u64 minlen:16; /* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 minlen:16;
+ u64 maxlen:16;
+ u64 reserved_32_32:1;
+ u64 lenerr_en:1;
+ u64 rx_hdr:3;
+ u64 hdr_sl:5;
+ u64 reserved_42_63:22;
+#endif
+};
+
+#endif /* NIC_REG_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
new file mode 100644
index 000000000..a4228e664
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -0,0 +1,606 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/* ETHTOOL Support for VNIC_VF Device */
+
+#include <linux/pci.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nicvf"
+#define DRV_VERSION "1.0"
+
+struct nicvf_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+#define NICVF_HW_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
+}
+
+#define NICVF_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
+}
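+
+/* .index is the stat's u64 slot within its structure, so values can be
+ * fetched generically as ((u64 *)&stats_struct)[index]; see
+ * nicvf_get_ethtool_stats() below.
+ */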
+
+static const struct nicvf_stat nicvf_hw_stats[] = {
+ NICVF_HW_STAT(rx_bytes_ok),
+ NICVF_HW_STAT(rx_ucast_frames_ok),
+ NICVF_HW_STAT(rx_bcast_frames_ok),
+ NICVF_HW_STAT(rx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_fcs_errors),
+ NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(rx_drop_red),
+ NICVF_HW_STAT(rx_drop_red_bytes),
+ NICVF_HW_STAT(rx_drop_overrun),
+ NICVF_HW_STAT(rx_drop_overrun_bytes),
+ NICVF_HW_STAT(rx_drop_bcast),
+ NICVF_HW_STAT(rx_drop_mcast),
+ NICVF_HW_STAT(rx_drop_l3_bcast),
+ NICVF_HW_STAT(rx_drop_l3_mcast),
+ NICVF_HW_STAT(tx_bytes_ok),
+ NICVF_HW_STAT(tx_ucast_frames_ok),
+ NICVF_HW_STAT(tx_bcast_frames_ok),
+ NICVF_HW_STAT(tx_mcast_frames_ok),
+};
+
+static const struct nicvf_stat nicvf_drv_stats[] = {
+ NICVF_DRV_STAT(rx_frames_ok),
+ NICVF_DRV_STAT(rx_frames_64),
+ NICVF_DRV_STAT(rx_frames_127),
+ NICVF_DRV_STAT(rx_frames_255),
+ NICVF_DRV_STAT(rx_frames_511),
+ NICVF_DRV_STAT(rx_frames_1023),
+ NICVF_DRV_STAT(rx_frames_1518),
+ NICVF_DRV_STAT(rx_frames_jumbo),
+ NICVF_DRV_STAT(rx_drops),
+ NICVF_DRV_STAT(tx_frames_ok),
+ NICVF_DRV_STAT(tx_tso),
+ NICVF_DRV_STAT(tx_drops),
+ NICVF_DRV_STAT(txq_stop),
+ NICVF_DRV_STAT(txq_wake),
+};
+
+static const struct nicvf_stat nicvf_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
+static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
+static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
+
+static int nicvf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->supported = 0;
+ cmd->transceiver = XCVR_EXTERNAL;
+ if (nic->speed <= 1000) {
+ cmd->port = PORT_MII;
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->port = PORT_FIBRE;
+ cmd->autoneg = AUTONEG_DISABLE;
+ }
+ cmd->duplex = nic->duplex;
+ ethtool_cmd_speed_set(cmd, nic->speed);
+
+ return 0;
+}
+
+static void nicvf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+}
+
+static u32 nicvf_get_msglevel(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ return nic->msg_enable;
+}
+
+static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ nic->msg_enable = lvl;
+}
+
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int stats, qidx;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
+ memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
+ memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(data, "rxq%d: %s", qidx,
+ nicvf_queue_stats[stats].name);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(data, "txq%d: %s", qidx,
+ nicvf_queue_stats[stats].name);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int nicvf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ return nicvf_n_hw_stats + nicvf_n_drv_stats +
+ (nicvf_n_queue_stats *
+ (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
+ BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
+}
+
+static void nicvf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int stat, qidx;
+
+ nicvf_update_stats(nic);
+
+ /* Update LMAC stats */
+ nicvf_update_lmac_stats(nic);
+
+ for (stat = 0; stat < nicvf_n_hw_stats; stat++)
+ *(data++) = ((u64 *)&nic->stats)
+ [nicvf_hw_stats[stat].index];
+ for (stat = 0; stat < nicvf_n_drv_stats; stat++)
+ *(data++) = ((u64 *)&nic->drv_stats)
+ [nicvf_drv_stats[stat].index];
+
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.rx_stats[stat];
+ for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.tx_stats[stat];
+}
+
+static int nicvf_get_regs_len(struct net_device *dev)
+{
+ return sizeof(u64) * NIC_VF_REG_COUNT;
+}
+
+static void nicvf_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *reg)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ u64 *p = (u64 *)reg;
+ u64 reg_offset;
+ int mbox, key, stat, q;
+ int i = 0;
+
+ regs->version = 0;
+ memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
+ /* Mailbox registers */
+ for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
+
+ /* Tx/Rx statistics */
+ for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_TX_STAT_0_4 | (stat << 3));
+
+ for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_RX_STAT_0_13 | (stat << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
+
+ /* All completion queue's registers */
+ for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
+ }
+
+ /* All receive queue's registers */
+ for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS0, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS1, q);
+ reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+}
+
+static int nicvf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
+ return 0;
+}
+
+static void nicvf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+
+ ring->rx_max_pending = MAX_RCV_BUF_COUNT;
+ ring->rx_pending = qs->rbdr_len;
+ ring->tx_max_pending = MAX_SND_QUEUE_LEN;
+ ring->tx_pending = qs->sq_len;
+}
+
+static int nicvf_get_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ info->data = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
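+ /* Fall through: L4 hashing implies hashing on the IP addresses too */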
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nicvf_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = nic->qs->rq_cnt;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return nicvf_get_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int nicvf_set_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ if (!rss->enable)
+ netdev_err(nic->netdev,
+ "RSS is disabled, hash cannot be set\n");
+
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ info->flow_type, info->data);
+
+ if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_UDP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_UDP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_L4ETC);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = RSS_HASH_IP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
+ return 0;
+}
+
+static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return nicvf_set_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
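+/* The RSS hash key is stored as RSS_HASH_KEY_SIZE (5) 64-bit words,
+ * i.e. 40 bytes / 320 bits, as reported below.
+ */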
+static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_SIZE * sizeof(u64);
+}
+
+static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ return nic->rss_info.rss_size;
+}
+
+static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss->ind_tbl[idx];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, u8 hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return -EIO;
+ }
+
+ /* We do not allow change in unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ rss->enable = true;
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = indir[idx];
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+ nicvf_set_rss_key(nic);
+ }
+
+ nicvf_config_rss(nic);
+ return 0;
+}
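+
+/* Usage sketch (assuming a standard ethtool binary): a command like
+ * "ethtool -X <dev> equal 4" reaches nicvf_set_rxfh() with an indirection
+ * table spread evenly over the first 4 RQs, a NULL hash key and
+ * hfunc == ETH_RSS_HASH_NO_CHANGE, so only the table is reprogrammed.
+ */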
+
+/* Get the number of queues the device supports and the current queue count */
+static void nicvf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ memset(channel, 0, sizeof(*channel));
+
+ channel->max_rx = MAX_RCV_QUEUES_PER_QS;
+ channel->max_tx = MAX_SND_QUEUES_PER_QS;
+
+ channel->rx_count = nic->qs->rq_cnt;
+ channel->tx_count = nic->qs->sq_cnt;
+}
+
+/* Set the number of Tx/Rx queues to be used */
+static int nicvf_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int err = 0;
+ bool if_up = netif_running(dev);
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+ if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+ return -EINVAL;
+ if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+ return -EINVAL;
+
+ if (if_up)
+ nicvf_stop(dev);
+
+ nic->qs->rq_cnt = channel->rx_count;
+ nic->qs->sq_cnt = channel->tx_count;
+ nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+ err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+ if (err)
+ return err;
+
+ if (if_up)
+ nicvf_open(dev);
+
+ netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
+ nic->qs->sq_cnt, nic->qs->rq_cnt);
+
+ return err;
+}
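+
+/* Usage sketch: queue counts can be changed at runtime with e.g.
+ * "ethtool -L <dev> rx 4 tx 4"; if the interface is up it is stopped,
+ * the Qset counts are updated and the interface is reopened.
+ */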
+
+static const struct ethtool_ops nicvf_ethtool_ops = {
+ .get_settings = nicvf_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = nicvf_get_drvinfo,
+ .get_msglevel = nicvf_get_msglevel,
+ .set_msglevel = nicvf_set_msglevel,
+ .get_strings = nicvf_get_strings,
+ .get_sset_count = nicvf_get_sset_count,
+ .get_ethtool_stats = nicvf_get_ethtool_stats,
+ .get_regs_len = nicvf_get_regs_len,
+ .get_regs = nicvf_get_regs,
+ .get_coalesce = nicvf_get_coalesce,
+ .get_ringparam = nicvf_get_ringparam,
+ .get_rxnfc = nicvf_get_rxnfc,
+ .set_rxnfc = nicvf_set_rxnfc,
+ .get_rxfh_key_size = nicvf_get_rxfh_key_size,
+ .get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
+ .get_rxfh = nicvf_get_rxfh,
+ .set_rxfh = nicvf_set_rxfh,
+ .get_channels = nicvf_get_channels,
+ .set_channels = nicvf_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+void nicvf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &nicvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
new file mode 100644
index 000000000..3b90afb8c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -0,0 +1,1364 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/log2.h>
+#include <linux/prefetch.h>
+#include <linux/irq.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nicvf"
+#define DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id nicvf_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nicvf_id_table);
+
+static int debug = 0x00;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug message level bitmap");
+
+static int cpi_alg = CPI_ALG_NONE;
+module_param(cpi_alg, int, S_IRUGO);
+MODULE_PARM_DESC(cpi_alg,
+ "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+
+static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
+ struct sk_buff *skb)
+{
+ if (skb->len <= 64)
+ nic->drv_stats.rx_frames_64++;
+ else if (skb->len <= 127)
+ nic->drv_stats.rx_frames_127++;
+ else if (skb->len <= 255)
+ nic->drv_stats.rx_frames_255++;
+ else if (skb->len <= 511)
+ nic->drv_stats.rx_frames_511++;
+ else if (skb->len <= 1023)
+ nic->drv_stats.rx_frames_1023++;
+ else if (skb->len <= 1518)
+ nic->drv_stats.rx_frames_1518++;
+ else
+ nic->drv_stats.rx_frames_jumbo++;
+}
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
+}
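+
+/* For example, clearing all pending VF interrupts elsewhere in this driver
+ * is simply "nicvf_reg_write(nic, NIC_VF_INT, -1)"; no readq()/writeq()
+ * style barrier is needed, per the ordering note above.
+ */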
+
+/* VF -> PF mailbox communication */
+
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+ u64 *msg = (u64 *)mbx;
+
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+ int timeout = NIC_MBOX_MSG_TIMEOUT;
+ int sleep = 10;
+
+ nic->pf_acked = false;
+ nic->pf_nacked = false;
+
+ nicvf_write_to_mbx(nic, mbx);
+
+ /* Wait for previous message to be acked, timeout 2sec */
+ while (!nic->pf_acked) {
+ if (nic->pf_nacked)
+ return -EINVAL;
+ msleep(sleep);
+ if (nic->pf_acked)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "PF didn't ack to mbox msg %d from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/* Checks if the VF is able to communicate with the PF
+ * and also gets the VNIC number this VF is associated with.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+ int timeout = 5000, sleep = 20;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_READY;
+
+ nic->pf_ready_to_rcv_msg = false;
+
+ nicvf_write_to_mbx(nic, &mbx);
+
+ while (!nic->pf_ready_to_rcv_msg) {
+ msleep(sleep);
+ if (nic->pf_ready_to_rcv_msg)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to READY msg\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
+{
+ if (bgx->rx)
+ nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
+ else
+ nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ int i;
+
+ mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nicvf_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic->pf_ready_to_rcv_msg = true;
+ nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+ nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+ nic->node = mbx.nic_cfg.node_id;
+ if (!nic->set_mac_pending)
+ ether_addr_copy(nic->netdev->dev_addr,
+ mbx.nic_cfg.mac_addr);
+ nic->link_up = false;
+ nic->duplex = 0;
+ nic->speed = 0;
+ break;
+ case NIC_MBOX_MSG_ACK:
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_NACK:
+ nic->pf_nacked = true;
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
+ nic->pf_acked = true;
+ nic->bgx_stats_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ nic->pf_acked = true;
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ if (nic->link_up) {
+ netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
+ nic->netdev->name, nic->speed,
+ nic->duplex == DUPLEX_FULL ?
+ "Full duplex" : "Half duplex");
+ netif_carrier_on(nic->netdev);
+ netif_tx_start_all_queues(nic->netdev);
+ } else {
+ netdev_info(nic->netdev, "%s: Link is Down\n",
+ nic->netdev->name);
+ netif_carrier_off(nic->netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ }
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+ break;
+ }
+ nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
+{
+ union nic_mbx mbx = {};
+
+ mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+ mbx.mac.vf_id = nic->vf_id;
+ ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+ mbx.cpi_cfg.vf_id = nic->vf_id;
+ mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+ mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_rss_size(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.vf_id = nic->vf_id;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+void nicvf_config_rss(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int ind_tbl_len = rss->rss_size;
+ int i, nextq = 0;
+
+ mbx.rss_cfg.vf_id = nic->vf_id;
+ mbx.rss_cfg.hash_bits = rss->hash_bits;
+ while (ind_tbl_len) {
+ mbx.rss_cfg.tbl_offset = nextq;
+ mbx.rss_cfg.tbl_len = min(ind_tbl_len,
+ RSS_IND_TBL_LEN_PER_MBX_MSG);
+ mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
+ NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+
+ for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
+ mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ ind_tbl_len -= mbx.rss_cfg.tbl_len;
+ }
+}
+
+void nicvf_set_rss_key(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
+ int idx;
+
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ nicvf_reg_write(nic, key_addr, rss->key[idx]);
+ key_addr += sizeof(u64);
+ }
+}
+
+static int nicvf_rss_init(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ nicvf_get_rss_size(nic);
+
+ if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return 0;
+ }
+
+ rss->enable = true;
+
+ /* Using the HW reset value for now */
+ rss->key[0] = 0xFEED0BADFEED0BADULL;
+ rss->key[1] = 0xFEED0BADFEED0BADULL;
+ rss->key[2] = 0xFEED0BADFEED0BADULL;
+ rss->key[3] = 0xFEED0BADFEED0BADULL;
+ rss->key[4] = 0xFEED0BADFEED0BADULL;
+
+ nicvf_set_rss_key(nic);
+
+ rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
+
+ rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
+
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
+ nic->qs->rq_cnt);
+ nicvf_config_rss(nic);
+ return 1;
+}
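+
+/* Example of the default spread: ethtool_rxfh_indir_default(idx, n) is just
+ * "idx % n", so with e.g. a 128-entry table and 4 receive queues the
+ * indirection table is initialised to 0,1,2,3,0,1,2,3,... before being
+ * pushed to the PF via nicvf_config_rss().
+ */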
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err = 0;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set no of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+ "Failed to set no of Rx queues: %d\n", rx_queues);
+ return err;
+}
+
+static int nicvf_init_resources(struct nicvf *nic)
+{
+ int err;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+
+ /* Enable Qset */
+ nicvf_qset_config(nic, true);
+
+ /* Initialize queues and HW for data transfer */
+ err = nicvf_config_data_transfer(nic, true);
+ if (err) {
+ netdev_err(nic->netdev,
+ "Failed to alloc/config VF's QSet resources\n");
+ return err;
+ }
+
+ /* Send VF config done msg to PF */
+ nicvf_write_to_mbx(nic, &mbx);
+
+ return 0;
+}
+
+static void nicvf_snd_pkt_handler(struct net_device *netdev,
+ struct cmp_queue *cq,
+ struct cqe_send_t *cqe_tx, int cqe_type)
+{
+ struct sk_buff *skb = NULL;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct snd_queue *sq;
+ struct sq_hdr_subdesc *hdr;
+
+ sq = &nic->qs->sq[cqe_tx->sq_idx];
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+ return;
+
+ netdev_dbg(nic->netdev,
+ "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
+ __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
+ cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+ skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
+ /* For TSO offloaded packets only one head SKB needs to be freed */
+ if (skb) {
+ prefetch(skb);
+ dev_consume_skb_any(skb);
+ sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
+ }
+}
+
+static void nicvf_rcv_pkt_handler(struct net_device *netdev,
+ struct napi_struct *napi,
+ struct cmp_queue *cq,
+ struct cqe_rx_t *cqe_rx, int cqe_type)
+{
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ int err = 0;
+
+ /* Check for errors */
+ err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+ if (err && !cqe_rx->rb_cnt)
+ return;
+
+ skb = nicvf_get_rcv_skb(nic, cqe_rx);
+ if (!skb) {
+ netdev_dbg(nic->netdev, "Packet not received\n");
+ return;
+ }
+
+ if (netif_msg_pktdata(nic)) {
+ netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
+ skb, skb->len);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, true);
+ }
+
+ nicvf_set_rx_frame_cnt(nic, skb);
+
+ skb_record_rx_queue(skb, cqe_rx->rq_idx);
+ if (netdev->hw_features & NETIF_F_RXCSUM) {
+ /* HW by default verifies TCP/UDP/SCTP checksums */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (napi && (netdev->features & NETIF_F_GRO))
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
+ struct napi_struct *napi, int budget)
+{
+ int processed_cqe, work_done = 0, tx_done = 0;
+ int cqe_count, cqe_head;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct cmp_queue *cq = &qs->cq[cq_idx];
+ struct cqe_rx_t *cq_desc;
+ struct netdev_queue *txq;
+
+ spin_lock_bh(&cq->lock);
+loop:
+ processed_cqe = 0;
+ /* Get the number of valid CQ entries to process */
+ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
+ cqe_count &= CQ_CQE_COUNT;
+ if (!cqe_count)
+ goto done;
+
+ /* Get head of the valid CQ entries */
+ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
+ cqe_head &= 0xFFFF;
+
+ netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
+ __func__, cq_idx, cqe_count, cqe_head);
+ while (processed_cqe < cqe_count) {
+ /* Get the CQ descriptor */
+ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+ cqe_head++;
+ cqe_head &= (cq->dmem.q_len - 1);
+ /* Initiate prefetch for next descriptor */
+ prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
+ if ((work_done >= budget) && napi &&
+ (cq_desc->cqe_type != CQE_TYPE_SEND)) {
+ break;
+ }
+
+ netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
+ cq_idx, cq_desc->cqe_type);
+ switch (cq_desc->cqe_type) {
+ case CQE_TYPE_RX:
+ nicvf_rcv_pkt_handler(netdev, napi, cq,
+ cq_desc, CQE_TYPE_RX);
+ work_done++;
+ break;
+ case CQE_TYPE_SEND:
+ nicvf_snd_pkt_handler(netdev, cq,
+ (void *)cq_desc, CQE_TYPE_SEND);
+ tx_done++;
+ break;
+ case CQE_TYPE_INVALID:
+ case CQE_TYPE_RX_SPLIT:
+ case CQE_TYPE_RX_TCP:
+ case CQE_TYPE_SEND_PTP:
+ /* Ignore for now */
+ break;
+ }
+ processed_cqe++;
+ }
+ netdev_dbg(nic->netdev,
+ "%s CQ%d processed_cqe %d work_done %d budget %d\n",
+ __func__, cq_idx, processed_cqe, work_done, budget);
+
+ /* Ring doorbell to inform H/W to reuse processed CQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+ cq_idx, processed_cqe);
+
+ if ((work_done < budget) && napi)
+ goto loop;
+
+done:
+ /* Wakeup TXQ if its stopped earlier due to SQ full */
+ if (tx_done) {
+ txq = netdev_get_tx_queue(netdev, cq_idx);
+ if (netif_tx_queue_stopped(txq)) {
+ netif_tx_start_queue(txq);
+ nic->drv_stats.txq_wake++;
+ if (netif_msg_tx_err(nic))
+ netdev_warn(netdev,
+ "%s: Transmit queue wakeup SQ%d\n",
+ netdev->name, cq_idx);
+ }
+ }
+
+ spin_unlock_bh(&cq->lock);
+ return work_done;
+}
+
+static int nicvf_poll(struct napi_struct *napi, int budget)
+{
+ u64 cq_head;
+ int work_done = 0;
+ struct net_device *netdev = napi->dev;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_cq_poll *cq;
+
+ cq = container_of(napi, struct nicvf_cq_poll, napi);
+ work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
+
+ if (work_done < budget) {
+ /* Slow packet rate, exit polling */
+ napi_complete(napi);
+ /* Re-enable interrupts */
+ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx, cq_head);
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ }
+ return work_done;
+}
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+static void nicvf_handle_qs_err(unsigned long data)
+{
+ struct nicvf *nic = (struct nicvf *)data;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+ u64 status;
+
+ netif_tx_disable(nic->netdev);
+
+ /* Check if it is CQ err */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+ qidx);
+ if (!(status & CQ_ERR_MASK))
+ continue;
+ /* Process already queued CQEs and reconfig CQ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_sq_disable(nic, qidx);
+ nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
+ nicvf_cmp_queue_config(nic, qs, qidx, true);
+ nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
+ nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+ }
+
+ netif_tx_start_all_queues(nic->netdev);
+ /* Re-enable Qset error interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+}
+
+static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ u64 intr;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ /* Check for spurious interrupt */
+ if (!(intr & NICVF_INTR_MBOX_MASK))
+ return IRQ_HANDLED;
+
+ nicvf_handle_mbx_intr(nic);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+{
+ u64 qidx, intr, clear_intr = 0;
+ u64 cq_intr, rbdr_intr, qs_err_intr;
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ if (netif_msg_intr(nic))
+ netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+ nic->netdev->name, intr);
+
+ qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
+ if (qs_err_intr) {
+ /* Disable Qset err interrupt and schedule softirq */
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ tasklet_hi_schedule(&nic->qs_err_task);
+ clear_intr |= qs_err_intr;
+ }
+
+ /* Disable interrupts and start polling */
+ cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (!(cq_intr & (1 << qidx)))
+ continue;
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
+ continue;
+
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
+
+ cq_poll = nic->napi[qidx];
+ /* Schedule NAPI */
+ if (cq_poll)
+ napi_schedule(&cq_poll->napi);
+ }
+
+ /* Handle RBDR interrupts */
+ rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
+ if (rbdr_intr) {
+ /* Disable RBDR interrupt and schedule softirq */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
+ continue;
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ tasklet_hi_schedule(&nic->rbdr_task);
+ clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
+ }
+ }
+
+ /* Clear interrupts */
+ nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
+ return IRQ_HANDLED;
+}
+
+static int nicvf_enable_msix(struct nicvf *nic)
+{
+ int ret, vec;
+
+ nic->num_vec = NIC_VF_MSIX_VECTORS;
+
+ for (vec = 0; vec < nic->num_vec; vec++)
+ nic->msix_entries[vec].entry = vec;
+
+ ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+ if (ret) {
+ netdev_err(nic->netdev,
+ "Req for #%d msix vectors failed\n", nic->num_vec);
+ return 0;
+ }
+ nic->msix_enabled = 1;
+ return 1;
+}
+
+static void nicvf_disable_msix(struct nicvf *nic)
+{
+ if (nic->msix_enabled) {
+ pci_disable_msix(nic->pdev);
+ nic->msix_enabled = 0;
+ nic->num_vec = 0;
+ }
+}
+
+static int nicvf_register_interrupts(struct nicvf *nic)
+{
+ int irq, free, ret = 0;
+ int vector;
+
+ for_each_cq_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
+ nic->vf_id, irq);
+
+ for_each_sq_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
+ nic->vf_id, irq - NICVF_INTR_ID_SQ);
+
+ for_each_rbdr_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
+ nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+
+ /* Register all interrupts except mailbox */
+ for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+ vector = nic->msix_entries[irq].vector;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ break;
+ nic->irq_allocated[irq] = true;
+ }
+
+ for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
+ vector = nic->msix_entries[irq].vector;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ break;
+ nic->irq_allocated[irq] = true;
+ }
+
+ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
+ "NICVF%d Qset error", nic->vf_id);
+ if (!ret) {
+ vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
+ irq = NICVF_INTR_ID_QS_ERR;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (!ret)
+ nic->irq_allocated[irq] = true;
+ }
+
+ if (ret) {
+ netdev_err(nic->netdev, "Request irq failed\n");
+ for (free = 0; free < irq; free++)
+ free_irq(nic->msix_entries[free].vector, nic);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nicvf_unregister_interrupts(struct nicvf *nic)
+{
+ int irq;
+
+ /* Free registered interrupts */
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->msix_entries[irq].vector, nic);
+ nic->irq_allocated[irq] = false;
+ }
+
+ /* Disable MSI-X */
+ nicvf_disable_msix(nic);
+}
+
+/* Initialize MSI-X vectors and register the MISC (mailbox) interrupt.
+ * Send a READY message to the PF to check if it's alive.
+ */
+static int nicvf_register_misc_interrupt(struct nicvf *nic)
+{
+ int ret = 0;
+ int irq = NICVF_INTR_ID_MISC;
+
+ /* Return if mailbox interrupt is already registered */
+ if (nic->msix_enabled)
+ return 0;
+
+ /* Enable MSI-X */
+ if (!nicvf_enable_msix(nic))
+ return 1;
+
+ sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
+ /* Register Misc interrupt */
+ ret = request_irq(nic->msix_entries[irq].vector,
+ nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
+
+ if (ret)
+ return ret;
+ nic->irq_allocated[irq] = true;
+
+ /* Enable mailbox interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ /* Check if VF is able to communicate with PF */
+ if (!nicvf_check_pf_ready(nic)) {
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+ return 1;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int qid = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+
+ /* Check for minimum packet length */
+ if (skb->len <= ETH_HLEN) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
+ netif_tx_stop_queue(txq);
+ nic->drv_stats.txq_stop++;
+ if (netif_msg_tx_err(nic))
+ netdev_warn(netdev,
+ "%s: Transmit ring full, stopping SQ%d\n",
+ netdev->name, qid);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+int nicvf_stop(struct net_device *netdev)
+{
+ int irq, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ netif_carrier_off(netdev);
+
+ /* Disable RBDR & QS error interrupts */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+ }
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Wait for pending IRQ handlers to finish */
+ for (irq = 0; irq < nic->num_vec; irq++)
+ synchronize_irq(nic->msix_entries[irq].vector);
+
+ tasklet_kill(&nic->rbdr_task);
+ tasklet_kill(&nic->qs_err_task);
+ if (nic->rb_work_scheduled)
+ cancel_delayed_work_sync(&nic->rbdr_work);
+
+ for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ nic->napi[qidx] = NULL;
+ napi_synchronize(&cq_poll->napi);
+ /* CQ intr is enabled while napi_complete,
+ * so disable it now
+ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ kfree(cq_poll);
+ }
+
+ netif_tx_disable(netdev);
+
+ /* Free resources */
+ nicvf_config_data_transfer(nic, false);
+
+ /* Disable HW Qset */
+ nicvf_qset_config(nic, false);
+
+ /* disable mailbox interrupt */
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ nicvf_unregister_interrupts(nic);
+
+ return 0;
+}
+
+int nicvf_open(struct net_device *netdev)
+{
+ int err, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+ nic->mtu = netdev->mtu;
+
+ netif_carrier_off(netdev);
+
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ return err;
+
+ /* Register NAPI handler for processing CQEs */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
+ if (!cq_poll) {
+ err = -ENOMEM;
+ goto napi_del;
+ }
+ cq_poll->cq_idx = qidx;
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&cq_poll->napi);
+ nic->napi[qidx] = cq_poll;
+ }
+
+ /* Check if we got a MAC address from the PF, else generate a random one */
+ if (is_zero_ether_addr(netdev->dev_addr)) {
+ eth_hw_addr_random(netdev);
+ nicvf_hw_set_mac_addr(nic, netdev);
+ }
+
+ if (nic->set_mac_pending) {
+ nic->set_mac_pending = false;
+ nicvf_hw_set_mac_addr(nic, netdev);
+ }
+
+ /* Init tasklet for handling Qset err interrupt */
+ tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
+ (unsigned long)nic);
+
+ /* Init RBDR tasklet which will refill RBDR */
+ tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
+ (unsigned long)nic);
+ INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
+
+ /* Configure CPI algorithm */
+ nic->cpi_alg = cpi_alg;
+ nicvf_config_cpi(nic);
+
+ /* Configure receive side scaling */
+ nicvf_rss_init(nic);
+
+ err = nicvf_register_interrupts(nic);
+ if (err)
+ goto cleanup;
+
+ /* Initialize the queues */
+ err = nicvf_init_resources(nic);
+ if (err)
+ goto cleanup;
+
+ /* Make sure queue initialization is written */
+ wmb();
+
+ nicvf_reg_write(nic, NIC_VF_INT, -1);
+ /* Enable Qset err interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Enable completion queue interrupt */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+
+ /* Enable RBDR threshold interrupt */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+
+ nic->drv_stats.txq_stop = 0;
+ nic->drv_stats.txq_wake = 0;
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+cleanup:
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+napi_del:
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ kfree(cq_poll);
+ nic->napi[qidx] = NULL;
+ }
+ return err;
+}
+
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+ union nic_mbx mbx = {};
+
+ mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (new_mtu > NIC_HW_MAX_FRS)
+ return -EINVAL;
+
+ if (new_mtu < NIC_HW_MIN_FRS)
+ return -EINVAL;
+
+ if (nicvf_update_hw_max_frs(nic, new_mtu))
+ return -EINVAL;
+ netdev->mtu = new_mtu;
+ nic->mtu = new_mtu;
+
+ return 0;
+}
+
+static int nicvf_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ if (nic->msix_enabled) {
+ if (nicvf_hw_set_mac_addr(nic, netdev))
+ return -EBUSY;
+ } else {
+ nic->set_mac_pending = true;
+ }
+
+ return 0;
+}
+
+void nicvf_update_lmac_stats(struct nicvf *nic)
+{
+ int stat = 0;
+ union nic_mbx mbx = {};
+ int timeout;
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = nic->vf_id;
+ /* Rx stats */
+ mbx.bgx_stats.rx = 1;
+ while (stat < BGX_RX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0;
+ mbx.bgx_stats.idx = stat;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ timeout = 0;
+ while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+ msleep(2);
+ timeout++;
+ }
+ stat++;
+ }
+
+ stat = 0;
+
+ /* Tx stats */
+ mbx.bgx_stats.rx = 0;
+ while (stat < BGX_TX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0;
+ mbx.bgx_stats.idx = stat;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ timeout = 0;
+ while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+ msleep(2);
+ timeout++;
+ }
+ stat++;
+ }
+}
+
+void nicvf_update_stats(struct nicvf *nic)
+{
+ int qidx;
+ struct nicvf_hw_stats *stats = &nic->stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+ struct queue_set *qs = nic->qs;
+
+#define GET_RX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define GET_TX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
+
+ stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+ stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
+ stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
+ stats->rx_drop_red = GET_RX_STATS(RX_RED);
+ stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
+ stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
+ stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
+ stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
+
+ stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+ stats->tx_drops = GET_TX_STATS(TX_DROP);
+
+ drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
+ stats->rx_bcast_frames_ok +
+ stats->rx_mcast_frames_ok;
+ drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
+ stats->tx_bcast_frames_ok +
+ stats->tx_mcast_frames_ok;
+ drv_stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
+ drv_stats->tx_drops = stats->tx_drops;
+
+ /* Update RQ and SQ stats */
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_update_rq_stats(nic, qidx);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_update_sq_stats(nic, qidx);
+}
+
+static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_hw_stats *hw_stats = &nic->stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+
+ nicvf_update_stats(nic);
+
+ stats->rx_bytes = hw_stats->rx_bytes_ok;
+ stats->rx_packets = drv_stats->rx_frames_ok;
+ stats->rx_dropped = drv_stats->rx_drops;
+
+ stats->tx_bytes = hw_stats->tx_bytes_ok;
+ stats->tx_packets = drv_stats->tx_frames_ok;
+ stats->tx_dropped = drv_stats->tx_drops;
+
+ return stats;
+}
+
+static void nicvf_tx_timeout(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ if (netif_msg_tx_err(nic))
+ netdev_warn(dev, "%s: Transmit timed out, resetting\n",
+ dev->name);
+
+ schedule_work(&nic->reset_task);
+}
+
+static void nicvf_reset_task(struct work_struct *work)
+{
+ struct nicvf *nic;
+
+ nic = container_of(work, struct nicvf, reset_task);
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ nicvf_stop(nic->netdev);
+ nicvf_open(nic->netdev);
+ nic->netdev->trans_start = jiffies;
+}
+
+static const struct net_device_ops nicvf_netdev_ops = {
+ .ndo_open = nicvf_open,
+ .ndo_stop = nicvf_stop,
+ .ndo_start_xmit = nicvf_xmit,
+ .ndo_change_mtu = nicvf_change_mtu,
+ .ndo_set_mac_address = nicvf_set_mac_address,
+ .ndo_get_stats64 = nicvf_get_stats64,
+ .ndo_tx_timeout = nicvf_tx_timeout,
+};
+
+static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct nicvf *nic;
+ struct queue_set *qs;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
+ MAX_RCV_QUEUES_PER_QS,
+ MAX_SND_QUEUES_PER_QS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ nic = netdev_priv(netdev);
+ nic->netdev = netdev;
+ nic->pdev = pdev;
+
+ /* MAP VF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = nicvf_set_qset_resources(nic);
+ if (err)
+ goto err_free_netdev;
+
+ qs = nic->qs;
+
+ err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
+ if (err)
+ goto err_free_netdev;
+
+ /* Check if PF is alive and get MAC address for this VF */
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ goto err_free_netdev;
+
+ netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO);
+ netdev->hw_features = netdev->features;
+
+ netdev->netdev_ops = &nicvf_netdev_ops;
+ netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+
+ INIT_WORK(&nic->reset_task, nicvf_reset_task);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_unregister_interrupts;
+ }
+
+ nic->msg_enable = debug;
+
+ nicvf_set_ethtool_ops(netdev);
+
+ return 0;
+
+err_unregister_interrupts:
+ nicvf_unregister_interrupts(nic);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void nicvf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nicvf *nic = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ nicvf_unregister_interrupts(nic);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static void nicvf_shutdown(struct pci_dev *pdev)
+{
+ nicvf_remove(pdev);
+}
+
+static struct pci_driver nicvf_driver = {
+ .name = DRV_NAME,
+ .id_table = nicvf_id_table,
+ .probe = nicvf_probe,
+ .remove = nicvf_remove,
+ .shutdown = nicvf_shutdown,
+};
+
+static int __init nicvf_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nicvf_driver);
+}
+
+static void __exit nicvf_cleanup_module(void)
+{
+ pci_unregister_driver(&nicvf_driver);
+}
+
+module_init(nicvf_init_module);
+module_exit(nicvf_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
new file mode 100644
index 000000000..ca4240aa6
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -0,0 +1,1550 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+struct rbuf_info {
+ struct page *page;
+ void *data;
+ u64 offset;
+};
+
+#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
+
+/* Poll a register for a specific value */
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+ u64 reg, int bit_pos, int bits, int val)
+{
+ u64 bit_mask;
+ u64 reg_val;
+ int timeout = 10;
+
+ bit_mask = (1ULL << bits) - 1;
+ bit_mask = (bit_mask << bit_pos);
+
+ while (timeout) {
+ reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+ if (((reg_val & bit_mask) >> bit_pos) == val)
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
+ return 1;
+}
+
+/* Allocate memory for a queue's descriptors */
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+ int q_len, int desc_size, int align_bytes)
+{
+ dmem->q_len = q_len;
+ dmem->size = (desc_size * q_len) + align_bytes;
+ /* Save address, need it while freeing */
+ dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+ &dmem->dma, GFP_KERNEL);
+ if (!dmem->unalign_base)
+ return -ENOMEM;
+
+ /* Align memory address for 'align_bytes' */
+ dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+ dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+ return 0;
+}
+
+/* Free queue's descriptor memory */
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+ if (!dmem)
+ return;
+
+ dma_free_coherent(&nic->pdev->dev, dmem->size,
+ dmem->unalign_base, dmem->dma);
+ dmem->unalign_base = NULL;
+ dmem->base = NULL;
+}
+
+/* Allocate buffer for packet reception
+ * HW returns memory address where packet is DMA'ed but not a pointer
+ * into RBDR ring, so save buffer address at the start of fragment and
+ * align the start address to a cache aligned address
+ */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
+ u32 buf_len, u64 **rbuf)
+{
+ u64 data;
+ struct rbuf_info *rinfo;
+ int order = get_order(buf_len);
+
+ /* Check if request can be accommodated in the previously allocated page */
+ if (nic->rb_page) {
+ if ((nic->rb_page_offset + buf_len + buf_len) >
+ (PAGE_SIZE << order)) {
+ nic->rb_page = NULL;
+ } else {
+ nic->rb_page_offset += buf_len;
+ get_page(nic->rb_page);
+ }
+ }
+
+ /* Allocate a new page */
+ if (!nic->rb_page) {
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ order);
+ if (!nic->rb_page) {
+ netdev_err(nic->netdev,
+ "Failed to allocate new rcv buffer\n");
+ return -ENOMEM;
+ }
+ nic->rb_page_offset = 0;
+ }
+
+ data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
+
+ /* Align buffer addr to a cache line, i.e. 128 bytes */
+ rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
+ /* Save page address for the later refcount update */
+ rinfo->page = nic->rb_page;
+ /* Store start address for later retrieval */
+ rinfo->data = (void *)data;
+ /* Store alignment offset */
+ rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
+
+ data += rinfo->offset;
+
+ /* Give next aligned address to hw for DMA */
+ *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
+ return 0;
+}
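+
+/* Resulting fragment layout (illustrative):
+ *
+ *  |-- pad to cacheline --|-- rbuf_info region --|-- packet data ... --|
+ *  ^page + offset         ^rinfo                 ^*rbuf (handed to HW)
+ *
+ * GET_RBUF_INFO() later steps back NICVF_RCV_BUF_ALIGN_BYTES from the
+ * DMA'ed address to recover rinfo, and rinfo->data/offset give back the
+ * original start of the buffer for build_skb().
+ */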
+
+/* Retrieve actual buffer start address and build skb for received packet */
+static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
+ u64 rb_ptr, int len)
+{
+ struct sk_buff *skb;
+ struct rbuf_info *rinfo;
+
+ rb_ptr = (u64)phys_to_virt(rb_ptr);
+ /* Get buffer start address and alignment offset */
+ rinfo = GET_RBUF_INFO(rb_ptr);
+
+ /* Now build an skb to give to stack */
+ skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+ if (!skb) {
+ put_page(rinfo->page);
+ return NULL;
+ }
+
+ /* Set correct skb->data */
+ skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
+
+ prefetch((void *)rb_ptr);
+ return skb;
+}
+
+/* Allocate RBDR ring and populate receive buffers */
+static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+ int ring_len, int buf_size)
+{
+ int idx;
+ u64 *rbuf;
+ struct rbdr_entry_t *desc;
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+ sizeof(struct rbdr_entry_t),
+ NICVF_RCV_BUF_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ rbdr->desc = rbdr->dmem.base;
+ /* Buffer size has to be in multiples of 128 bytes */
+ rbdr->dma_size = buf_size;
+ rbdr->enable = true;
+ rbdr->thresh = RBDR_THRESH;
+
+ nic->rb_page = NULL;
+ for (idx = 0; idx < ring_len; idx++) {
+ err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
+ &rbuf);
+ if (err)
+ return err;
+
+ desc = GET_RBDR_DESC(rbdr, idx);
+ desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ }
+ return 0;
+}
+
+/* Free RBDR ring and its receive buffers */
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+ int head, tail;
+ u64 buf_addr;
+ struct rbdr_entry_t *desc;
+ struct rbuf_info *rinfo;
+
+ if (!rbdr)
+ return;
+
+ rbdr->enable = false;
+ if (!rbdr->dmem.base)
+ return;
+
+ head = rbdr->head;
+ tail = rbdr->tail;
+
+ /* Free SKBs */
+ while (head != tail) {
+ desc = GET_RBDR_DESC(rbdr, head);
+ buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+ rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+ put_page(rinfo->page);
+ head++;
+ head &= (rbdr->dmem.q_len - 1);
+ }
+ /* Free SKB of tail desc */
+ desc = GET_RBDR_DESC(rbdr, tail);
+ buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+ rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+ put_page(rinfo->page);
+
+ /* Free RBDR ring */
+ nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers.
+ */
+static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
+{
+ struct queue_set *qs = nic->qs;
+ int rbdr_idx = qs->rbdr_cnt;
+ int tail, qcount;
+ int refill_rb_cnt;
+ struct rbdr *rbdr;
+ struct rbdr_entry_t *desc;
+ u64 *rbuf;
+ int new_rb = 0;
+
+refill:
+ if (!rbdr_idx)
+ return;
+ rbdr_idx--;
+ rbdr = &qs->rbdr[rbdr_idx];
+ /* Check if it's enabled */
+ if (!rbdr->enable)
+ goto next_rbdr;
+
+ /* Get the number of descriptors to be refilled */
+ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
+ qcount &= 0x7FFFF;
+ /* Doorbell can be rung with a max of ring size minus 1 */
+ if (qcount >= (qs->rbdr_len - 1))
+ goto next_rbdr;
+ else
+ refill_rb_cnt = qs->rbdr_len - qcount - 1;
+
+ /* Start filling descs from tail */
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
+ while (refill_rb_cnt) {
+ tail++;
+ tail &= (rbdr->dmem.q_len - 1);
+
+ if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
+ break;
+
+ desc = GET_RBDR_DESC(rbdr, tail);
+ desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ refill_rb_cnt--;
+ new_rb++;
+ }
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Check if buffer allocation failed */
+ if (refill_rb_cnt)
+ nic->rb_alloc_fail = true;
+ else
+ nic->rb_alloc_fail = false;
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ rbdr_idx, new_rb);
+next_rbdr:
+ /* Re-enable RBDR interrupts only if buffer allocation is successful */
+ if (!nic->rb_alloc_fail && rbdr->enable)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
+
+ if (rbdr_idx)
+ goto refill;
+}
+
+/* Alloc rcv buffers in non-atomic mode for a better chance of success */
+void nicvf_rbdr_work(struct work_struct *work)
+{
+ struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
+
+ nicvf_refill_rbdr(nic, GFP_KERNEL);
+ if (nic->rb_alloc_fail)
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ else
+ nic->rb_work_scheduled = false;
+}
+
+/* In Softirq context, alloc rcv buffers in atomic mode */
+void nicvf_rbdr_task(unsigned long data)
+{
+ struct nicvf *nic = (struct nicvf *)data;
+
+ nicvf_refill_rbdr(nic, GFP_ATOMIC);
+ if (nic->rb_alloc_fail) {
+ nic->rb_work_scheduled = true;
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ }
+}
+
+/* Initialize completion queue */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+ struct cmp_queue *cq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
+ NICVF_CQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ cq->desc = cq->dmem.base;
+ cq->thresh = CMP_QUEUE_CQE_THRESH;
+ nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
+
+ return 0;
+}
+
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+ if (!cq)
+ return;
+ if (!cq->dmem.base)
+ return;
+
+ nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+/* Initialize transmit queue */
+static int nicvf_init_snd_queue(struct nicvf *nic,
+ struct snd_queue *sq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
+ NICVF_SQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ sq->desc = sq->dmem.base;
+ sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+ if (!sq->skbuff)
+ return -ENOMEM;
+ sq->head = 0;
+ sq->tail = 0;
+ atomic_set(&sq->free_cnt, q_len - 1);
+ sq->thresh = SND_QUEUE_THRESH;
+
+ /* Preallocate memory for TSO segment's header */
+ sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+ q_len * TSO_HEADER_SIZE,
+ &sq->tso_hdrs_phys, GFP_KERNEL);
+ if (!sq->tso_hdrs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+ if (!sq)
+ return;
+ if (!sq->dmem.base)
+ return;
+
+ if (sq->tso_hdrs)
+ dma_free_coherent(&nic->pdev->dev,
+ sq->dmem.q_len * TSO_HEADER_SIZE,
+ sq->tso_hdrs, sq->tso_hdrs_phys);
+
+ kfree(sq->skbuff);
+ nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+ /* Check if SQ is stopped */
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+ return;
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ union nic_mbx mbx = {};
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+ struct rbdr *rbdr, int qidx)
+{
+ u64 tmp, fifo_state;
+ int timeout = 10;
+
+ /* Save head and tail pointers for freeing up buffers */
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD,
+ qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL,
+ qidx) >> 3;
+
+ /* If RBDR FIFO is in 'FAIL' state then do a reset first
+ * before reclaiming.
+ */
+ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+ if (((fifo_state >> 62) & 0x03) == 0x3)
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ /* Disable RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+ while (1) {
+ tmp = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+ qidx);
+ if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "Failed polling on prefetch status\n");
+ return;
+ }
+ }
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+ return;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct rcv_queue *rq;
+ struct rq_cfg rq_cfg;
+
+ rq = &qs->rq[qidx];
+ rq->enable = enable;
+
+ /* Disable receive queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+ if (!rq->enable) {
+ nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ return;
+ }
+
+ rq->cq_qs = qs->vnic_id;
+ rq->cq_idx = qidx;
+ rq->start_rbdr_qs = qs->vnic_id;
+ rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ rq->cont_rbdr_qs = qs->vnic_id;
+ rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ /* all writes of RBDR data to be loaded into L2 cache as well */
+ rq->caching = 1;
+
+ /* Send a mailbox msg to PF to config RQ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+ mbx.rq.qs_num = qs->vnic_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+ (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+ (rq->cont_qs_rbdr_idx << 8) |
+ (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+ mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* RQ drop config
+ * Enable CQ drop to reserve sufficient CQEs for all tx packets
+ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+ mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+
+ /* Enable Receive queue */
+ rq_cfg.ena = 1;
+ rq_cfg.tcp_ena = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+}
+
+/* Configures completion queue */
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct cmp_queue *cq;
+ struct cq_cfg cq_cfg;
+
+ cq = &qs->cq[qidx];
+ cq->enable = enable;
+
+ if (!cq->enable) {
+ nicvf_reclaim_cmp_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+ if (!cq->enable)
+ return;
+
+ spin_lock_init(&cq->lock);
+ /* Set completion queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+ qidx, (u64)(cq->dmem.phys_base));
+
+ /* Enable Completion queue */
+ cq_cfg.ena = 1;
+ cq_cfg.reset = 0;
+ cq_cfg.caching = 0;
+ cq_cfg.qsize = CMP_QSIZE;
+ cq_cfg.avg_con = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
+ qidx, nic->cq_coalesce_usecs);
+}
+
+/* Configures transmit queue */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct snd_queue *sq;
+ struct sq_cfg sq_cfg;
+
+ sq = &qs->sq[qidx];
+ sq->enable = enable;
+
+ if (!sq->enable) {
+ nicvf_reclaim_snd_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+ sq->cq_qs = qs->vnic_id;
+ sq->cq_idx = qidx;
+
+ /* Send a mailbox msg to PF to config SQ */
+ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+ mbx.sq.qs_num = qs->vnic_id;
+ mbx.sq.sq_num = qidx;
+ mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* Set queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+ qidx, (u64)(sq->dmem.phys_base));
+
+ /* Enable send queue & set queue size */
+ sq_cfg.ena = 1;
+ sq_cfg.reset = 0;
+ sq_cfg.ldwb = 0;
+ sq_cfg.qsize = SND_QSIZE;
+ sq_cfg.tstmp_bgx_intf = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+
+ /* Set queue:cpu affinity for better load distribution */
+ if (cpu_online(qidx)) {
+ cpumask_set_cpu(qidx, &sq->affinity_mask);
+ netif_set_xps_queue(nic->netdev,
+ &sq->affinity_mask, qidx);
+ }
+}
+
+/* Configures receive buffer descriptor ring */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct rbdr *rbdr;
+ struct rbdr_cfg rbdr_cfg;
+
+ rbdr = &qs->rbdr[qidx];
+ nicvf_reclaim_rbdr(nic, rbdr, qidx);
+ if (!enable)
+ return;
+
+ /* Set descriptor base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+ qidx, (u64)(rbdr->dmem.phys_base));
+
+ /* Enable RBDR & set queue size */
+ /* Buffer size should be in multiples of 128 bytes */
+ rbdr_cfg.ena = 1;
+ rbdr_cfg.reset = 0;
+ rbdr_cfg.ldwb = 0;
+ rbdr_cfg.qsize = RBDR_SIZE;
+ rbdr_cfg.avg_con = 0;
+ rbdr_cfg.lines = rbdr->dma_size / 128;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, *(u64 *)&rbdr_cfg);
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ qidx, qs->rbdr_len - 1);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+ qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct queue_set *qs = nic->qs;
+ struct qs_cfg *qs_cfg;
+
+ if (!qs) {
+ netdev_warn(nic->netdev,
+ "Qset is still not allocated, don't init queues\n");
+ return;
+ }
+
+ qs->enable = enable;
+ qs->vnic_id = nic->vf_id;
+
+ /* Send a mailbox msg to PF to config Qset */
+ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+ mbx.qs.num = qs->vnic_id;
+
+ mbx.qs.cfg = 0;
+ qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+ if (qs->enable) {
+ qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+ qs_cfg->be = 1;
+#endif
+ qs_cfg->vnic = qs->vnic_id;
+ }
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_free_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Free receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
+
+ /* Free completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
+
+ /* Free send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_free_snd_queue(nic, &qs->sq[qidx]);
+}
+
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Alloc receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+ DMA_BUFFER_LEN))
+ goto alloc_fail;
+ }
+
+ /* Alloc send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+ if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+ goto alloc_fail;
+ }
+
+ /* Alloc completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+ goto alloc_fail;
+ }
+
+ return 0;
+alloc_fail:
+ nicvf_free_resources(nic);
+ return -ENOMEM;
+}
+
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+ struct queue_set *qs;
+
+ qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
+ if (!qs)
+ return -ENOMEM;
+ nic->qs = qs;
+
+ /* Set count of each queue */
+ qs->rbdr_cnt = RBDR_CNT;
+ qs->rq_cnt = RCV_QUEUE_CNT;
+ qs->sq_cnt = SND_QUEUE_CNT;
+ qs->cq_cnt = CMP_QUEUE_CNT;
+
+ /* Set queue lengths */
+ qs->rbdr_len = RCV_BUF_COUNT;
+ qs->sq_len = SND_QUEUE_LEN;
+ qs->cq_len = CMP_QUEUE_LEN;
+ return 0;
+}
+
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+ bool disable = false;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+
+ if (!qs)
+ return 0;
+
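+ /* RQs are configured last on enable and brought down first on disable,
+ * so the rest of the queue resources exist whenever receive is active.
+ */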
+ if (enable) {
+ if (nicvf_alloc_resources(nic))
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, enable);
+ } else {
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+ nicvf_free_resources(nic);
+ }
+
+ return 0;
+}
+
+/* Get free descriptor(s) from SQ
+ * returns the index of the first reserved descriptor
+ */
+static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ int qentry;
+
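+ /* Queue length is a power of two, so masking with (q_len - 1) wraps the
+ * tail around the ring.
+ */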
+ qentry = sq->tail;
+ atomic_sub(desc_cnt, &sq->free_cnt);
+ sq->tail += desc_cnt;
+ sq->tail &= (sq->dmem.q_len - 1);
+
+ return qentry;
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ atomic_add(desc_cnt, &sq->free_cnt);
+ sq->head += desc_cnt;
+ sq->head &= (sq->dmem.q_len - 1);
+}
+
+static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+ qentry++;
+ qentry &= (sq->dmem.q_len - 1);
+ return qentry;
+}
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg |= NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+ /* Ring doorbell so that H/W restarts processing SQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg &= ~NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
+ int qidx)
+{
+ u64 head, tail;
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct sq_hdr_subdesc *hdr;
+
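+ /* Walk from the SW head up to the HW head, freeing skbs attached to
+ * descriptors the HW has already consumed and accounting them as
+ * transmitted in the netdev stats.
+ */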
+ head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+ while (sq->head != head) {
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+ nicvf_put_sq_desc(sq, 1);
+ continue;
+ }
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ if (skb)
+ dev_kfree_skb_any(skb);
+ atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
+ atomic64_add(hdr->tot_len,
+ (atomic64_t *)&netdev->stats.tx_bytes);
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ }
+}
+
+/* Calculate the number of SQ subdescriptors needed to transmit all
+ * segments of this TSO packet.
+ * Taken from 'Tilera network driver' with a minor modification.
+ */
+static int nicvf_tso_count_subdescs(struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->len - sh_len;
+ unsigned int p_len = sh->gso_size;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
+ long f_used = 0; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int num_edescs = 0;
+ int segment;
+
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned int p_used = 0;
+
+ /* One edesc for header and for each piece of the payload. */
+ for (num_edescs++; p_used < p_len; num_edescs++) {
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = skb_frag_size(&sh->frags[f_id]);
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+ }
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* '+ gso_segs' for one SQ_HDR_SUBDESC per segment */
+ return num_edescs + sh->gso_segs;
+}
+
+/* Get the number of SQ descriptors needed to xmit this skb */
+static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
+{
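+ /* MIN_SQ_DESC_PER_PKT_XMIT (2) accounts for one HDR subdesc plus one
+ * GATHER subdesc for the skb's linear data.
+ */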
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+ if (skb_shinfo(skb)->gso_size) {
+ subdesc_cnt = nicvf_tso_count_subdescs(skb);
+ return subdesc_cnt;
+ }
+
+ if (skb_shinfo(skb)->nr_frags)
+ subdesc_cnt += skb_shinfo(skb)->nr_frags;
+
+ return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+ int subdesc_cnt, struct sk_buff *skb, int len)
+{
+ int proto;
+ struct sq_hdr_subdesc *hdr;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ sq->skbuff[qentry] = (u64)skb;
+
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* Number of subdescriptors following this one */
+ hdr->subdesc_cnt = subdesc_cnt;
+ hdr->tot_len = len;
+
+ /* Offload checksum calculation to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+
+ hdr->csum_l3 = 1; /* Enable IP csum calculation */
+ hdr->l3_offset = skb_network_offset(skb);
+ hdr->l4_offset = skb_transport_offset(skb);
+
+ proto = ip_hdr(skb)->protocol;
+ switch (proto) {
+ case IPPROTO_TCP:
+ hdr->csum_l4 = SEND_L4_CSUM_TCP;
+ break;
+ case IPPROTO_UDP:
+ hdr->csum_l4 = SEND_L4_CSUM_UDP;
+ break;
+ case IPPROTO_SCTP:
+ hdr->csum_l4 = SEND_L4_CSUM_SCTP;
+ break;
+ }
+ }
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ int size, u64 data)
+{
+ struct sq_gather_subdesc *gather;
+
+ qentry &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+ memset(gather, 0, SND_QUEUE_DESC_SIZE);
+ gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+ gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
+ gather->size = size;
+ gather->addr = data;
+}
+
+/* Segment a TSO packet into 'gso_size' segments and append
+ * them to SQ for transfer
+ */
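+/* Software TSO: for every gso_size chunk a per-segment header is built in
+ * sq->tso_hdrs and posted via a GATHER subdesc, followed by GATHER subdescs
+ * for that chunk's payload; the segment's HDR subdesc is written last into
+ * the slot saved in hdr_qentry.
+ */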
+static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
+ int qentry, struct sk_buff *skb)
+{
+ struct tso_t tso;
+ int seg_subdescs = 0, desc_cnt = 0;
+ int seg_len, total_len, data_left;
+ int hdr_qentry = qentry;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Save Qentry for adding HDR_SUBDESC at the end */
+ hdr_qentry = qentry;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* Add segment's header */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
+ sq->tso_hdrs_phys +
+ qentry * TSO_HEADER_SIZE);
+ /* HDR_SUBDESC + GATHER */
+ seg_subdescs = 2;
+ seg_len = hdr_len;
+
+ /* Add segment's payload fragments */
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(tso.data));
+ seg_subdescs++;
+ seg_len += size;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+ seg_subdescs - 1, skb, seg_len);
+ sq->skbuff[hdr_qentry] = (u64)NULL;
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+
+ desc_cnt += seg_subdescs;
+ }
+ /* Save SKB in the last segment for freeing */
+ sq->skbuff[hdr_qentry] = (u64)skb;
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ skb_get_queue_mapping(skb), desc_cnt);
+ nic->drv_stats.tx_tso++;
+ return 1;
+}
+
+/* Append an skb to a SQ for packet transfer.
+ * Returns 1 if the skb was queued, 0 if the SQ lacks free descriptors.
+ */
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
+{
+ int i, size;
+ int subdesc_cnt;
+ int sq_num, qentry;
+ struct queue_set *qs = nic->qs;
+ struct snd_queue *sq;
+
+ sq_num = skb_get_queue_mapping(skb);
+ sq = &qs->sq[sq_num];
+
+ subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
+ if (subdesc_cnt > atomic_read(&sq->free_cnt))
+ goto append_fail;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ /* Check if it's a TSO packet */
+ if (skb_shinfo(skb)->gso_size)
+ return nicvf_sq_append_tso(nic, sq, qentry, skb);
+
+ /* Add SQ header subdesc */
+ nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+
+ /* Add SQ gather subdescs */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+
+ /* Check for scattered buffer */
+ if (!skb_is_nonlinear(skb))
+ goto doorbell;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[i];
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_frag_size(frag);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(
+ skb_frag_address(frag)));
+ }
+
+doorbell:
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit new packet */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, subdesc_cnt);
+ return 1;
+
+append_fail:
+ netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
+ return 0;
+}
+
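+/* The per-buffer sizes in the RX CQE are 16-bit fields packed into 64-bit
+ * words; on big-endian hosts the four halfwords within each word are seen
+ * in reverse order, so remap the index within each group of four.
+ */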
+static inline unsigned frag_num(unsigned i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
+
+/* Returns SKB for a received packet */
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+{
+ int frag;
+ int payload_len = 0;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *skb_frag = NULL;
+ struct sk_buff *prev_frag = NULL;
+ u16 *rb_lens = NULL;
+ u64 *rb_ptrs = NULL;
+
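+ /* Receive buffer sizes start at word 3 of the CQE and the buffer
+ * pointers at word 6 (see struct cqe_rx_t in q_struct.h).
+ */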
+ rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+
+ netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
+ __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
+
+ for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+ payload_len = rb_lens[frag_num(frag)];
+ if (!frag) {
+ /* First fragment */
+ skb = nicvf_rb_ptr_to_skb(nic,
+ *rb_ptrs - cqe_rx->align_pad,
+ payload_len);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, cqe_rx->align_pad);
+ skb_put(skb, payload_len);
+ } else {
+ /* Add fragments */
+ skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
+ payload_len);
+ if (!skb_frag) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ if (!skb_shinfo(skb)->frag_list)
+ skb_shinfo(skb)->frag_list = skb_frag;
+ else
+ prev_frag->next = skb_frag;
+
+ prev_frag = skb_frag;
+ skb->len += payload_len;
+ skb->data_len += payload_len;
+ skb_frag->len = payload_len;
+ }
+ /* Next buffer pointer */
+ rb_ptrs++;
+ }
+ return skb;
+}
+
+/* Enable interrupt */
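+/* Interrupt enable bits live in NIC_VF_ENA_W1S; per-queue sources use one
+ * bit per queue starting at the type's shift, and the W1S/W1C register pair
+ * follows the usual write-1-to-set / write-1-to-clear convention.
+ */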
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val;
+
+ reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to enable interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
+}
+
+/* Disable interrupt */
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to disable interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
+}
+
+/* Clear interrupt */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to clear interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+}
+
+/* Check if interrupt is enabled */
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val;
+ u64 mask = 0xff;
+
+ reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ mask = NICVF_INTR_PKT_DROP_MASK;
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ mask = NICVF_INTR_TCP_TIMER_MASK;
+ break;
+ case NICVF_INTR_MBOX:
+ mask = NICVF_INTR_MBOX_MASK;
+ break;
+ case NICVF_INTR_QS_ERR:
+ mask = NICVF_INTR_QS_ERR_MASK;
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to check interrupt enable: unknown type\n");
+ break;
+ }
+
+ return (reg_val & mask);
+}
+
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+ struct rcv_queue *rq;
+
+#define GET_RQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+ (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ rq = &nic->qs->rq[rq_idx];
+ rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+ rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+ struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+ (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ sq = &nic->qs->sq[sq_idx];
+ sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+ sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive completion queue entry */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+{
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+ stats->rx.errop.good++;
+ return 0;
+ }
+
+ if (netif_msg_rx_err(nic))
+ netdev_err(nic->netdev,
+ "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
+ nic->netdev->name,
+ cqe_rx->err_level, cqe_rx->err_opcode);
+
+ switch (cqe_rx->err_level) {
+ case CQ_ERRLVL_MAC:
+ stats->rx.errlvl.mac_errs++;
+ break;
+ case CQ_ERRLVL_L2:
+ stats->rx.errlvl.l2_errs++;
+ break;
+ case CQ_ERRLVL_L3:
+ stats->rx.errlvl.l3_errs++;
+ break;
+ case CQ_ERRLVL_L4:
+ stats->rx.errlvl.l4_errs++;
+ break;
+ }
+
+ switch (cqe_rx->err_opcode) {
+ case CQ_RX_ERROP_RE_PARTIAL:
+ stats->rx.errop.partial_pkts++;
+ break;
+ case CQ_RX_ERROP_RE_JABBER:
+ stats->rx.errop.jabber_errs++;
+ break;
+ case CQ_RX_ERROP_RE_FCS:
+ stats->rx.errop.fcs_errs++;
+ break;
+ case CQ_RX_ERROP_RE_TERMINATE:
+ stats->rx.errop.terminate_errs++;
+ break;
+ case CQ_RX_ERROP_RE_RX_CTL:
+ stats->rx.errop.bgx_rx_errs++;
+ break;
+ case CQ_RX_ERROP_PREL2_ERR:
+ stats->rx.errop.prel2_errs++;
+ break;
+ case CQ_RX_ERROP_L2_FRAGMENT:
+ stats->rx.errop.l2_frags++;
+ break;
+ case CQ_RX_ERROP_L2_OVERRUN:
+ stats->rx.errop.l2_overruns++;
+ break;
+ case CQ_RX_ERROP_L2_PFCS:
+ stats->rx.errop.l2_pfcs++;
+ break;
+ case CQ_RX_ERROP_L2_PUNY:
+ stats->rx.errop.l2_puny++;
+ break;
+ case CQ_RX_ERROP_L2_MAL:
+ stats->rx.errop.l2_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_L2_OVERSIZE:
+ stats->rx.errop.l2_oversize++;
+ break;
+ case CQ_RX_ERROP_L2_UNDERSIZE:
+ stats->rx.errop.l2_undersize++;
+ break;
+ case CQ_RX_ERROP_L2_LENMISM:
+ stats->rx.errop.l2_len_mismatch++;
+ break;
+ case CQ_RX_ERROP_L2_PCLP:
+ stats->rx.errop.l2_pclp++;
+ break;
+ case CQ_RX_ERROP_IP_NOT:
+ stats->rx.errop.non_ip++;
+ break;
+ case CQ_RX_ERROP_IP_CSUM_ERR:
+ stats->rx.errop.ip_csum_err++;
+ break;
+ case CQ_RX_ERROP_IP_MAL:
+ stats->rx.errop.ip_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_MALD:
+ stats->rx.errop.ip_payload_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_HOP:
+ stats->rx.errop.ip_hop_errs++;
+ break;
+ case CQ_RX_ERROP_L3_ICRC:
+ stats->rx.errop.l3_icrc_errs++;
+ break;
+ case CQ_RX_ERROP_L3_PCLP:
+ stats->rx.errop.l3_pclp++;
+ break;
+ case CQ_RX_ERROP_L4_MAL:
+ stats->rx.errop.l4_malformed++;
+ break;
+ case CQ_RX_ERROP_L4_CHK:
+ stats->rx.errop.l4_csum_errs++;
+ break;
+ case CQ_RX_ERROP_UDP_LEN:
+ stats->rx.errop.udp_len_err++;
+ break;
+ case CQ_RX_ERROP_L4_PORT:
+ stats->rx.errop.bad_l4_port++;
+ break;
+ case CQ_RX_ERROP_TCP_FLAG:
+ stats->rx.errop.bad_tcp_flag++;
+ break;
+ case CQ_RX_ERROP_TCP_OFFSET:
+ stats->rx.errop.tcp_offset_errs++;
+ break;
+ case CQ_RX_ERROP_L4_PCLP:
+ stats->rx.errop.l4_pclp++;
+ break;
+ case CQ_RX_ERROP_RBDR_TRUNC:
+ stats->rx.errop.pkt_truncated++;
+ break;
+ }
+
+ return 1;
+}
+
+/* Check for errors in the send completion queue entry */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+{
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ switch (cqe_tx->send_status) {
+ case CQ_TX_ERROP_GOOD:
+ stats->tx.good++;
+ return 0;
+ case CQ_TX_ERROP_DESC_FAULT:
+ stats->tx.desc_fault++;
+ break;
+ case CQ_TX_ERROP_HDR_CONS_ERR:
+ stats->tx.hdr_cons_err++;
+ break;
+ case CQ_TX_ERROP_SUBDC_ERR:
+ stats->tx.subdesc_err++;
+ break;
+ case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+ stats->tx.imm_size_oflow++;
+ break;
+ case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+ stats->tx.data_seq_err++;
+ break;
+ case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+ stats->tx.mem_seq_err++;
+ break;
+ case CQ_TX_ERROP_LOCK_VIOL:
+ stats->tx.lock_viol++;
+ break;
+ case CQ_TX_ERROP_DATA_FAULT:
+ stats->tx.data_fault++;
+ break;
+ case CQ_TX_ERROP_TSTMP_CONFLICT:
+ stats->tx.tstmp_conflict++;
+ break;
+ case CQ_TX_ERROP_TSTMP_TIMEOUT:
+ stats->tx.tstmp_timeout++;
+ break;
+ case CQ_TX_ERROP_MEM_FAULT:
+ stats->tx.mem_fault++;
+ break;
+ case CQ_TX_ERROP_CK_OVERLAP:
+ stats->tx.csum_overlap++;
+ break;
+ case CQ_TX_ERROP_CK_OFLOW:
+ stats->tx.csum_overflow++;
+ break;
+ }
+
+ return 1;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
new file mode 100644
index 000000000..f0937b7bf
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include <linux/netdevice.h>
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET 128
+#define MAX_RCV_QUEUES_PER_QS 8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
+#define MAX_SND_QUEUES_PER_QS 8
+#define MAX_CMP_QUEUES_PER_QS 8
+
+/* VF's queue interrupt ranges */
+#define NICVF_INTR_ID_CQ 0
+#define NICVF_INTR_ID_SQ 8
+#define NICVF_INTR_ID_RBDR 16
+#define NICVF_INTR_ID_MISC 18
+#define NICVF_INTR_ID_QS_ERR 19
+
+#define for_each_cq_irq(irq) \
+ for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
+#define for_each_sq_irq(irq) \
+ for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
+#define for_each_rbdr_irq(irq) \
+ for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
+
+#define RBDR_SIZE0 0ULL /* 8K entries */
+#define RBDR_SIZE1 1ULL /* 16K entries */
+#define RBDR_SIZE2 2ULL /* 32K entries */
+#define RBDR_SIZE3 3ULL /* 64K entries */
+#define RBDR_SIZE4 4ULL /* 128K entries */
+#define RBDR_SIZE5 5ULL /* 256K entries */
+#define RBDR_SIZE6 6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
+
+/* Default queue count per QS, its lengths and threshold values */
+#define RBDR_CNT 1
+#define RCV_QUEUE_CNT 8
+#define SND_QUEUE_CNT 8
+#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
+
+#define SND_QSIZE SND_QUEUE_SIZE2
+#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
+#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
+#define SND_QUEUE_THRESH 2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT 2
+/* 1 CQE per transmitted packet since timestamping is not enabled; otherwise 2 */
+#define MAX_CQE_PER_PKT_XMIT 1
+
+/* Keep CQ and SQ sizes the same; if timestamping
+ * is enabled this relation will change.
+ */
+#define CMP_QSIZE CMP_QUEUE_SIZE2
+#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
+#define CMP_QUEUE_CQE_THRESH 0
+#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
+
+#define RBDR_SIZE RBDR_SIZE0
+#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
+#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
+#define RBDR_THRESH (RCV_BUF_COUNT / 2)
+#define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */
+#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+ (NICVF_RCV_BUF_ALIGN_BYTES * 2))
+#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES
+
+#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
+ MAX_CQE_PER_PKT_XMIT)
+/* Calculate number of CQEs to reserve for all SQEs.
+ * It's expressed in 1/256th units of the CQ size.
+ * '+ 1' to account for pipelining.
+ */
+#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
+ (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
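+/* With the defaults above (SND_QUEUE_LEN = CMP_QUEUE_LEN = 4096,
+ * MAX_CQES_FOR_TX = 2048) this evaluates to 256 / 2 + 1 = 129 CQEs.
+ */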
+
+/* Descriptor size in bytes */
+#define SND_QUEUE_DESC_SIZE 16
+#define CMP_QUEUE_DESC_SIZE 512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN 7
+#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
+#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
+ (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
+#define NICVF_RCV_BUF_ALIGN_LEN(X)\
+ (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET BIT_ULL(41)
+#define NICVF_SQ_RESET BIT_ULL(17)
+#define NICVF_RBDR_RESET BIT_ULL(43)
+
+enum CQ_RX_ERRLVL_E {
+ CQ_ERRLVL_MAC,
+ CQ_ERRLVL_L2,
+ CQ_ERRLVL_L3,
+ CQ_ERRLVL_L4,
+};
+
+enum CQ_RX_ERROP_E {
+ CQ_RX_ERROP_RE_NONE = 0x0,
+ CQ_RX_ERROP_RE_PARTIAL = 0x1,
+ CQ_RX_ERROP_RE_JABBER = 0x2,
+ CQ_RX_ERROP_RE_FCS = 0x7,
+ CQ_RX_ERROP_RE_TERMINATE = 0x9,
+ CQ_RX_ERROP_RE_RX_CTL = 0xb,
+ CQ_RX_ERROP_PREL2_ERR = 0x1f,
+ CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+ CQ_RX_ERROP_L2_OVERRUN = 0x21,
+ CQ_RX_ERROP_L2_PFCS = 0x22,
+ CQ_RX_ERROP_L2_PUNY = 0x23,
+ CQ_RX_ERROP_L2_MAL = 0x24,
+ CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+ CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+ CQ_RX_ERROP_L2_LENMISM = 0x27,
+ CQ_RX_ERROP_L2_PCLP = 0x28,
+ CQ_RX_ERROP_IP_NOT = 0x41,
+ CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+ CQ_RX_ERROP_IP_MAL = 0x43,
+ CQ_RX_ERROP_IP_MALD = 0x44,
+ CQ_RX_ERROP_IP_HOP = 0x45,
+ CQ_RX_ERROP_L3_ICRC = 0x46,
+ CQ_RX_ERROP_L3_PCLP = 0x47,
+ CQ_RX_ERROP_L4_MAL = 0x61,
+ CQ_RX_ERROP_L4_CHK = 0x62,
+ CQ_RX_ERROP_UDP_LEN = 0x63,
+ CQ_RX_ERROP_L4_PORT = 0x64,
+ CQ_RX_ERROP_TCP_FLAG = 0x65,
+ CQ_RX_ERROP_TCP_OFFSET = 0x66,
+ CQ_RX_ERROP_L4_PCLP = 0x67,
+ CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum CQ_TX_ERROP_E {
+ CQ_TX_ERROP_GOOD = 0x0,
+ CQ_TX_ERROP_DESC_FAULT = 0x10,
+ CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+ CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+ CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+ CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+ CQ_TX_ERROP_LOCK_VIOL = 0x83,
+ CQ_TX_ERROP_DATA_FAULT = 0x84,
+ CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+ CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+ CQ_TX_ERROP_MEM_FAULT = 0x87,
+ CQ_TX_ERROP_CK_OVERLAP = 0x88,
+ CQ_TX_ERROP_CK_OFLOW = 0x89,
+ CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+struct cmp_queue_stats {
+ struct rx_stats {
+ struct {
+ u64 mac_errs;
+ u64 l2_errs;
+ u64 l3_errs;
+ u64 l4_errs;
+ } errlvl;
+ struct {
+ u64 good;
+ u64 partial_pkts;
+ u64 jabber_errs;
+ u64 fcs_errs;
+ u64 terminate_errs;
+ u64 bgx_rx_errs;
+ u64 prel2_errs;
+ u64 l2_frags;
+ u64 l2_overruns;
+ u64 l2_pfcs;
+ u64 l2_puny;
+ u64 l2_hdr_malformed;
+ u64 l2_oversize;
+ u64 l2_undersize;
+ u64 l2_len_mismatch;
+ u64 l2_pclp;
+ u64 non_ip;
+ u64 ip_csum_err;
+ u64 ip_hdr_malformed;
+ u64 ip_payload_malformed;
+ u64 ip_hop_errs;
+ u64 l3_icrc_errs;
+ u64 l3_pclp;
+ u64 l4_malformed;
+ u64 l4_csum_errs;
+ u64 udp_len_err;
+ u64 bad_l4_port;
+ u64 bad_tcp_flag;
+ u64 tcp_offset_errs;
+ u64 l4_pclp;
+ u64 pkt_truncated;
+ } errop;
+ } rx;
+ struct tx_stats {
+ u64 good;
+ u64 desc_fault;
+ u64 hdr_cons_err;
+ u64 subdesc_err;
+ u64 imm_size_oflow;
+ u64 data_seq_err;
+ u64 mem_seq_err;
+ u64 lock_viol;
+ u64 data_fault;
+ u64 tstmp_conflict;
+ u64 tstmp_timeout;
+ u64 mem_fault;
+ u64 csum_overlap;
+ u64 csum_overflow;
+ } tx;
+} ____cacheline_aligned_in_smp;
+
+enum RQ_SQ_STATS {
+ RQ_SQ_STATS_OCTS,
+ RQ_SQ_STATS_PKTS,
+};
+
+struct rx_tx_queue_stats {
+ u64 bytes;
+ u64 pkts;
+} ____cacheline_aligned_in_smp;
+
+struct q_desc_mem {
+ dma_addr_t dma;
+ u64 size;
+ u16 q_len;
+ dma_addr_t phys_base;
+ void *base;
+ void *unalign_base;
+};
+
+struct rbdr {
+ bool enable;
+ u32 dma_size;
+ u32 frag_len;
+ u32 thresh; /* Threshold level for interrupt */
+ void *desc;
+ u32 head;
+ u32 tail;
+ struct q_desc_mem dmem;
+} ____cacheline_aligned_in_smp;
+
+struct rcv_queue {
+ bool enable;
+ struct rbdr *rbdr_start;
+ struct rbdr *rbdr_cont;
+ bool en_tcp_reassembly;
+ u8 cq_qs; /* CQ's QS to which this RQ is assigned */
+ u8 cq_idx; /* CQ index (0 to 7) in the QS */
+ u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
+ u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
+ u8 start_rbdr_qs; /* First buffer ptrs - QS num */
+ u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
+ u8 caching;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct cmp_queue {
+ bool enable;
+ u16 thresh;
+ spinlock_t lock; /* lock to serialize processing CQEs */
+ void *desc;
+ struct q_desc_mem dmem;
+ struct cmp_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct snd_queue {
+ bool enable;
+ u8 cq_qs; /* CQ's QS to which this SQ is pointing */
+ u8 cq_idx; /* CQ index (0 to 7) in the above QS */
+ u16 thresh;
+ atomic_t free_cnt;
+ u32 head;
+ u32 tail;
+ u64 *skbuff;
+ void *desc;
+
+#define TSO_HEADER_SIZE 128
+ /* For TSO segment's header */
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_phys;
+
+ cpumask_t affinity_mask;
+ struct q_desc_mem dmem;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct queue_set {
+ bool enable;
+ bool be_en;
+ u8 vnic_id;
+ u8 rq_cnt;
+ u8 cq_cnt;
+ u64 cq_len;
+ u8 sq_cnt;
+ u64 sq_len;
+ u8 rbdr_cnt;
+ u64 rbdr_len;
+ struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
+ struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
+ struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
+ struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+} ____cacheline_aligned_in_smp;
+
+#define GET_RBDR_DESC(RING, idx)\
+ (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+ (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+ (&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define CQ_WR_FULL BIT(26)
+#define CQ_WR_DISABLE BIT(25)
+#define CQ_WR_FAULT BIT(24)
+#define CQ_CQE_COUNT (0xFFFF << 0)
+
+#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct net_device *netdev,
+ struct snd_queue *sq, int qidx);
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
+
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_work(struct work_struct *work);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val);
+u64 nicvf_queue_reg_read(struct nicvf *nic,
+ u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
new file mode 100644
index 000000000..3c1de97b1
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -0,0 +1,701 @@
+/*
+ * This file contains HW queue descriptor formats, config register
+ * structures etc
+ *
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+ NIC_SEND_LD_TYPE_E_LDD = 0x0,
+ NIC_SEND_LD_TYPE_E_LDT = 0x1,
+ NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+ NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+ ETYPE_ALG_NONE = 0x0,
+ ETYPE_ALG_SKIP = 0x1,
+ ETYPE_ALG_ENDPARSE = 0x2,
+ ETYPE_ALG_VLAN = 0x3,
+ ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+ L3TYPE_NONE = 0x00,
+ L3TYPE_GRH = 0x01,
+ L3TYPE_IPV4 = 0x04,
+ L3TYPE_IPV4_OPTIONS = 0x05,
+ L3TYPE_IPV6 = 0x06,
+ L3TYPE_IPV6_OPTIONS = 0x07,
+ L3TYPE_ET_STOP = 0x0D,
+ L3TYPE_OTHER = 0x0E,
+};
+
+enum layer4_type {
+ L4TYPE_NONE = 0x00,
+ L4TYPE_IPSEC_ESP = 0x01,
+ L4TYPE_IPFRAG = 0x02,
+ L4TYPE_IPCOMP = 0x03,
+ L4TYPE_TCP = 0x04,
+ L4TYPE_UDP = 0x05,
+ L4TYPE_SCTP = 0x06,
+ L4TYPE_GRE = 0x07,
+ L4TYPE_ROCE_BTH = 0x08,
+ L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+ CPI_ALG_NONE = 0x0,
+ CPI_ALG_VLAN = 0x1,
+ CPI_ALG_VLAN16 = 0x2,
+ CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+ RSS_ALG_NONE = 0x00,
+ RSS_ALG_PORT = 0x01,
+ RSS_ALG_IP = 0x02,
+ RSS_ALG_TCP_IP = 0x03,
+ RSS_ALG_UDP_IP = 0x04,
+ RSS_ALG_SCTP_IP = 0x05,
+ RSS_ALG_GRE_IP = 0x06,
+ RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+ RSS_HASH_L2ETC = 0x00,
+ RSS_HASH_IP = 0x01,
+ RSS_HASH_TCP = 0x02,
+ RSS_HASH_TCP_SYN_DIS = 0x03,
+ RSS_HASH_UDP = 0x04,
+ RSS_HASH_L4ETC = 0x05,
+ RSS_HASH_ROCE = 0x06,
+ RSS_L3_BIDI = 0x07,
+ RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+ CQE_TYPE_INVALID = 0x0,
+ CQE_TYPE_RX = 0x2,
+ CQE_TYPE_RX_SPLIT = 0x3,
+ CQE_TYPE_RX_TCP = 0x4,
+ CQE_TYPE_SEND = 0x8,
+ CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+ CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+ CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+ CQE_SEND_STATUS_GOOD = 0x00,
+ CQE_SEND_STATUS_DESC_FAULT = 0x01,
+ CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+ CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+ CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+ CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+ CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+ CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+ CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+ CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+ CQE_SEND_STATUS_DATA_FAULT = 0x86,
+ CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+ CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+ CQE_SEND_STATUS_MEM_FAULT = 0x89,
+ CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+ CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+ CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+ CQE_RX_TCP_END_INVALID_FLAG = 1,
+ CQE_RX_TCP_END_TIMEOUT = 2,
+ CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+ CQE_RX_TCP_END_PKT_ERR = 4,
+ CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+ CQE_RX_ERRLVL_RE = 0x0,
+ CQE_RX_ERRLVL_L2 = 0x1,
+ CQE_RX_ERRLVL_L3 = 0x2,
+ CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+ CQE_RX_ERR_RE_NONE = 0x0,
+ CQE_RX_ERR_RE_PARTIAL = 0x1,
+ CQE_RX_ERR_RE_JABBER = 0x2,
+ CQE_RX_ERR_RE_FCS = 0x7,
+ CQE_RX_ERR_RE_TERMINATE = 0x9,
+ CQE_RX_ERR_RE_RX_CTL = 0xb,
+ CQE_RX_ERR_PREL2_ERR = 0x1f,
+ CQE_RX_ERR_L2_FRAGMENT = 0x20,
+ CQE_RX_ERR_L2_OVERRUN = 0x21,
+ CQE_RX_ERR_L2_PFCS = 0x22,
+ CQE_RX_ERR_L2_PUNY = 0x23,
+ CQE_RX_ERR_L2_MAL = 0x24,
+ CQE_RX_ERR_L2_OVERSIZE = 0x25,
+ CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+ CQE_RX_ERR_L2_LENMISM = 0x27,
+ CQE_RX_ERR_L2_PCLP = 0x28,
+ CQE_RX_ERR_IP_NOT = 0x41,
+ CQE_RX_ERR_IP_CHK = 0x42,
+ CQE_RX_ERR_IP_MAL = 0x43,
+ CQE_RX_ERR_IP_MALD = 0x44,
+ CQE_RX_ERR_IP_HOP = 0x45,
+ CQE_RX_ERR_L3_ICRC = 0x46,
+ CQE_RX_ERR_L3_PCLP = 0x47,
+ CQE_RX_ERR_L4_MAL = 0x61,
+ CQE_RX_ERR_L4_CHK = 0x62,
+ CQE_RX_ERR_UDP_LEN = 0x63,
+ CQE_RX_ERR_L4_PORT = 0x64,
+ CQE_RX_ERR_TCP_FLAG = 0x65,
+ CQE_RX_ERR_TCP_OFFSET = 0x66,
+ CQE_RX_ERR_L4_PCLP = 0x67,
+ CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 stdn_fault:1;
+ u64 rsvd0:1;
+ u64 rq_qs:7;
+ u64 rq_idx:3;
+ u64 rsvd1:12;
+ u64 rss_alg:4;
+ u64 rsvd2:4;
+ u64 rb_cnt:4;
+ u64 vlan_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan2_stripped:1;
+ u64 l4_type:4;
+ u64 l3_type:4;
+ u64 l2_present:1;
+ u64 err_level:3;
+ u64 err_opcode:8;
+
+ u64 pkt_len:16; /* W1 */
+ u64 l2_ptr:8;
+ u64 l3_ptr:8;
+ u64 l4_ptr:8;
+ u64 cq_pkt_len:8;
+ u64 align_pad:3;
+ u64 rsvd3:1;
+ u64 chan:12;
+
+ u64 rss_tag:32; /* W2 */
+ u64 vlan_tci:16;
+ u64 vlan_ptr:8;
+ u64 vlan2_ptr:8;
+
+ u64 rb3_sz:16; /* W3 */
+ u64 rb2_sz:16;
+ u64 rb1_sz:16;
+ u64 rb0_sz:16;
+
+ u64 rb7_sz:16; /* W4 */
+ u64 rb6_sz:16;
+ u64 rb5_sz:16;
+ u64 rb4_sz:16;
+
+ u64 rb11_sz:16; /* W5 */
+ u64 rb10_sz:16;
+ u64 rb9_sz:16;
+ u64 rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 err_opcode:8;
+ u64 err_level:3;
+ u64 l2_present:1;
+ u64 l3_type:4;
+ u64 l4_type:4;
+ u64 vlan2_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan_found:1;
+ u64 rb_cnt:4;
+ u64 rsvd2:4;
+ u64 rss_alg:4;
+ u64 rsvd1:12;
+ u64 rq_idx:3;
+ u64 rq_qs:7;
+ u64 rsvd0:1;
+ u64 stdn_fault:1;
+ u64 cqe_type:4; /* W0 */
+ u64 chan:12;
+ u64 rsvd3:1;
+ u64 align_pad:3;
+ u64 cq_pkt_len:8;
+ u64 l4_ptr:8;
+ u64 l3_ptr:8;
+ u64 l2_ptr:8;
+ u64 pkt_len:16; /* W1 */
+ u64 vlan2_ptr:8;
+ u64 vlan_ptr:8;
+ u64 vlan_tci:16;
+ u64 rss_tag:32; /* W2 */
+ u64 rb0_sz:16;
+ u64 rb1_sz:16;
+ u64 rb2_sz:16;
+ u64 rb3_sz:16; /* W3 */
+ u64 rb4_sz:16;
+ u64 rb5_sz:16;
+ u64 rb6_sz:16;
+ u64 rb7_sz:16; /* W4 */
+ u64 rb8_sz:16;
+ u64 rb9_sz:16;
+ u64 rb10_sz:16;
+ u64 rb11_sz:16; /* W5 */
+#endif
+ u64 rb0_ptr:64;
+ u64 rb1_ptr:64;
+ u64 rb2_ptr:64;
+ u64 rb3_ptr:64;
+ u64 rb4_ptr:64;
+ u64 rb5_ptr:64;
+ u64 rb6_ptr:64;
+ u64 rb7_ptr:64;
+ u64 rb8_ptr:64;
+ u64 rb9_ptr:64;
+ u64 rb10_ptr:64;
+ u64 rb11_ptr:64;
+};
+
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:60;
+
+ u64 rsvd1:4; /* W1 */
+ u64 partial_first:1;
+ u64 rsvd2:27;
+ u64 rbdr_bytes:8;
+ u64 rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 rsvd0:60;
+ u64 cqe_type:4;
+
+ u64 rsvd3:24;
+ u64 rbdr_bytes:8;
+ u64 rsvd2:27;
+ u64 partial_first:1;
+ u64 rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:52;
+ u64 cq_tcp_status:8;
+
+ u64 rsvd1:32; /* W1 */
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd2:8;
+ u64 tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cq_tcp_status:8;
+ u64 rsvd0:52;
+ u64 cqe_type:4; /* W0 */
+
+ u64 tcp_err_bytes:16;
+ u64 rsvd2:8;
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:4;
+ u64 sqe_ptr:16;
+ u64 rsvd1:4;
+ u64 rsvd2:10;
+ u64 sq_qs:7;
+ u64 sq_idx:3;
+ u64 rsvd3:8;
+ u64 send_status:8;
+
+ u64 ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 send_status:8;
+ u64 rsvd3:8;
+ u64 sq_idx:3;
+ u64 sq_qs:7;
+ u64 rsvd2:10;
+ u64 rsvd1:4;
+ u64 sqe_ptr:16;
+ u64 rsvd0:4;
+ u64 cqe_type:4; /* W0 */
+
+ u64 ptp_timestamp:64; /* W1 */
+#endif
+};
+
+union cq_desc_t {
+ u64 u[64];
+ struct cqe_send_t snd_hdr;
+ struct cqe_rx_t rx_hdr;
+ struct cqe_rx_tcp_t rx_tcp_hdr;
+ struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
+
+struct rbdr_entry_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd0:15;
+ u64 buf_addr:42;
+ u64 cache_align:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cache_align:7;
+ u64 buf_addr:42;
+ u64 rsvd0:15;
+#endif
+};
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 tcp_pkt_cnt:12;
+ u64 rsvd1:4;
+ u64 align_hdr_bytes:4;
+ u64 align_ptr_bytes:4;
+ u64 ptr_bytes:16;
+ u64 rsvd2:24;
+ u64 cqe_type:4;
+ u64 rsvd0:54;
+ u64 tcp_end_reason:2;
+ u64 tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_status:4;
+ u64 tcp_end_reason:2;
+ u64 rsvd0:54;
+ u64 cqe_type:4;
+ u64 rsvd2:24;
+ u64 ptr_bytes:16;
+ u64 align_ptr_bytes:4;
+ u64 align_hdr_bytes:4;
+ u64 rsvd1:4;
+ u64 tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+ u64 opaque:32;
+ u64 rss_flow:8;
+ u64 skip_length:6;
+ u64 disable_rss:1;
+ u64 disable_tcp_reassembly:1;
+ u64 nodrop:1;
+ u64 dest_alg:2;
+ u64 rsvd0:2;
+ u64 dest_rq:11;
+};
+
+enum send_l4_csum_type {
+ SEND_L4_CSUM_DISABLE = 0x00,
+ SEND_L4_CSUM_UDP = 0x01,
+ SEND_L4_CSUM_TCP = 0x02,
+ SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+ SEND_CRCALG_CRC32 = 0x00,
+ SEND_CRCALG_CRC32C = 0x01,
+ SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+ SEND_LD_TYPE_LDD = 0x00,
+ SEND_LD_TYPE_LDT = 0x01,
+ SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+ SEND_MEMALG_SET = 0x00,
+ SEND_MEMALG_ADD = 0x08,
+ SEND_MEMALG_SUB = 0x09,
+ SEND_MEMALG_ADDLEN = 0x0A,
+ SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+ SEND_MEMDSZ_B64 = 0x00,
+ SEND_MEMDSZ_B32 = 0x01,
+ SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+ SQ_DESC_TYPE_INVALID = 0x00,
+ SQ_DESC_TYPE_HEADER = 0x01,
+ SQ_DESC_TYPE_CRC = 0x02,
+ SQ_DESC_TYPE_IMMEDIATE = 0x03,
+ SQ_DESC_TYPE_GATHER = 0x04,
+ SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd1:32;
+ u64 crc_ival:32;
+ u64 subdesc_type:4;
+ u64 crc_alg:2;
+ u64 rsvd0:10;
+ u64 crc_insert_pos:16;
+ u64 hdr_start:16;
+ u64 crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 crc_len:16;
+ u64 hdr_start:16;
+ u64 crc_insert_pos:16;
+ u64 rsvd0:10;
+ u64 crc_alg:2;
+ u64 subdesc_type:4;
+ u64 crc_ival:32;
+ u64 rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 ld_type:2;
+ u64 rsvd0:42;
+ u64 size:16;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 size:16;
+ u64 rsvd0:42;
+ u64 ld_type:2;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 rsvd0:46;
+ u64 len:14;
+
+ u64 data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 len:14;
+ u64 rsvd0:46;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 mem_alg:4;
+ u64 mem_dsz:2;
+ u64 wmem:1;
+ u64 rsvd0:21;
+ u64 offset:32;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 offset:32;
+ u64 rsvd0:21;
+ u64 wmem:1;
+ u64 mem_dsz:2;
+ u64 mem_alg:4;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4;
+ u64 tso:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 dont_send:1;
+ u64 tstmp:1;
+ u64 subdesc_cnt:8;
+ u64 csum_l4:2;
+ u64 csum_l3:1;
+ u64 rsvd0:5;
+ u64 l4_offset:8;
+ u64 l3_offset:8;
+ u64 rsvd1:4;
+ u64 tot_len:20; /* W0 */
+
+ u64 tso_sdc_cont:8;
+ u64 tso_sdc_first:8;
+ u64 tso_l4_offset:8;
+ u64 tso_flags_last:12;
+ u64 tso_flags_first:12;
+ u64 rsvd2:2;
+ u64 tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tot_len:20;
+ u64 rsvd1:4;
+ u64 l3_offset:8;
+ u64 l4_offset:8;
+ u64 rsvd0:5;
+ u64 csum_l3:1;
+ u64 csum_l4:2;
+ u64 subdesc_cnt:8;
+ u64 tstmp:1;
+ u64 dont_send:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 tso:1;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 tso_max_paysize:14;
+ u64 rsvd2:2;
+ u64 tso_flags_first:12;
+ u64 tso_flags_last:12;
+ u64 tso_l4_offset:8;
+ u64 tso_sdc_first:8;
+ u64 tso_sdc_cont:8; /* W1 */
+#endif
+};
+
+/* Queue config register formats */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63:62;
+ u64 ena:1;
+ u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_ena:1;
+ u64 ena:1;
+ u64 reserved_2_63:62;
+#endif
+};
+
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_43_63:21;
+ u64 ena:1;
+ u64 reset:1;
+ u64 caching:1;
+ u64 reserved_35_39:5;
+ u64 qsize:3;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_0_15:16;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:3;
+ u64 reserved_35_39:5;
+ u64 caching:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_43_63:21;
+#endif
+};
+
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63:44;
+ u64 ena:1;
+ u64 reserved_18_18:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_11_15:5;
+ u64 qsize:3;
+ u64 reserved_3_7:5;
+ u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tstmp_bgx_intf:3;
+ u64 reserved_3_7:5;
+ u64 qsize:3;
+ u64 reserved_11_15:5;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 reserved_18_18:1;
+ u64 ena:1;
+ u64 reserved_20_63:44;
+#endif
+};
+
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_45_63:19;
+ u64 ena:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_36_41:6;
+ u64 qsize:4;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_12_15:4;
+ u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 lines:12;
+ u64 reserved_12_15:4;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:4;
+ u64 reserved_36_41:6;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_45_63:19;
+#endif
+};
+
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_32_63:32;
+ u64 ena:1;
+ u64 reserved_27_30:4;
+ u64 sq_ins_ena:1;
+ u64 sq_ins_pos:6;
+ u64 lock_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 send_tstmp_ena:1;
+ u64 be:1;
+ u64 reserved_7_15:9;
+ u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 vnic:7;
+ u64 reserved_7_15:9;
+ u64 be:1;
+ u64 send_tstmp_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 lock_ena:1;
+ u64 sq_ins_pos:6;
+ u64 sq_ins_ena:1;
+ u64 reserved_27_30:4;
+ u64 ena:1;
+ u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
new file mode 100644
index 000000000..b961a89dc
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -0,0 +1,969 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-BGX"
+#define DRV_VERSION "1.0"
+
+struct lmac {
+ struct bgx *bgx;
+ int dmac;
+ unsigned char mac[ETH_ALEN];
+ bool link_up;
+ int lmacid; /* ID within BGX */
+ int lmacid_bd; /* ID on board */
+ struct net_device netdev;
+ struct phy_device *phydev;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ unsigned int last_speed;
+ bool is_sgmii;
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+};
+
+struct bgx {
+ u8 bgx_id;
+ u8 qlm_mode;
+ struct lmac lmac[MAX_LMAC_PER_BGX];
+ int lmac_count;
+ int lmac_type;
+ int lane_to_sds;
+ int use_training;
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+};
+
+static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
+static int lmac_count; /* Total no of LMACs in system */
+
+static int bgx_xaui_check_link(struct lmac *lmac);
+
+/* Supported devices */
+static const struct pci_device_id bgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, bgx_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
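+/* Each LMAC has its own register window within the BGX BAR, selected by the
+ * (lmac << 20) offset below.
+ */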
+static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ return readq_relaxed(addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val, addr);
+}
+
+static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val | readq_relaxed(addr), addr);
+}
+
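+/* Poll a per-LMAC register until the masked bits read as zero (or non-zero,
+ * depending on 'zero'); returns 0 on success and 1 on timeout after roughly
+ * 100-200ms of polling.
+ */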
+static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
+{
+ int timeout = 100;
+ u64 reg_val;
+
+ while (timeout) {
+ reg_val = bgx_reg_read(bgx, lmac, reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ return 1;
+}
+
+/* Return a bitmap of the BGX blocks present in HW */
+unsigned bgx_get_map(int node)
+{
+ int i;
+ unsigned map = 0;
+
+ for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
+ if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+ map |= (1 << i);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL(bgx_get_map);
+
+/* Return number of LMACs configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (bgx)
+ return bgx->lmac_count;
+
+ return 0;
+}
+EXPORT_SYMBOL(bgx_get_lmac_count);
+
+/* Returns the current link status of LMAC */
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+{
+ struct bgx_link_status *link = (struct bgx_link_status *)status;
+ struct bgx *bgx;
+ struct lmac *lmac;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+ link->link_up = lmac->link_up;
+ link->duplex = lmac->last_duplex;
+ link->speed = lmac->last_speed;
+}
+EXPORT_SYMBOL(bgx_get_lmac_link_state);
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+ if (bgx)
+ return bgx->lmac[lmacid].mac;
+
+ return NULL;
+}
+EXPORT_SYMBOL(bgx_get_lmac_mac);
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+ if (!bgx)
+ return;
+
+ ether_addr_copy(bgx->lmac[lmacid].mac, mac);
+}
+EXPORT_SYMBOL(bgx_set_lmac_mac);
+
+static void bgx_sgmii_change_link_state(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ u64 cmr_cfg;
+ u64 port_cfg = 0;
+ u64 misc_ctl = 0;
+
+ cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
+ cmr_cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+ misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
+
+ if (lmac->link_up) {
+ misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+ port_cfg &= ~GMI_PORT_CFG_DUPLEX;
+ port_cfg |= (lmac->last_duplex << 2);
+ } else {
+ misc_ctl |= PCS_MISC_CTL_GMX_ENO;
+ }
+
+ switch (lmac->last_speed) {
+ case 10:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 50; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 100:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 5; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 1000:
+ port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 1; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
+ if (lmac->last_duplex)
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 0);
+ else
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 8192);
+ break;
+ default:
+ break;
+ }
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+
+ /* Re-enable lmac */
+ cmr_cfg |= CMR_EN;
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+}
+
+static void bgx_lmac_handler(struct net_device *netdev)
+{
+ struct lmac *lmac = container_of(netdev, struct lmac, netdev);
+ struct phy_device *phydev = lmac->phydev;
+ int link_changed = 0;
+
+ if (!lmac)
+ return;
+
+ if (!phydev->link && lmac->last_link)
+ link_changed = -1;
+
+ if (phydev->link &&
+ (lmac->last_duplex != phydev->duplex ||
+ lmac->last_link != phydev->link ||
+ lmac->last_speed != phydev->speed)) {
+ link_changed = 1;
+ }
+
+ lmac->last_link = phydev->link;
+ lmac->last_speed = phydev->speed;
+ lmac->last_duplex = phydev->duplex;
+
+ if (!link_changed)
+ return;
+
+ if (link_changed > 0)
+ lmac->link_up = true;
+ else
+ lmac->link_up = false;
+
+ if (lmac->is_sgmii)
+ bgx_sgmii_change_link_state(lmac);
+ else
+ bgx_xaui_check_link(lmac);
+}
+
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return 0;
+
+ if (idx > 8)
+ lmac = 0;
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_rx_stats);
+
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return 0;
+
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_tx_stats);
+
+static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
+{
+ u64 offset;
+
+ while (bgx->lmac[lmac].dmac > 0) {
+ offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
+ (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
+ bgx->lmac[lmac].dmac--;
+ }
+}
+
+static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+{
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+ /* Disable frame alignment if using preamble */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ if (cfg & 1)
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ /* PCS reset */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+ return -1;
+ }
+
+ /* Clear power-down, restart autoneg and enable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+ cfg &= ~PCS_MRX_CTL_PWR_DN;
+ cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+{
+ u64 cfg;
+
+ /* Reset SPU */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ /* Set interleaved running disparity for RXAUI */
+ if (bgx->lmac_type != BGX_MODE_RXAUI)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ else
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+ SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+
+ /* clear all interrupts */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+ if (bgx->use_training) {
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+ /* training enable */
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+ }
+
+ /* Append FCS to each packet */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+ /* Disable forward error correction */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+ cfg &= ~SPU_FEC_CTL_FEC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+ /* Disable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+ cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
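+ /* Advertise only the matching ability bit: bit 23 is 10GBASE-KR,
+ * bit 24 is 40GBASE-KR4 (IEEE 802.3 clause 73 numbering, assumed
+ * here since this header does not name these bits).
+ */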
+ if (bgx->lmac_type == BGX_MODE_10G_KR)
+ cfg |= (1 << 23);
+ else if (bgx->lmac_type == BGX_MODE_40G_KR)
+ cfg |= (1 << 24);
+ else
+ cfg &= ~((1 << 23) | (1 << 24));
+ cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+ cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+ cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+ bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+ cfg &= ~SPU_CTL_LOW_POWER;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+ cfg &= ~SMU_TX_CTL_UNI_EN;
+ cfg |= SMU_TX_CTL_DIC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+ /* take lmac_count into account */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+ return 0;
+}
+
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ int lmacid = lmac->lmacid;
+ int lmac_type = bgx->lmac_type;
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ if (bgx->use_training) {
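+ /* Bits 13/14 of BGX_SPUX_INT appear to be the training done/failure
+ * flags and bit 0 of BR_PMD_CRTL the training restart; the header
+ * does not name them, so the meanings here are assumptions.
+ */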
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+
+ /* Wait for SPU to come out of reset */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
+ (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+ SPU_BR_STATUS_BLK_LOCK, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BR_STATUS_BLK_LOCK not completed\n");
+ return -1;
+ }
+ } else {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+ SPU_BX_STATUS_RX_ALIGN, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BX_STATUS_RX_ALIGN not completed\n");
+ return -1;
+ }
+ }
+
+ /* Clear rcvflt bit (latching high) and read it back */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
+ if (bgx->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+ /* Wait for MAC RX to be ready */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+ SMU_RX_CTL_STATUS, true)) {
+ dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
+ return -1;
+ }
+
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
+ return -1;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
+ return -1;
+ }
+
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault\n");
+ return -1;
+ }
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false)) {
+ dev_err(&bgx->pdev->dev, "SPU receive link down\n");
+ return -1;
+ }
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+ return 0;
+}
+
+static void bgx_poll_for_link(struct work_struct *work)
+{
+ struct lmac *lmac;
+ u64 link;
+
+ lmac = container_of(work, struct lmac, dwork.work);
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(lmac->bgx, lmac->lmacid,
+ BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false);
+
+ link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ if (link & SPU_STATUS1_RCV_LNK) {
+ lmac->link_up = 1;
+ if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+ lmac->last_speed = 40000;
+ else
+ lmac->last_speed = 10000;
+ lmac->last_duplex = 1;
+ } else {
+ lmac->link_up = 0;
+ }
+
+ if (lmac->last_link != lmac->link_up) {
+ lmac->last_link = lmac->link_up;
+ if (lmac->link_up)
+ bgx_xaui_check_link(lmac);
+ }
+
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
+}
+
+static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ lmac->bgx = bgx;
+
+ if (bgx->lmac_type == BGX_MODE_SGMII) {
+ lmac->is_sgmii = 1;
+ if (bgx_lmac_sgmii_init(bgx, lmacid))
+ return -1;
+ } else {
+ lmac->is_sgmii = 0;
+ if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+ return -1;
+ }
+
+ if (lmac->is_sgmii) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+ } else {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+ }
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
+ CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+ /* Restore default cfg, in case low-level firmware changed it */
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
+
+ if ((bgx->lmac_type != BGX_MODE_XFI) &&
+ (bgx->lmac_type != BGX_MODE_XLAUI) &&
+ (bgx->lmac_type != BGX_MODE_40G_KR) &&
+ (bgx->lmac_type != BGX_MODE_10G_KR)) {
+ if (!lmac->phydev)
+ return -ENODEV;
+
+ lmac->phydev->dev_flags = 0;
+
+ if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ bgx_lmac_handler,
+ PHY_INTERFACE_MODE_SGMII))
+ return -ENODEV;
+
+ phy_start_aneg(lmac->phydev);
+ } else {
+ lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!lmac->check_link)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+ queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+ }
+
+ return 0;
+}
+
+static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cmrx_cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->check_link) {
+ /* Destroy work queue */
+ cancel_delayed_work(&lmac->dwork);
+ flush_workqueue(lmac->check_link);
+ destroy_workqueue(lmac->check_link);
+ }
+
+ cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
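+ /* Clear CMR_EN (bit 15 of CMRX_CFG) to disable the LMAC */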
+ cmrx_cfg &= ~(1 << 15);
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ bgx_flush_dmac_addrs(bgx, lmacid);
+
+ if ((bgx->lmac_type != BGX_MODE_XFI) &&
+ (bgx->lmac_type != BGX_MODE_XLAUI) &&
+ (bgx->lmac_type != BGX_MODE_40G_KR) &&
+ (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
+ phy_disconnect(lmac->phydev);
+
+ lmac->phydev = NULL;
+}
+
+static void bgx_set_num_ports(struct bgx *bgx)
+{
+ u64 lmac_count;
+
+ switch (bgx->qlm_mode) {
+ case QLM_MODE_SGMII:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_SGMII;
+ bgx->lane_to_sds = 0;
+ break;
+ case QLM_MODE_XAUI_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_XAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_RXAUI_2X2:
+ bgx->lmac_count = 2;
+ bgx->lmac_type = BGX_MODE_RXAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_XFI_4X1:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_XFI;
+ bgx->lane_to_sds = 0;
+ break;
+ case QLM_MODE_XLAUI_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_XLAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_10G_KR_4X1:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_10G_KR;
+ bgx->lane_to_sds = 0;
+ bgx->use_training = 1;
+ break;
+ case QLM_MODE_40G_KR4_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_40G_KR;
+ bgx->lane_to_sds = 0xE4;
+ bgx->use_training = 1;
+ break;
+ default:
+ bgx->lmac_count = 0;
+ break;
+ }
+
+ /* If the low-level firmware has programmed the LMAC count
+ * based on the board type, use that value; otherwise keep
+ * the default static values set above.
+ */
+ lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+ if (lmac_count != 4)
+ bgx->lmac_count = lmac_count;
+}
+
+static void bgx_init_hw(struct bgx *bgx)
+{
+ int i;
+
+ bgx_set_num_ports(bgx);
+
+ bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+ if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+ dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
+
+ /* Set lmac type and lane2serdes mapping */
+ for (i = 0; i < bgx->lmac_count; i++) {
+ if (bgx->lmac_type == BGX_MODE_RXAUI) {
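+ /* lane_to_sds packs one 2-bit SerDes lane per LMAC lane:
+ * 0x04 maps LMAC0 to lanes 0-1 and 0x0e maps LMAC1 to
+ * lanes 2-3 (field layout assumed from the values used).
+ */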
+ if (i)
+ bgx->lane_to_sds = 0x0e;
+ else
+ bgx->lane_to_sds = 0x04;
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (bgx->lmac_type << 8) | bgx->lane_to_sds);
+ continue;
+ }
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+ bgx->lmac[i].lmacid_bd = lmac_count;
+ lmac_count++;
+ }
+
+ bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
+
+ /* Set the backpressure AND mask */
+ for (i = 0; i < bgx->lmac_count; i++)
+ bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+ ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+ (i * MAX_BGX_CHANS_PER_LMAC));
+
+ /* Disable all MAC filtering */
+ for (i = 0; i < RX_DMAC_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+ /* Disable MAC steering (NCSI traffic) */
+ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct device *dev = &bgx->pdev->dev;
+ int lmac_type;
+ int train_en;
+
+ /* Read LMAC0 type to figure out QLM mode
+ * This is configured by low level firmware
+ */
+ lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+ lmac_type = (lmac_type >> 8) & 0x07;
+
+ train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
+ SPU_PMD_CRTL_TRAIN_EN;
+
+ switch (lmac_type) {
+ case BGX_MODE_SGMII:
+ bgx->qlm_mode = QLM_MODE_SGMII;
+ dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_XAUI:
+ bgx->qlm_mode = QLM_MODE_XAUI_1X4;
+ dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_RXAUI:
+ bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
+ dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_XFI:
+ if (!train_en) {
+ bgx->qlm_mode = QLM_MODE_XFI_4X1;
+ dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
+ } else {
+ bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
+ dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
+ }
+ break;
+ case BGX_MODE_XLAUI:
+ if (!train_en) {
+ bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
+ dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
+ } else {
+ bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
+ dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
+ }
+ break;
+ default:
+ bgx->qlm_mode = QLM_MODE_SGMII;
+ dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+ }
+}
+
+static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+{
+ struct device_node *np_child;
+ u8 lmac = 0;
+
+ for_each_child_of_node(np, np_child) {
+ struct device_node *phy_np;
+ const char *mac;
+
+ phy_np = of_parse_phandle(np_child, "phy-handle", 0);
+ if (phy_np)
+ bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+
+ mac = of_get_mac_address(np_child);
+ if (mac)
+ ether_addr_copy(bgx->lmac[lmac].mac, mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ bgx->lmac[lmac].lmacid = lmac;
+ lmac++;
+ if (lmac == MAX_LMAC_PER_BGX)
+ break;
+ }
+}
+
+static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct bgx *bgx = NULL;
+ struct device_node *np;
+ char bgx_sel[5];
+ u8 lmac;
+
+ bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
+ if (!bgx)
+ return -ENOMEM;
+ bgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, bgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!bgx->reg_base) {
+ dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
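+ /* The BGX index within a node comes from bit 24 of the BAR address */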
+ bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
+
+ bgx_vnic[bgx->bgx_id] = bgx;
+ bgx_get_qlm_mode(bgx);
+
+ snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+ np = of_find_node_by_name(NULL, bgx_sel);
+ if (np)
+ bgx_init_of(bgx, np);
+
+ bgx_init_hw(bgx);
+
+ /* Enable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ err = bgx_lmac_enable(bgx, lmac);
+ if (err) {
+ dev_err(dev, "BGX%d failed to enable lmac%d\n",
+ bgx->bgx_id, lmac);
+ goto err_enable;
+ }
+ }
+
+ return 0;
+
+err_enable:
+ bgx_vnic[bgx->bgx_id] = NULL;
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void bgx_remove(struct pci_dev *pdev)
+{
+ struct bgx *bgx = pci_get_drvdata(pdev);
+ u8 lmac;
+
+ /* Disable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ bgx_lmac_disable(bgx, lmac);
+
+ bgx_vnic[bgx->bgx_id] = NULL;
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver bgx_driver = {
+ .name = DRV_NAME,
+ .id_table = bgx_id_table,
+ .probe = bgx_probe,
+ .remove = bgx_remove,
+};
+
+static int __init bgx_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&bgx_driver);
+}
+
+static void __exit bgx_cleanup_module(void)
+{
+ pci_unregister_driver(&bgx_driver);
+}
+
+module_init(bgx_init_module);
+module_exit(bgx_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
new file mode 100644
index 000000000..ba4f53b7c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef THUNDER_BGX_H
+#define THUNDER_BGX_H
+
+#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
+#define MAX_BGX_PER_CN88XX 2
+#define MAX_LMAC_PER_BGX 4
+#define MAX_BGX_CHANS_PER_LMAC 16
+#define MAX_DMAC_PER_LMAC 8
+#define MAX_FRAME_SIZE 9216
+
+#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
+
+#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
+
+/* Registers */
+#define BGX_CMRX_CFG 0x00
+#define CMR_PKT_TX_EN BIT_ULL(13)
+#define CMR_PKT_RX_EN BIT_ULL(14)
+#define CMR_EN BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG 0x08
+#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP 0x60
+#define BGX_CMRX_RX_STAT0 0x70
+#define BGX_CMRX_RX_STAT1 0x78
+#define BGX_CMRX_RX_STAT2 0x80
+#define BGX_CMRX_RX_STAT3 0x88
+#define BGX_CMRX_RX_STAT4 0x90
+#define BGX_CMRX_RX_STAT5 0x98
+#define BGX_CMRX_RX_STAT6 0xA0
+#define BGX_CMRX_RX_STAT7 0xA8
+#define BGX_CMRX_RX_STAT8 0xB0
+#define BGX_CMRX_RX_STAT9 0xB8
+#define BGX_CMRX_RX_STAT10 0xC0
+#define BGX_CMRX_RX_BP_DROP 0xC8
+#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMR_RX_DMACX_CAM 0x200
+#define RX_DMACX_CAM_EN BIT_ULL(48)
+#define RX_DMACX_CAM_LMACID(x) (x << 49)
+#define RX_DMAC_COUNT 32
+#define BGX_CMR_RX_STREERING 0x300
+#define RX_TRAFFIC_STEER_RULE_COUNT 8
+#define BGX_CMR_CHAN_MSK_AND 0x450
+#define BGX_CMR_BIST_STATUS 0x460
+#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_STAT0 0x600
+#define BGX_CMRX_TX_STAT1 0x608
+#define BGX_CMRX_TX_STAT2 0x610
+#define BGX_CMRX_TX_STAT3 0x618
+#define BGX_CMRX_TX_STAT4 0x620
+#define BGX_CMRX_TX_STAT5 0x628
+#define BGX_CMRX_TX_STAT6 0x630
+#define BGX_CMRX_TX_STAT7 0x638
+#define BGX_CMRX_TX_STAT8 0x640
+#define BGX_CMRX_TX_STAT9 0x648
+#define BGX_CMRX_TX_STAT10 0x650
+#define BGX_CMRX_TX_STAT11 0x658
+#define BGX_CMRX_TX_STAT12 0x660
+#define BGX_CMRX_TX_STAT13 0x668
+#define BGX_CMRX_TX_STAT14 0x670
+#define BGX_CMRX_TX_STAT15 0x678
+#define BGX_CMRX_TX_STAT16 0x680
+#define BGX_CMRX_TX_STAT17 0x688
+#define BGX_CMR_TX_LMACS 0x1000
+
+#define BGX_SPUX_CONTROL1 0x10000
+#define SPU_CTL_LOW_POWER BIT_ULL(11)
+#define SPU_CTL_RESET BIT_ULL(15)
+#define BGX_SPUX_STATUS1 0x10008
+#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
+#define BGX_SPUX_STATUS2 0x10020
+#define SPU_STATUS2_RCVFLT BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS 0x10028
+#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1 0x10030
+#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
+#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL 0x10068
+#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
+#define BGX_SPUX_BR_PMD_LD_REP 0x10090
+#define BGX_SPUX_FEC_CONTROL 0x100A0
+#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
+#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL 0x100C8
+#define SPU_AN_CTL_AN_EN BIT_ULL(12)
+#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
+#define BGX_SPUX_AN_ADV 0x100D8
+#define BGX_SPUX_MISC_CONTROL 0x10218
+#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
+#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
+#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S 0x10228
+#define BGX_SPUX_INT_ENA_W1C 0x10230
+#define BGX_SPUX_INT_ENA_W1S 0x10238
+#define BGX_SPU_DBG_CONTROL 0x10300
+#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
+#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_JABBER 0x20030
+#define BGX_SMUX_RX_CTL 0x20048
+#define SMU_RX_CTL_STATUS (3ull << 0)
+#define BGX_SMUX_TX_APPEND 0x20100
+#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
+#define BGX_SMUX_TX_MIN_PKT 0x20118
+#define BGX_SMUX_TX_INT 0x20140
+#define BGX_SMUX_TX_CTL 0x20178
+#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
+#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
+#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
+#define BGX_SMUX_TX_THRESH 0x20180
+#define BGX_SMUX_CTL 0x20200
+#define SMU_CTL_RX_IDLE BIT_ULL(0)
+#define SMU_CTL_TX_IDLE BIT_ULL(1)
+
+#define BGX_GMP_PCS_MRX_CTL 0x30000
+#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
+#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
+#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
+#define PCS_MRX_CTL_RESET BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
+#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
+#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
+#define BGX_GMP_GMI_PRTX_CFG 0x38020
+#define GMI_PORT_CFG_SPEED BIT_ULL(1)
+#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
+#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
+#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
+#define BGX_GMP_GMI_RXX_JABBER 0x38038
+#define BGX_GMP_GMI_TXX_THRESH 0x38210
+#define BGX_GMP_GMI_TXX_APPEND 0x38218
+#define BGX_GMP_GMI_TXX_SLOT 0x38220
+#define BGX_GMP_GMI_TXX_BURST 0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
+
+#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL 0x400008
+#define BGX_MSIX_PBA_0 0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS 30
+#define BGX_LMAC_VEC_OFFSET 7
+#define BGX_MSIX_VEC_SHIFT 4
+
+#define CMRX_INT 0
+#define SPUX_INT 1
+#define SMUX_RX_INT 2
+#define SMUX_TX_INT 3
+#define GMPX_PCS_INT 4
+#define GMPX_GMI_RX_INT 5
+#define GMPX_GMI_TX_INT 6
+#define CMR_MEM_INT 28
+#define SPU_MEM_INT 29
+
+#define LMAC_INTR_LINK_UP BIT(0)
+#define LMAC_INTR_LINK_DOWN BIT(1)
+
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+ MCAST_MODE_REJECT,
+ MCAST_MODE_ACCEPT,
+ MCAST_MODE_CAM_FILTER,
+ RSVD
+};
+
+#define BCAST_ACCEPT 1
+#define CAM_ACCEPT 1
+
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+unsigned bgx_get_map(int node);
+int bgx_get_lmac_count(int node, int bgx);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
+#define BGX_RX_STATS_COUNT 11
+#define BGX_TX_STATS_COUNT 18
+
+struct bgx_stats {
+ u64 rx_stats[BGX_RX_STATS_COUNT];
+ u64 tx_stats[BGX_TX_STATS_COUNT];
+};
+
+enum LMAC_TYPE {
+ BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+ BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */
+ BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+ BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+ BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+ BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
+};
+
+enum qlm_mode {
+ QLM_MODE_SGMII, /* SGMII, each lane independent */
+ QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */
+ QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */
+ QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */
+ QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */
+ QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */
+ QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */
+};
+
+#endif /* THUNDER_BGX_H */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 7daa088a9..a79813a17 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_CHELSIO
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index c9761ceb4..e68c5e4f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1018,19 +1018,19 @@ int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
struct adapter *adapter = phy->adapter;
const struct firmware *fw;
- char buf[64];
+ const char *fw_name;
u32 csum;
const __be32 *p;
u16 *cache = phy->phy_cache;
- int i, ret;
-
- snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));
+ int i, ret = -EINVAL;
- ret = reject_firmware(&fw, buf, &adapter->pdev->dev);
+ fw_name = get_edc_fw_name(edc_idx);
+ if (fw_name)
+ ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
if (ret < 0) {
dev_err(&adapter->pdev->dev,
"could not upgrade firmware: unable to load %s\n",
- buf);
+ fw_name);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index b0cbb2b7f..76684dcb8 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1169,10 +1169,7 @@ void *cxgb_alloc_mem(unsigned long size)
*/
void cxgb_free_mem(void *addr)
{
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
+ kvfree(addr);
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 524d11098..629f75d70 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,17 +46,19 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
- MAX_NPORTS = 4, /* max # of ports */
- SERNUM_LEN = 24, /* Serial # length */
- EC_LEN = 16, /* E/C length */
- ID_LEN = 16, /* ID length */
- PN_LEN = 16, /* Part Number length */
+ MAX_NPORTS = 4, /* max # of ports */
+ SERNUM_LEN = 24, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
+ MACADDR_LEN = 12, /* MAC Address length */
};
enum {
@@ -198,23 +200,45 @@ struct lb_port_stats {
};
struct tp_tcp_stats {
- u32 tcpOutRsts;
- u64 tcpInSegs;
- u64 tcpOutSegs;
- u64 tcpRetransSegs;
+ u32 tcp_out_rsts;
+ u64 tcp_in_segs;
+ u64 tcp_out_segs;
+ u64 tcp_retrans_segs;
+};
+
+struct tp_usm_stats {
+ u32 frames;
+ u32 drops;
+ u64 octets;
+};
+
+struct tp_fcoe_stats {
+ u32 frames_ddp;
+ u32 frames_drop;
+ u64 octets_ddp;
};
struct tp_err_stats {
- u32 macInErrs[4];
- u32 hdrInErrs[4];
- u32 tcpInErrs[4];
- u32 tnlCongDrops[4];
- u32 ofldChanDrops[4];
- u32 tnlTxDrops[4];
- u32 ofldVlanDrops[4];
- u32 tcp6InErrs[4];
- u32 ofldNoNeigh;
- u32 ofldCongDefer;
+ u32 mac_in_errs[4];
+ u32 hdr_in_errs[4];
+ u32 tcp_in_errs[4];
+ u32 tnl_cong_drops[4];
+ u32 ofld_chan_drops[4];
+ u32 tnl_tx_drops[4];
+ u32 ofld_vlan_drops[4];
+ u32 tcp6_in_errs[4];
+ u32 ofld_no_neigh;
+ u32 ofld_cong_defer;
+};
+
+struct tp_cpl_stats {
+ u32 req[4];
+ u32 rsp[4];
+};
+
+struct tp_rdma_stats {
+ u32 rqe_dfr_pkt;
+ u32 rqe_dfr_mod;
};
struct sge_params {
@@ -224,7 +248,6 @@ struct sge_params {
};
struct tp_params {
- unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
unsigned int la_mask; /* what events are recorded by TP LA */
unsigned short tx_modq_map; /* TX modulation scheduler queue to */
@@ -259,6 +282,7 @@ struct vpd_params {
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
u8 pn[PN_LEN + 1];
+ u8 na[MACADDR_LEN + 1];
};
struct pci_params {
@@ -273,6 +297,7 @@ struct pci_params {
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -284,6 +309,10 @@ enum chip_type {
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
};
struct devlog_params {
@@ -292,6 +321,15 @@ struct devlog_params {
u32 size; /* size of log */
};
+/* Stores chip specific parameters */
+struct arch_specific_params {
+ u8 nchan;
+ u16 mps_rplc_size;
+ u16 vfcount;
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
@@ -317,6 +355,7 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
enum chip_type chip; /* chip code */
+ struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char bypass;
@@ -328,6 +367,17 @@ struct adapter_params {
unsigned int max_ird_adapter; /* Max read depth per adapter */
};
+/* State needed to monitor the forward progress of SGE Ingress DMA activities
+ * and possible hangs.
+ */
+struct sge_idma_monitor_state {
+ unsigned int idma_1s_thresh; /* 1s threshold in Core Clock ticks */
+ unsigned int idma_stalled[2]; /* synthesized stalled timers in HZ */
+ unsigned int idma_state[2]; /* IDMA Hang detect state */
+ unsigned int idma_qid[2]; /* IDMA Hung Ingress Queue ID */
+ unsigned int idma_warn[2]; /* time to warning in HZ */
+};
+
#include "t4fw_api.h"
#define FW_VERSION(chip) ( \
@@ -421,6 +471,7 @@ struct port_info {
u8 rss_mode;
struct link_config link_cfg;
u16 *rss;
+ struct port_stats stats_base;
#ifdef CONFIG_CHELSIO_T4_DCB
struct port_dcb_info dcb; /* Data Center Bridging support */
#endif
@@ -630,12 +681,7 @@ struct sge {
u32 fl_align; /* response queue message alignment */
u32 fl_starve_thres; /* Free List starvation threshold */
- /* State variables for detecting an SGE Ingress DMA hang */
- unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
- unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
- unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
- unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
-
+ struct sge_idma_monitor_state idma_monitor;
unsigned int egr_start;
unsigned int egr_sz;
unsigned int ingr_start;
@@ -644,6 +690,7 @@ struct sge {
struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
unsigned long *starving_fl;
unsigned long *txq_maperr;
+ unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
};
@@ -665,6 +712,12 @@ struct l2t_data;
#endif
+struct doorbell_stats {
+ u32 db_drop;
+ u32 db_empty;
+ u32 db_full;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -672,7 +725,7 @@ struct adapter {
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int mbox;
- unsigned int fn;
+ unsigned int pf;
unsigned int flags;
enum chip_type chip;
@@ -682,13 +735,12 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- unsigned int wol;
-
struct {
unsigned short vec;
char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
+ struct doorbell_stats db_stats;
struct sge sge;
struct net_device *port[MAX_NPORTS];
@@ -843,6 +895,16 @@ enum {
VLAN_REWRITE
};
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
+}
+
static inline int is_t5(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
@@ -887,6 +949,22 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
}
/**
+ * t4_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+ ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
+}
+
+/**
* netdev2pinfo - return the port_info structure associated with a net_device
* @dev: the netdev
*
@@ -1055,7 +1133,7 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd);
+ struct sge_fl *fl, rspq_handler_t hnd, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
unsigned int iqid);
@@ -1095,6 +1173,19 @@ static inline int is_bypass_device(int device)
}
}
+static inline int is_10gbt_device(int device)
+{
+ /* this should be set based upon device capabilities */
+ switch (device) {
+ case 0x4409:
+ case 0x4486:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
@@ -1117,9 +1208,19 @@ static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout);
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok);
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+ const void *cmd, int size, void *rpl,
+ int timeout)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+ timeout);
+}
+
static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
int size, void *rpl)
{
@@ -1147,10 +1248,14 @@ void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
+u32 t4_get_util_window(struct adapter *adap);
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
+
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
@@ -1165,10 +1270,16 @@ unsigned int t4_get_regs_len(struct adapter *adapter);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_load_phy_fw(struct adapter *adap,
+ int win, spinlock_t *lock,
+ int (*phy_fw_version)(const u8 *, size_t),
+ const u8 *phy_fw_data, size_t phy_fw_size);
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force);
@@ -1182,9 +1293,10 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
int t4_prep_adapter(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
+ int user,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid);
@@ -1195,12 +1307,15 @@ int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq);
int t4_read_rss(struct adapter *adapter, u16 *entries);
void t4_read_rss_key(struct adapter *adapter, u32 *key);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
@@ -1211,10 +1326,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
- u64 *parity);
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
- u64 *parity);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
@@ -1226,21 +1338,36 @@ int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
const unsigned int *valp);
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+ unsigned int *pif_req_wrptr,
+ unsigned int *pif_rsp_wrptr);
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
@@ -1259,13 +1386,16 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val, int rw);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
const u32 *val);
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
- unsigned int pf, unsigned int vf,
- unsigned int nparams, const u32 *params,
- const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
unsigned int rxqi, unsigned int rxq, unsigned int tc,
@@ -1274,6 +1404,9 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
@@ -1303,6 +1436,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
@@ -1310,4 +1444,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_free_mem(void *addr);
+void t4_idma_monitor_init(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma);
+void t4_idma_monitor(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma,
+ int hz, int ticks);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 371f75e78..c3c7db418 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -182,6 +182,95 @@ static const struct file_operations cim_la_fops = {
.release = seq_release_private
};
+static int cim_pif_la_show(struct seq_file *seq, void *v, int idx)
+{
+ const u32 *p = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Cntl ID DataBE Addr Data\n");
+ } else if (idx < CIM_PIFLA_SIZE) {
+ seq_printf(seq, " %02x %02x %04x %08x %08x%08x%08x%08x\n",
+ (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f,
+ p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]);
+ } else {
+ if (idx == CIM_PIFLA_SIZE)
+ seq_puts(seq, "\nCntl ID Data\n");
+ seq_printf(seq, " %02x %02x %08x%08x%08x%08x\n",
+ (p[4] >> 6) & 0xff, p[4] & 0x3f,
+ p[3], p[2], p[1], p[0]);
+ }
+ return 0;
+}
+
+static int cim_pif_la_open(struct inode *inode, struct file *file)
+{
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1,
+ cim_pif_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ t4_cim_read_pif_la(adap, (u32 *)p->data,
+ (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL);
+ return 0;
+}
+
+static const struct file_operations cim_pif_la_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_pif_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+static int cim_ma_la_show(struct seq_file *seq, void *v, int idx)
+{
+ const u32 *p = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "\n");
+ } else if (idx < CIM_MALA_SIZE) {
+ seq_printf(seq, "%02x%08x%08x%08x%08x\n",
+ p[4], p[3], p[2], p[1], p[0]);
+ } else {
+ if (idx == CIM_MALA_SIZE)
+ seq_puts(seq,
+ "\nCnt ID Tag UE Data RDY VLD\n");
+ seq_printf(seq, "%3u %2u %x %u %08x%08x %u %u\n",
+ (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
+ (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
+ (p[1] >> 2) | ((p[2] & 3) << 30),
+ (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
+ p[0] & 1);
+ }
+ return 0;
+}
+
+static int cim_ma_la_open(struct inode *inode, struct file *file)
+{
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1,
+ cim_ma_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ t4_cim_read_ma_la(adap, (u32 *)p->data,
+ (u32 *)p->data + 5 * CIM_MALA_SIZE);
+ return 0;
+}
+
+static const struct file_operations cim_ma_la_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_ma_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
static int cim_qcfg_show(struct seq_file *seq, void *v)
{
static const char * const qname[] = {
@@ -663,6 +752,39 @@ static const struct file_operations pm_stats_debugfs_fops = {
.write = pm_stats_clear
};
+static int tx_rate_show(struct seq_file *seq, void *v)
+{
+ u64 nrate[NCHAN], orate[NCHAN];
+ struct adapter *adap = seq->private;
+
+ t4_get_chan_txrate(adap, nrate, orate);
+ if (adap->params.arch.nchan == NCHAN) {
+ seq_puts(seq, " channel 0 channel 1 "
+ "channel 2 channel 3\n");
+ seq_printf(seq, "NIC B/s: %10llu %10llu %10llu %10llu\n",
+ (unsigned long long)nrate[0],
+ (unsigned long long)nrate[1],
+ (unsigned long long)nrate[2],
+ (unsigned long long)nrate[3]);
+ seq_printf(seq, "Offload B/s: %10llu %10llu %10llu %10llu\n",
+ (unsigned long long)orate[0],
+ (unsigned long long)orate[1],
+ (unsigned long long)orate[2],
+ (unsigned long long)orate[3]);
+ } else {
+ seq_puts(seq, " channel 0 channel 1\n");
+ seq_printf(seq, "NIC B/s: %10llu %10llu\n",
+ (unsigned long long)nrate[0],
+ (unsigned long long)nrate[1]);
+ seq_printf(seq, "Offload B/s: %10llu %10llu\n",
+ (unsigned long long)orate[0],
+ (unsigned long long)orate[1]);
+ }
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
+
static int cctrl_tbl_show(struct seq_file *seq, void *v)
{
static const char * const dec_fac[] = {
@@ -830,16 +952,23 @@ static int devlog_show(struct seq_file *seq, void *v)
* eventually have to put a format interpreter in here ...
*/
seq_printf(seq, "%10d %15llu %8s %8s ",
- e->seqno, e->timestamp,
+ be32_to_cpu(e->seqno),
+ be64_to_cpu(e->timestamp),
(e->level < ARRAY_SIZE(devlog_level_strings)
? devlog_level_strings[e->level]
: "UNKNOWN"),
(e->facility < ARRAY_SIZE(devlog_facility_strings)
? devlog_facility_strings[e->facility]
: "UNKNOWN"));
- seq_printf(seq, e->fmt, e->params[0], e->params[1],
- e->params[2], e->params[3], e->params[4],
- e->params[5], e->params[6], e->params[7]);
+ seq_printf(seq, e->fmt,
+ be32_to_cpu(e->params[0]),
+ be32_to_cpu(e->params[1]),
+ be32_to_cpu(e->params[2]),
+ be32_to_cpu(e->params[3]),
+ be32_to_cpu(e->params[4]),
+ be32_to_cpu(e->params[5]),
+ be32_to_cpu(e->params[6]),
+ be32_to_cpu(e->params[7]));
}
return 0;
}
@@ -921,23 +1050,17 @@ static int devlog_open(struct inode *inode, struct file *file)
return ret;
}
- /* Translate log multi-byte integral elements into host native format
- * and determine where the first entry in the log is.
+ /* Find the earliest (lowest Sequence Number) log entry in the
+ * circular Device Log.
*/
for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
struct fw_devlog_e *e = &dinfo->log[index];
- int i;
__u32 seqno;
if (e->timestamp == 0)
continue;
- e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
seqno = be32_to_cpu(e->seqno);
- for (i = 0; i < 8; i++)
- e->params[i] =
- (__force __be32)be32_to_cpu(e->params[i]);
-
if (seqno < fseqno) {
fseqno = seqno;
dinfo->first = index;
@@ -1084,41 +1207,89 @@ static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
static int mps_tcam_show(struct seq_file *seq, void *v)
{
- if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
- " VF Replication "
- "P0 P1 P2 P3 ML\n");
- else {
+ struct adapter *adap = seq->private;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ if (v == SEQ_START_TOKEN) {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF "
+ "Replication "
+ " P0 P1 P2 P3 ML\n");
+ else
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF Replication"
+ " P0 P1 P2 P3 ML\n");
+ } else {
u64 mask;
u8 addr[ETH_ALEN];
- struct adapter *adap = seq->private;
+ bool replicate;
unsigned int idx = (uintptr_t)v - 2;
- u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
- u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
- u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
- u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
- u32 rplc[4] = {0, 0, 0, 0};
+ u64 tcamy, tcamx, val;
+ u32 cls_lo, cls_hi, ctl;
+ u32 rplc[8] = {0};
+
+ if (chip_ver > CHELSIO_T5) {
+ /* CtlCmdType - 0: Read, 1: Write
+ * CtlTcamSel - 0: TCAM0, 1: TCAM1
+ * CtlXYBitSel- 0: Y bit, 1: X bit
+ */
+
+ /* Read tcamy */
+ ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+ if (idx < 256)
+ ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+ else
+ ctl |= CTLTCAMINDEX_V(idx - 256) |
+ CTLTCAMSEL_V(1);
+ t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+ tcamy = DMACH_G(val) << 32;
+ tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+
+ /* Read tcamx. Change the control param */
+ ctl |= CTLXYBITSEL_V(1);
+ t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+ tcamx = DMACH_G(val) << 32;
+ tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+ } else {
+ tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+ tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+ }
+
+ cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+ cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
if (tcamx & tcamy) {
seq_printf(seq, "%3u -\n", idx);
goto out;
}
- if (cls_lo & REPLICATE_F) {
+ rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
+ if (chip_ver > CHELSIO_T5)
+ replicate = (cls_lo & T6_REPLICATE_F);
+ else
+ replicate = (cls_lo & REPLICATE_F);
+
+ if (replicate) {
struct fw_ldst_cmd ldst_cmd;
int ret;
+ struct fw_ldst_mps_rplc mps_rplc;
+ u32 ldst_addrspc;
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_addrspc =
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
ldst_cmd.op_to_addrspace =
htonl(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_READ_F |
- FW_LDST_CMD_ADDRSPACE_V(
- FW_LDST_ADDRSPC_MPS));
+ ldst_addrspc);
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.mps.fid_ctl =
+ ldst_cmd.u.mps.rplc.fid_idx =
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
- FW_LDST_CMD_CTL_V(idx));
+ FW_LDST_CMD_IDX_V(idx));
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
sizeof(ldst_cmd), &ldst_cmd);
if (ret)
@@ -1126,30 +1297,69 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
"replication map for idx %d: %d\n",
idx, -ret);
else {
- rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
- rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
- rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
- rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+ mps_rplc = ldst_cmd.u.mps.rplc;
+ rplc[0] = ntohl(mps_rplc.rplc31_0);
+ rplc[1] = ntohl(mps_rplc.rplc63_32);
+ rplc[2] = ntohl(mps_rplc.rplc95_64);
+ rplc[3] = ntohl(mps_rplc.rplc127_96);
+ if (adap->params.arch.mps_rplc_size > 128) {
+ rplc[4] = ntohl(mps_rplc.rplc159_128);
+ rplc[5] = ntohl(mps_rplc.rplc191_160);
+ rplc[6] = ntohl(mps_rplc.rplc223_192);
+ rplc[7] = ntohl(mps_rplc.rplc255_224);
+ }
}
}
tcamxy2valmask(tcamx, tcamy, addr, &mask);
- seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
- "%3c %#x%4u%4d",
- idx, addr[0], addr[1], addr[2], addr[3], addr[4],
- addr[5], (unsigned long long)mask,
- (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
- PF_G(cls_lo),
- (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
- if (cls_lo & REPLICATE_F)
- seq_printf(seq, " %08x %08x %08x %08x",
- rplc[3], rplc[2], rplc[1], rplc[0]);
+ if (chip_ver > CHELSIO_T5)
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], (unsigned long long)mask,
+ (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ T6_PF_G(cls_lo),
+ (cls_lo & T6_VF_VALID_F) ?
+ T6_VF_G(cls_lo) : -1);
+ else
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], (unsigned long long)mask,
+ (cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ PF_G(cls_lo),
+ (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+
+ if (replicate) {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_printf(seq, " %08x %08x %08x %08x "
+ "%08x %08x %08x %08x",
+ rplc[7], rplc[6], rplc[5], rplc[4],
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ else
+ seq_printf(seq, " %08x %08x %08x %08x",
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ } else {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_printf(seq, "%72c", ' ');
+ else
+ seq_printf(seq, "%36c", ' ');
+ }
+
+ if (chip_ver > CHELSIO_T5)
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ T6_SRAM_PRIO0_G(cls_lo),
+ T6_SRAM_PRIO1_G(cls_lo),
+ T6_SRAM_PRIO2_G(cls_lo),
+ T6_SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> T6_MULTILISTEN0_S) & 0xf);
else
- seq_printf(seq, "%36c", ' ');
- seq_printf(seq, "%4u%3u%3u%3u %#x\n",
- SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
- SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
- (cls_lo >> MULTILISTEN0_S) & 0xf);
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+ SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> MULTILISTEN0_S) & 0xf);
}
out: return 0;
}
@@ -1222,7 +1432,7 @@ static int sensors_show(struct seq_file *seq, void *v)
param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
param, val);
if (ret < 0 || val[0] == 0)
@@ -1416,6 +1626,9 @@ static int rss_config_show(struct seq_file *seq, void *v)
seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf));
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf));
+ else
+ seq_printf(seq, " VfWrAddr: %3d\n",
+ T6_VFWRADDR_G(rssconf));
seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]);
seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F));
seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F));
@@ -1634,14 +1847,14 @@ static int rss_vf_config_open(struct inode *inode, struct file *file)
struct adapter *adapter = inode->i_private;
struct seq_tab *p;
struct rss_vf_conf *vfconf;
- int vf;
+ int vf, vfcount = adapter->params.arch.vfcount;
- p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
+ p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
if (!p)
return -ENOMEM;
vfconf = (struct rss_vf_conf *)p->data;
- for (vf = 0; vf < 128; vf++) {
+ for (vf = 0; vf < vfcount; vf++) {
t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh);
}
@@ -1959,6 +2172,61 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
size_mb << 20);
}
+static int blocked_fl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+ const struct adapter *adap = filp->private_data;
+ char *buf;
+ ssize_t size = (adap->sge.egr_sz + 3) / 4 +
+ adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, size - 1, "%*pb\n",
+ adap->sge.egr_sz, adap->sge.blocked_fl);
+ len += sprintf(buf + len, "\n");
+ size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+ t4_free_mem(buf);
+ return size;
+}
+
+static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long *t;
+ struct adapter *adap = filp->private_data;
+
+ t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+ if (err) {
+ t4_free_mem(t);
+ return err;
+ }
+
+ bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+ t4_free_mem(t);
+ return count;
+}
+
+static const struct file_operations blocked_fl_fops = {
+ .owner = THIS_MODULE,
+ .open = blocked_fl_open,
+ .read = blocked_fl_read,
+ .write = blocked_fl_write,
+ .llseek = generic_file_llseek,
+};
+
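The blocked_fl bitmap pairs with the refill_fl() check added later in this patch (sge.c): any free list whose bit is set is skipped during replenishment. A minimal userspace sketch of driving the node follows; the debugfs path and the bit value are illustrative assumptions, not something this patch defines.

/* Illustrative only: set bit 0 of blocked_fl to block the first free list.
 * The debugfs path below is an assumption about where cxgb4 creates its nodes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *node = "/sys/kernel/debug/cxgb4/0000:01:00.4/blocked_fl";
	const char *bits = "1\n";	/* bitmap_parse_user() hex format, LSB = free list 0 */
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open blocked_fl");
		return 1;
	}
	if (write(fd, bits, strlen(bits)) != (ssize_t)strlen(bits))
		perror("write blocked_fl");
	close(fd);
	return 0;
}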
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -1978,11 +2246,13 @@ void add_debugfs_files(struct adapter *adap,
int t4_setup_debugfs(struct adapter *adap)
{
int i;
- u32 size;
+ u32 size = 0;
struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
{ "cim_la", &cim_la_fops, S_IRUSR, 0 },
+ { "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 },
+ { "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 },
{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
{ "devlog", &devlog_fops, S_IRUSR, 0 },
@@ -2018,10 +2288,12 @@ int t4_setup_debugfs(struct adapter *adap)
{ "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
{ "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
{ "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
+ { "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 },
{ "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
#if IS_ENABLED(CONFIG_IPV6)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
#endif
+ { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
@@ -2048,12 +2320,7 @@ int t4_setup_debugfs(struct adapter *adap)
size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
}
- if (is_t4(adap->params.chip)) {
- size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
- if (i & EXT_MEM_ENABLE_F)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_G(size));
- } else {
+ if (is_t5(adap->params.chip)) {
if (i & EXT_MEM0_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
add_debugfs_mem(adap, "mc0", MEM_MC0,
@@ -2064,6 +2331,12 @@ int t4_setup_debugfs(struct adapter *adap)
add_debugfs_mem(adap, "mc1", MEM_MC1,
EXT_MEM1_SIZE_G(size));
}
+ } else {
+ if (i & EXT_MEM_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_G(size));
+ }
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 10d82b51d..687acf71f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -108,15 +108,82 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
- "WriteCoalSuccess ",
- "WriteCoalFail ",
+};
+
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+ "db_drop ",
+ "db_full ",
+ "db_empty ",
+ "tcp_ipv4_out_rsts ",
+ "tcp_ipv4_in_segs ",
+ "tcp_ipv4_out_segs ",
+ "tcp_ipv4_retrans_segs ",
+ "tcp_ipv6_out_rsts ",
+ "tcp_ipv6_in_segs ",
+ "tcp_ipv6_out_segs ",
+ "tcp_ipv6_retrans_segs ",
+ "usm_ddp_frames ",
+ "usm_ddp_octets ",
+ "usm_ddp_drops ",
+ "rdma_no_rqe_mod_defer ",
+ "rdma_no_rqe_pkt_defer ",
+ "tp_err_ofld_no_neigh ",
+ "tp_err_ofld_cong_defer ",
+ "write_coal_success ",
+ "write_coal_fail ",
+};
+
+static char channel_stats_strings[][ETH_GSTRING_LEN] = {
+ "--------Channel--------- ",
+ "tp_cpl_requests ",
+ "tp_cpl_responses ",
+ "tp_mac_in_errs ",
+ "tp_hdr_in_errs ",
+ "tp_tcp_in_errs ",
+ "tp_tcp6_in_errs ",
+ "tp_tnl_cong_drops ",
+ "tp_tnl_tx_drops ",
+ "tp_ofld_vlan_drops ",
+ "tp_ofld_chan_drops ",
+ "fcoe_octets_ddp ",
+ "fcoe_frames_ddp ",
+ "fcoe_frames_drop ",
+};
+
+static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
+ "-------Loopback----------- ",
+ "octets_ok ",
+ "frames_ok ",
+ "bcast_frames ",
+ "mcast_frames ",
+ "ucast_frames ",
+ "error_frames ",
+ "frames_64 ",
+ "frames_65_to_127 ",
+ "frames_128_to_255 ",
+ "frames_256_to_511 ",
+ "frames_512_to_1023 ",
+ "frames_1024_to_1518 ",
+ "frames_1519_to_max ",
+ "frames_dropped ",
+ "bg0_frames_dropped ",
+ "bg1_frames_dropped ",
+ "bg2_frames_dropped ",
+ "bg3_frames_dropped ",
+ "bg0_frames_trunc ",
+ "bg1_frames_trunc ",
+ "bg2_frames_trunc ",
+ "bg3_frames_trunc ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(stats_strings);
+ return ARRAY_SIZE(stats_strings) +
+ ARRAY_SIZE(adapter_stats_strings) +
+ ARRAY_SIZE(channel_stats_strings) +
+ ARRAY_SIZE(loopback_stats_strings);
default:
return -EOPNOTSUPP;
}
@@ -168,8 +235,18 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS) {
memcpy(data, stats_strings, sizeof(stats_strings));
+ data += sizeof(stats_strings);
+ memcpy(data, adapter_stats_strings,
+ sizeof(adapter_stats_strings));
+ data += sizeof(adapter_stats_strings);
+ memcpy(data, channel_stats_strings,
+ sizeof(channel_stats_strings));
+ data += sizeof(channel_stats_strings);
+ memcpy(data, loopback_stats_strings,
+ sizeof(loopback_stats_strings));
+ }
}
/* port stats maintained per queue of the port. They should be in the same
@@ -185,6 +262,45 @@ struct queue_port_stats {
u64 gro_merged;
};
+struct adapter_stats {
+ u64 db_drop;
+ u64 db_full;
+ u64 db_empty;
+ u64 tcp_v4_out_rsts;
+ u64 tcp_v4_in_segs;
+ u64 tcp_v4_out_segs;
+ u64 tcp_v4_retrans_segs;
+ u64 tcp_v6_out_rsts;
+ u64 tcp_v6_in_segs;
+ u64 tcp_v6_out_segs;
+ u64 tcp_v6_retrans_segs;
+ u64 frames;
+ u64 octets;
+ u64 drops;
+ u64 rqe_dfr_mod;
+ u64 rqe_dfr_pkt;
+ u64 ofld_no_neigh;
+ u64 ofld_cong_defer;
+ u64 wc_success;
+ u64 wc_fail;
+};
+
+struct channel_stats {
+ u64 cpl_req;
+ u64 cpl_rsp;
+ u64 mac_in_errs;
+ u64 hdr_in_errs;
+ u64 tcp_in_errs;
+ u64 tcp6_in_errs;
+ u64 tnl_cong_drops;
+ u64 tnl_tx_drops;
+ u64 ofld_vlan_drops;
+ u64 ofld_chan_drops;
+ u64 octets_ddp;
+ u64 frames_ddp;
+ u64 frames_drop;
+};
+
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -205,30 +321,121 @@ static void collect_sge_port_stats(const struct adapter *adap,
}
}
+static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+{
+ struct tp_tcp_stats v4, v6;
+ struct tp_rdma_stats rdma_stats;
+ struct tp_err_stats err_stats;
+ struct tp_usm_stats usm_stats;
+ u64 val1, val2;
+
+ memset(s, 0, sizeof(*s));
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, &v4, &v6);
+ t4_tp_get_rdma_stats(adap, &rdma_stats);
+ t4_get_usm_stats(adap, &usm_stats);
+ t4_tp_get_err_stats(adap, &err_stats);
+ spin_unlock(&adap->stats_lock);
+
+ s->db_drop = adap->db_stats.db_drop;
+ s->db_full = adap->db_stats.db_full;
+ s->db_empty = adap->db_stats.db_empty;
+
+ s->tcp_v4_out_rsts = v4.tcp_out_rsts;
+ s->tcp_v4_in_segs = v4.tcp_in_segs;
+ s->tcp_v4_out_segs = v4.tcp_out_segs;
+ s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
+ s->tcp_v6_out_rsts = v6.tcp_out_rsts;
+ s->tcp_v6_in_segs = v6.tcp_in_segs;
+ s->tcp_v6_out_segs = v6.tcp_out_segs;
+ s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
+
+ if (is_offload(adap)) {
+ s->frames = usm_stats.frames;
+ s->octets = usm_stats.octets;
+ s->drops = usm_stats.drops;
+ s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
+ s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
+ }
+
+ s->ofld_no_neigh = err_stats.ofld_no_neigh;
+ s->ofld_cong_defer = err_stats.ofld_cong_defer;
+
+ if (!is_t4(adap->params.chip)) {
+ int v;
+
+ v = t4_read_reg(adap, SGE_STAT_CFG_A);
+ if (STATSOURCE_T5_G(v) == 7) {
+ val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
+ val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
+ s->wc_success = val1 - val2;
+ s->wc_fail = val2;
+ }
+ }
+}
+
+static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
+ u8 i)
+{
+ struct tp_cpl_stats cpl_stats;
+ struct tp_err_stats err_stats;
+ struct tp_fcoe_stats fcoe_stats;
+
+ memset(s, 0, sizeof(*s));
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_cpl_stats(adap, &cpl_stats);
+ t4_tp_get_err_stats(adap, &err_stats);
+ t4_get_fcoe_stats(adap, i, &fcoe_stats);
+ spin_unlock(&adap->stats_lock);
+
+ s->cpl_req = cpl_stats.req[i];
+ s->cpl_rsp = cpl_stats.rsp[i];
+ s->mac_in_errs = err_stats.mac_in_errs[i];
+ s->hdr_in_errs = err_stats.hdr_in_errs[i];
+ s->tcp_in_errs = err_stats.tcp_in_errs[i];
+ s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
+ s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
+ s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
+ s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
+ s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
+ s->octets_ddp = fcoe_stats.octets_ddp;
+ s->frames_ddp = fcoe_stats.frames_ddp;
+ s->frames_drop = fcoe_stats.frames_drop;
+}
+
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- u32 val1, val2;
+ struct lb_port_stats s;
+ int i;
+ u64 *p0;
- t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+ t4_get_port_stats_offset(adapter, pi->tx_chan,
+ (struct port_stats *)data,
+ &pi->stats_base);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
- if (!is_t4(adapter->params.chip)) {
- t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
- val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
- val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
- *data = val1 - val2;
- data++;
- *data = val2;
- data++;
- } else {
- memset(data, 0, 2 * sizeof(u64));
- *data += 2;
- }
+ collect_adapter_stats(adapter, (struct adapter_stats *)data);
+ data += sizeof(struct adapter_stats) / sizeof(u64);
+
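+ /* The channel and loopback blocks below each start with the port id,
+ * which fills the slot reserved by their "--------Channel---------" and
+ * "-------Loopback-----------" header strings.
+ */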
+ *data++ = (u64)pi->port_id;
+ collect_channel_stats(adapter, (struct channel_stats *)data,
+ pi->port_id);
+ data += sizeof(struct channel_stats) / sizeof(u64);
+
+ *data++ = (u64)pi->port_id;
+ memset(&s, 0, sizeof(s));
+ t4_get_lb_stats(adapter, pi->port_id, &s);
+
+ p0 = &s.octets;
+ for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
+ *data++ = (unsigned long long)*p0++;
}
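get_strings() and get_stats() fill the ethtool buffer independently, so the string arrays above must stay in lock-step with the structs they label. A compile-time cross-check along these lines (a sketch only, assuming it sits inside a function in cxgb4_ethtool.c, e.g. get_sset_count()) would catch drift; the "+ 1" term accounts for the port-id value emitted in the header-string slot:

	BUILD_BUG_ON(ARRAY_SIZE(adapter_stats_strings) !=
		     sizeof(struct adapter_stats) / sizeof(u64));
	BUILD_BUG_ON(ARRAY_SIZE(channel_stats_strings) !=
		     sizeof(struct channel_stats) / sizeof(u64) + 1);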
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -250,7 +457,7 @@ static int restart_autoneg(struct net_device *dev)
return -EAGAIN;
if (p->link_cfg.autoneg != AUTONEG_ENABLE)
return -EINVAL;
- t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+ t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
return 0;
}
@@ -267,7 +474,7 @@ static int identify_port(struct net_device *dev,
else
return -EINVAL;
- return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+ return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
@@ -439,7 +646,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
lc->autoneg = cmd->autoneg;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -472,7 +679,7 @@ static int set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -578,7 +785,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
c->rx_coalesce_usecs = qtimer_val(adap, rq);
- c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+ c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
adap->sge.counter_val[rq->pktcnt_idx] : 0;
c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
return 0;
@@ -617,7 +824,7 @@ static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -626,7 +833,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -669,8 +876,8 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
- if (adapter->fn > 0) {
- u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+ if (adapter->pf > 0) {
+ u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
if (aligned_offset < start ||
aligned_offset + aligned_len > start + EEPROMPFSIZE)
@@ -740,37 +947,6 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
return ret;
}
-#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
-#define BCAST_CRC 0xa0ccc1a6
-
-static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- wol->supported = WAKE_BCAST | WAKE_MAGIC;
- wol->wolopts = netdev2adap(dev)->wol;
- memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- int err = 0;
- struct port_info *pi = netdev_priv(dev);
-
- if (wol->wolopts & ~WOL_SUPPORTED)
- return -EINVAL;
- t4_wol_magic_enable(pi->adapter, pi->tx_chan,
- (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
- if (wol->wolopts & WAKE_BCAST) {
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
- ~0ULL, 0, false);
- if (!err)
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
- ~6ULL, ~0ULL, BCAST_CRC, true);
- } else {
- t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
- }
- return err;
-}
-
static u32 get_rss_table_size(struct net_device *dev)
{
const struct port_info *pi = netdev_priv(dev);
@@ -900,8 +1076,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
- .get_wol = get_wol,
- .set_wol = set_wol,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2b4328b33..a65eca515 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -135,8 +135,14 @@ struct filter_entry {
#define FW4_FNAME "/*(DEBLOBBED)*/"
#define FW5_FNAME "/*(DEBLOBBED)*/"
+#define FW6_FNAME "/*(DEBLOBBED)*/"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
+#define FW6_CFNAME "cxgb4/t6-config.txt"
+#define PHY_AQ1202_FIRMWARE "/*(DEBLOBBED)*/"
+#define PHY_BCM84834_FIRMWARE "/*(DEBLOBBED)*/"
+#define PHY_AQ1202_DEVICEID 0x4409
+#define PHY_BCM84834_DEVICEID 0x4486
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
@@ -317,8 +323,9 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
* level") we need to issue the Set Parameters Commannd
* without sleeping (timeout < 0).
*/
- err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
- &name, &value);
+ err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+ &name, &value,
+ -FW_CMD_MAX_TIMEOUT);
if (err)
dev_err(adap->pdev_dev,
@@ -381,7 +388,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
int uc_cnt = netdev_uc_count(dev);
int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
+ unsigned int mb = pi->adapter->pf;
/* first do the secondary unicast addresses */
netdev_for_each_uc_addr(ha, dev) {
@@ -438,7 +445,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
ret = set_addr_filters(dev, sleep_ok);
if (ret == 0)
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
@@ -455,7 +462,7 @@ static int link_start(struct net_device *dev)
{
int ret;
struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
+ unsigned int mb = pi->adapter->pf;
/*
* We do not set address filters and promiscuity here, the stack does
@@ -473,7 +480,7 @@ static int link_start(struct net_device *dev)
}
}
if (ret == 0)
- ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+ ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
&pi->link_cfg);
if (ret == 0) {
local_bh_disable();
@@ -855,23 +862,39 @@ static void free_msix_queue_irqs(struct adapter *adap)
*
* Sets up the portion of the HW RSS table for the port's VI to distribute
* packets to the Rx queues in @queues.
+ * Should never be called before setting up sge eth rx queues
*/
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
- const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
if (!rss)
return -ENOMEM;
/* map the queue indices to queue ids */
for (i = 0; i < pi->rss_size; i++, queues++)
- rss[i] = q[*queues].rspq.abs_id;
+ rss[i] = rxq[*queues].rspq.abs_id;
- err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+ err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ if (!err)
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+ rss[0]);
kfree(rss);
return err;
}
@@ -884,11 +907,15 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
*/
static int setup_rss(struct adapter *adap)
{
- int i, err;
+ int i, j, err;
for_each_port(adap, i) {
const struct port_info *pi = adap2pinfo(adap, i);
+ /* Fill default values with equal distribution */
+ for (j = 0; j < pi->rss_size; j++)
+ pi->rss[j] = j % pi->nqsets;
+
err = cxgb4_write_rss(pi, pi->rss);
if (err)
return err;
@@ -976,7 +1003,7 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler);
+ uldrx_handler, 0);
if (err)
return err;
memset(&q->stats, 0, sizeof(q->stats));
@@ -1006,7 +1033,7 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
- NULL, NULL);
+ NULL, NULL, -1);
if (err)
return err;
msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1026,7 +1053,7 @@ static int setup_sge_queues(struct adapter *adap)
* new/deleted queues.
*/
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- msi_idx, NULL, fwevtq_handler);
+ msi_idx, NULL, fwevtq_handler, -1);
if (err) {
freeout: t4_free_sge_resources(adap);
return err;
@@ -1043,7 +1070,9 @@ freeout: t4_free_sge_resources(adap);
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
msi_idx, &q->fl,
- t4_ethrx_handler);
+ t4_ethrx_handler,
+ t4_get_mps_bg_map(adap,
+ pi->tx_chan));
if (err)
goto freeout;
q->rspq.idx = j;
@@ -1120,10 +1149,7 @@ void *t4_alloc_mem(size_t size)
*/
void t4_free_mem(void *addr)
{
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
+ kvfree(addr);
}
/* Send a Work Request to write the filter at a specified index. We construct
@@ -1323,11 +1349,6 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return fallback(dev, skb) % dev->real_num_tx_queues;
}
-static inline int is_offload(const struct adapter *adap)
-{
- return adap->params.offload;
-}
-
static int closest_timer(const struct sge *s, int time)
{
int i, delta, match = 0, min_delta = INT_MAX;
@@ -1388,8 +1409,8 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
FW_PARAMS_PARAM_X_V(
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
- err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
- &new_idx);
+ err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &v, &new_idx);
if (err)
return err;
}
@@ -1397,7 +1418,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
}
us = us == 0 ? 6 : closest_timer(&adap->sge, us);
- q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+ q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
return 0;
}
@@ -1410,7 +1431,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
- err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
-1, -1, -1,
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
@@ -1693,7 +1714,7 @@ static int tid_init(struct tid_info *t)
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
/* Reserve stid 0 for T4/T5 adapters */
if (!t->stid_base &&
- (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+ (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
__set_bit(0, t->stid_bmap);
return 0;
@@ -1982,11 +2003,8 @@ EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- int ret;
- ret = t4_fwaddrspace_write(adap, adap->mbox,
- 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
- return ret;
+ return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
@@ -2041,25 +2059,6 @@ out:
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
-void cxgb4_disable_db_coalescing(struct net_device *dev)
-{
- struct adapter *adap;
-
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
- NOCOALESCE_F);
-}
-EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
-
-void cxgb4_enable_db_coalescing(struct net_device *dev)
-{
- struct adapter *adap;
-
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
-}
-EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
-
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
struct adapter *adap;
@@ -2099,10 +2098,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
if (offset < mc0_end) {
memtype = MEM_MC0;
memaddr = offset - edc1_end;
- } else if (is_t4(adap->params.chip)) {
- /* T4 only has a single memory channel */
- goto err;
- } else {
+ } else if (is_t5(adap->params.chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
mc1_size = EXT_MEM1_SIZE_G(size) << 20;
mc1_end = mc0_end + mc1_size;
@@ -2113,6 +2109,9 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
/* offset beyond the end of any memory */
goto err;
}
+ } else {
+ /* T4/T6 only has a single memory channel */
+ goto err;
}
}
@@ -2144,14 +2143,16 @@ EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
int cxgb4_bar2_sge_qregs(struct net_device *dev,
unsigned int qid,
enum cxgb4_bar2_qtype qtype,
+ int user,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid)
{
- return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
+ return t4_bar2_sge_qregs(netdev2adap(dev),
qid,
(qtype == CXGB4_BAR2_QTYPE_EGRESS
? T4_BAR2_QTYPE_EGRESS
: T4_BAR2_QTYPE_INGRESS),
+ user,
pbar2_qoffset,
pbar2_qid);
}
@@ -2277,9 +2278,13 @@ static void process_db_full(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+ else
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -2341,7 +2346,7 @@ static void process_db_drop(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- } else {
+ } else if (is_t5(adap->params.chip)) {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
u16 pidx_inc = dropped_db & 0x1fff;
@@ -2349,8 +2354,8 @@ static void process_db_drop(struct work_struct *work)
unsigned int bar2_qid;
int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
- &bar2_qoffset, &bar2_qid);
+ ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+ 0, &bar2_qoffset, &bar2_qid);
if (ret)
dev_err(adap->pdev_dev, "doorbell drop recovery: "
"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
@@ -2362,7 +2367,8 @@ static void process_db_drop(struct work_struct *work)
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
}
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
@@ -2392,7 +2398,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
unsigned short i;
lli.pdev = adap->pdev;
- lli.pf = adap->fn;
+ lli.pf = adap->pf;
lli.l2t = adap->l2t;
lli.tids = &adap->tids;
lli.ports = adap->port;
@@ -2431,6 +2437,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.max_ordird_qp = adap->params.max_ordird_qp;
lli.max_ird_adapter = adap->params.max_ird_adapter;
lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+ lli.nodeid = dev_to_node(adap->pdev_dev);
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -2728,7 +2735,7 @@ static int cxgb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
+ return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
@@ -2872,7 +2879,8 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
spin_unlock(&adapter->stats_lock);
return ns;
}
- t4_get_port_stats(adapter, p->tx_chan, &stats);
+ t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+ &p->stats_base);
spin_unlock(&adapter->stats_lock);
ns->tx_bytes = stats.tx_octets;
@@ -2931,7 +2939,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
} else
return -EINVAL;
- mbox = pi->adapter->fn;
+ mbox = pi->adapter->pf;
if (cmd == SIOCGMIIREG)
ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
data->reg_num, &data->val_out);
@@ -2958,7 +2966,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
-1, -1, -1, true);
if (!ret)
dev->mtu = new_mtu;
@@ -2974,7 +2982,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+ ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
pi->xact_addr_filt, addr->sa_data, true, true);
if (ret < 0)
return ret;
@@ -3033,86 +3041,11 @@ void t4_fatal_err(struct adapter *adap)
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
-/* Return the specified PCI-E Configuration Space register from our Physical
- * Function. We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
-{
- struct fw_ldst_cmd ldst_cmd;
- u32 val;
- int ret;
-
- /* Construct and send the Firmware LDST Command to retrieve the
- * specified PCI-E Configuration Space register.
- */
- memset(&ldst_cmd, 0, sizeof(ldst_cmd));
- ldst_cmd.op_to_addrspace =
- htonl(FW_CMD_OP_V(FW_LDST_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F |
- FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
- ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
- ldst_cmd.u.pcie.ctrl_to_fn =
- (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
- ldst_cmd.u.pcie.r = reg;
- ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
- &ldst_cmd);
-
- /* If the LDST Command suucceeded, exctract the returned register
- * value. Otherwise read it directly ourself.
- */
- if (ret == 0)
- val = ntohl(ldst_cmd.u.pcie.data[0]);
- else
- t4_hw_pci_read_cfg4(adap, reg, &val);
-
- return val;
-}
-
static void setup_memwin(struct adapter *adap)
{
- u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
+ u32 nic_win_base = t4_get_util_window(adap);
- if (is_t4(adap->params.chip)) {
- u32 bar0;
-
- /* Truncation intentional: we only read the bottom 32-bits of
- * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
- * mechanism to read BAR0 instead of using
- * pci_resource_start() because we could be operating from
- * within a Virtual Machine which is trapping our accesses to
- * our Configuration Space and we need to set up the PCI-E
- * Memory Window decoders with the actual addresses which will
- * be coming across the PCI-E link.
- */
- bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
- bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
- adap->t4_bar0 = bar0;
-
- mem_win0_base = bar0 + MEMWIN0_BASE;
- mem_win1_base = bar0 + MEMWIN1_BASE;
- mem_win2_base = bar0 + MEMWIN2_BASE;
- mem_win2_aperture = MEMWIN2_APERTURE;
- } else {
- /* For T5, only relative offset inside the PCIe BAR is passed */
- mem_win0_base = MEMWIN0_BASE;
- mem_win1_base = MEMWIN1_BASE;
- mem_win2_base = MEMWIN2_BASE_T5;
- mem_win2_aperture = MEMWIN2_APERTURE_T5;
- }
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
- mem_win0_base | BIR_V(0) |
- WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
- mem_win1_base | BIR_V(0) |
- WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
- mem_win2_base | BIR_V(0) |
- WINDOW_V(ilog2(mem_win2_aperture) - 10));
- t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
+ t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
@@ -3146,7 +3079,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F);
c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
+ ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
if (ret < 0)
return ret;
@@ -3162,18 +3095,18 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
}
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
+ ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
if (ret < 0)
return ret;
- ret = t4_config_glbl_rss(adap, adap->fn,
+ ret = t4_config_glbl_rss(adap, adap->pf,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
if (ret < 0)
return ret;
- ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+ ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
FW_CMD_CAP_PF);
if (ret < 0)
@@ -3217,7 +3150,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
}
/* get basic stuff going */
- return t4_early_init(adap, adap->fn);
+ return t4_early_init(adap, adap->pf);
}
/*
@@ -3273,6 +3206,142 @@ static int adap_init0_tweaks(struct adapter *adapter)
return 0;
}
+/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
+ * unto themselves and they contain their own firmware to perform their
+ * tasks ...
+ */
+static int phy_aq1202_version(const u8 *phy_fw_data,
+ size_t phy_fw_size)
+{
+ int offset;
+
+ /* At offset 0x8 you're looking for the primary image's
+ * starting offset which is 3 Bytes wide
+ *
+ * At offset 0xa of the primary image, you look for the offset
+ * of the DRAM segment which is 3 Bytes wide.
+ *
+ * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
+ * wide
+ */
+ #define be16(__p) (((__p)[0] << 8) | (__p)[1])
+ #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
+ #define le24(__p) (le16(__p) | ((__p)[2] << 16))
+
+ offset = le24(phy_fw_data + 0x8) << 12;
+ offset = le24(phy_fw_data + offset + 0xa);
+ return be16(phy_fw_data + offset + 0x27e);
+
+ #undef be16
+ #undef le16
+ #undef le24
+}
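To make the offset chase concrete (byte values invented for illustration): if the three little-endian bytes at 0x8 are 02 00 00, the primary image starts at 2 << 12 = 0x2000; if the three bytes at 0x2000 + 0xa read 00 40 00, the DRAM segment sits at 0x4000; the big-endian halfword at 0x4000 + 0x27e, say 01 2f, then yields firmware version 0x12f.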
+
+static struct info_10gbt_phy_fw {
+ unsigned int phy_fw_id; /* PCI Device ID */
+ char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
+ int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
+ int phy_flash; /* Has FLASH for PHY Firmware */
+} phy_info_array[] = {
+ {
+ PHY_AQ1202_DEVICEID,
+ PHY_AQ1202_FIRMWARE,
+ phy_aq1202_version,
+ 1,
+ },
+ {
+ PHY_BCM84834_DEVICEID,
+ PHY_BCM84834_FIRMWARE,
+ NULL,
+ 0,
+ },
+ { 0, NULL, NULL },
+};
+
+static struct info_10gbt_phy_fw *find_phy_info(int devid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
+ if (phy_info_array[i].phy_fw_id == devid)
+ return &phy_info_array[i];
+ }
+ return NULL;
+}
+
+/* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
+ * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
+ * we return a negative error number. If we transfer new firmware we return 1
+ * (from t4_load_phy_fw()). If we don't do anything we return 0.
+ */
+static int adap_init0_phy(struct adapter *adap)
+{
+ const struct firmware *phyf;
+ int ret;
+ struct info_10gbt_phy_fw *phy_info;
+
+ /* Use the device ID to determine which PHY file to flash.
+ */
+ phy_info = find_phy_info(adap->pdev->device);
+ if (!phy_info) {
+ dev_warn(adap->pdev_dev,
+ "No PHY Firmware file found for this PHY\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
+ * use that. The adapter firmware provides us with a memory buffer
+ * where we can load a PHY firmware file from the host if we want to
+ * override the PHY firmware File in flash.
+ */
+ ret = reject_firmware_direct(&phyf, phy_info->phy_fw_file,
+ adap->pdev_dev);
+ if (ret < 0) {
+ /* For adapters without FLASH attached to PHY for their
+ * firmware, it's obviously a fatal error if we can't get the
+ * firmware to the adapter. For adapters with PHY firmware
+ * FLASH storage, it's worth a warning if we can't find the
+ * PHY Firmware but we'll neuter the error ...
+ */
+ dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
+ "/lib/firmware/%s, error %d\n",
+ phy_info->phy_fw_file, -ret);
+ if (phy_info->phy_flash) {
+ int cur_phy_fw_ver = 0;
+
+ t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+ dev_warn(adap->pdev_dev, "continuing with, on-adapter "
+ "FLASH copy, version %#x\n", cur_phy_fw_ver);
+ ret = 0;
+ }
+
+ return ret;
+ }
+
+ /* Load PHY Firmware onto adapter.
+ */
+ ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
+ phy_info->phy_fw_version,
+ (u8 *)phyf->data, phyf->size);
+ if (ret < 0)
+ dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+ -ret);
+ else if (ret > 0) {
+ int new_phy_fw_ver = 0;
+
+ if (phy_info->phy_fw_version)
+ new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
+ phyf->size);
+ dev_info(adap->pdev_dev, "Successfully transferred PHY "
+ "Firmware /lib/firmware/%s, version %#x\n",
+ phy_info->phy_fw_file, new_phy_fw_ver);
+ }
+
+ release_firmware(phyf);
+
+ return ret;
+}
+
/*
* Attempt to initialize the adapter via a Firmware Configuration File.
*/
@@ -3297,6 +3366,16 @@ static int adap_init0_config(struct adapter *adapter, int reset)
goto bye;
}
+ /* If this is a 10Gb/s-BT adapter make sure the chip-external
+ * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
+ * to be performed after any global adapter RESET above since some
+ * PHYs only have local RAM copies of the PHY firmware.
+ */
+ if (is_10gbt_device(adapter->pdev->device)) {
+ ret = adap_init0_phy(adapter);
+ if (ret < 0)
+ goto bye;
+ }
/*
* If we have a T4 configuration file under /lib/firmware/cxgb4/,
* then use that. Otherwise, use the configuration file stored
@@ -3309,6 +3388,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
case CHELSIO_T5:
fw_config_file = FW5_CFNAME;
break;
+ case CHELSIO_T6:
+ fw_config_file = FW6_CFNAME;
+ break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
adapter->pdev->device);
@@ -3334,7 +3416,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adapter, adapter->mbox,
- adapter->fn, 0, 1, params, val);
+ adapter->pf, 0, 1, params, val);
if (ret == 0) {
/*
* For t4_memory_rw() below addresses and
@@ -3505,7 +3587,24 @@ static struct fw_info fw_info_array[] = {
.intfver_iscsi = FW_INTFVER(T5, ISCSI),
.intfver_fcoe = FW_INTFVER(T5, FCOE),
},
+ }, {
+ .chip = CHELSIO_T6,
+ .fs_name = FW6_CFNAME,
+ .fw_mod_name = FW6_FNAME,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T6,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+ .intfver_nic = FW_INTFVER(T6, NIC),
+ .intfver_vnic = FW_INTFVER(T6, VNIC),
+ .intfver_ofld = FW_INTFVER(T6, OFLD),
+ .intfver_ri = FW_INTFVER(T6, RI),
+ .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
+ .intfver_iscsi = FW_INTFVER(T6, ISCSI),
+ .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
+ .intfver_fcoe = FW_INTFVER(T6, FCOE),
+ },
}
+
};
static struct fw_info *find_fw_info(int chip)
@@ -3611,7 +3710,7 @@ static int adap_init0(struct adapter *adap)
* the firmware. On the other hand, we need these fairly early on
* so we do this right after getting ahold of the firmware.
*/
- ret = get_vpd_params(adap, &adap->params.vpd);
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
if (ret < 0)
goto bye;
@@ -3623,7 +3722,7 @@ static int adap_init0(struct adapter *adap)
v =
FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;
@@ -3646,7 +3745,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
params, val);
/* If the firmware doesn't support Configuration Files,
@@ -3705,7 +3804,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
params[5] = FW_PARAM_PFVF(IQFLINT_START);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_start = val[0];
@@ -3723,7 +3822,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = FW_PARAM_PFVF(EQ_END);
params[1] = FW_PARAM_PFVF(IQFLINT_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
@@ -3744,7 +3843,7 @@ static int adap_init0(struct adapter *adap)
}
/* Allocate the memory for the various egress queue bitmaps
- * ie starving_fl and txq_maperr.
+ * ie starving_fl, txq_maperr and blocked_fl.
*/
adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
sizeof(long), GFP_KERNEL);
@@ -3760,9 +3859,18 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+#ifdef CONFIG_DEBUG_FS
+ adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.blocked_fl) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+#endif
+
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->clipt_start = val[0];
@@ -3771,7 +3879,7 @@ static int adap_init0(struct adapter *adap)
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
/* If Active filter size is set we enable establishing
* offload connection through firmware work request
*/
@@ -3788,7 +3896,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
val[0] = 1;
- (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
+ (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
/*
* Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
@@ -3800,7 +3908,7 @@ static int adap_init0(struct adapter *adap)
adap->params.ulptx_memwrite_dsgl = false;
} else {
params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1, params, val);
adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
}
@@ -3826,7 +3934,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(TDDP_START);
params[4] = FW_PARAM_PFVF(TDDP_END);
params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
params, val);
if (ret < 0)
goto bye;
@@ -3864,7 +3972,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(RQ_END);
params[4] = FW_PARAM_PFVF(PBL_START);
params[5] = FW_PARAM_PFVF(PBL_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
params, val);
if (ret < 0)
goto bye;
@@ -3881,7 +3989,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
val);
if (ret < 0)
goto bye;
@@ -3894,7 +4002,7 @@ static int adap_init0(struct adapter *adap)
params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
val);
if (ret < 0) {
adap->params.max_ordird_qp = 8;
@@ -3912,7 +4020,7 @@ static int adap_init0(struct adapter *adap)
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
params[1] = FW_PARAM_PFVF(ISCSI_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
params, val);
if (ret < 0)
goto bye;
@@ -3958,8 +4066,8 @@ static int adap_init0(struct adapter *adap)
adap->params.b_wnd);
}
t4_init_sge_params(adap);
- t4_init_tp_params(adap);
adap->flags |= FW_OK;
+ t4_init_tp_params(adap);
return 0;
/*
@@ -3972,6 +4080,9 @@ bye:
kfree(adap->sge.ingr_map);
kfree(adap->sge.starving_fl);
kfree(adap->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+ kfree(adap->sge.blocked_fl);
+#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
@@ -4039,7 +4150,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
+ if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
adap->flags |= FW_OK;
if (adap_init1(adap, &c))
@@ -4048,7 +4159,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
for_each_port(adap, i) {
struct port_info *p = adap2pinfo(adap, i);
- ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+ ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
NULL, NULL);
if (ret < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4339,7 +4450,12 @@ static int enable_msix(struct adapter *adap)
static int init_rss(struct adapter *adap)
{
- unsigned int i, j;
+ unsigned int i;
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -4347,8 +4463,6 @@ static int init_rss(struct adapter *adap)
pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
if (!pi->rss)
return -ENOMEM;
- for (j = 0; j < pi->rss_size; j++)
- pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
}
return 0;
}
@@ -4412,15 +4526,23 @@ static void free_some_resources(struct adapter *adapter)
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
kfree(adapter->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+ kfree(adapter->sge.blocked_fl);
+#endif
disable_msi(adapter);
for_each_port(adapter, i)
if (adapter->port[i]) {
+ struct port_info *pi = adap2pinfo(adapter, i);
+
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
kfree(adap2pinfo(adapter, i)->rss);
free_netdev(adapter->port[i]);
}
if (adapter->flags & FW_OK)
- t4_fw_bye(adapter, adapter->fn);
+ t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
@@ -4511,7 +4633,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox = func;
- adapter->fn = func;
+ adapter->pf = func;
adapter->msg_enable = dflt_msg_enable;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -4531,7 +4653,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!is_t4(adapter->params.chip)) {
s_qpp = (QUEUESPERPAGEPF0_S +
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
- adapter->fn);
+ adapter->pf);
qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
num_seg = PAGE_SIZE / SEGMENT_SIZE;
@@ -4554,10 +4676,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_free_adapter;
}
+ t4_write_reg(adapter, SGE_STAT_CFG_A,
+ STATSOURCE_T5_V(7) | STATMODE_V(0));
}
setup_memwin(adapter);
err = adap_init0(adapter);
+#ifdef CONFIG_DEBUG_FS
+ bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
+#endif
setup_memwin_rdma(adapter);
if (err)
goto out_unmap_bar;
@@ -4606,10 +4733,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_port_init(adapter, func, func, 0);
if (err)
goto out_free_dev;
+ } else if (adapter->params.nports == 1) {
+ /* If we don't have a connection to the firmware -- possibly
+ * because of an error -- grab the raw VPD parameters so we
+ * can set the proper MAC Address on the debug network
+ * interface that we've created.
+ */
+ u8 hw_addr[ETH_ALEN];
+ u8 *na = adapter->params.vpd.na;
+
+ err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
+ if (!err) {
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+ t4_set_hw_addr(adapter, 0, hw_addr);
+ }
}
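As a concrete (invented) example of the conversion above: a VPD network-address string of "000743000123" is consumed two hex digits at a time, giving the station address 00:07:43:00:01:23.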
- /*
- * Configure queues and allocate tables now, they can be needed as
+ /* Configure queues and allocate tables now, they can be needed as
* soon as the first register_netdev completes.
*/
cfg_queues(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 78ab4d406..b27897d4f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -264,6 +264,7 @@ struct cxgb4_lld_info {
unsigned int max_ordird_qp; /* Max ORD/IRD depth per RDMA QP */
unsigned int max_ird_adapter; /* Max IRD memory per adapter */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ int nodeid; /* device numa node id */
};
struct cxgb4_uld_info {
@@ -297,8 +298,6 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
-void cxgb4_disable_db_coalescing(struct net_device *dev);
-void cxgb4_enable_db_coalescing(struct net_device *dev);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev);
@@ -306,6 +305,7 @@ enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS };
int cxgb4_bar2_sge_qregs(struct net_device *dev,
unsigned int qid,
enum cxgb4_bar2_qtype qtype,
+ int user,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 0d2eddab0..942db078f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -100,16 +100,6 @@
*/
#define TX_QCHECK_PERIOD (HZ / 2)
-/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
- * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
- * State Machines in the same state for this amount of time (in HZ) then we'll
- * issue a warning about a potential hang. We'll repeat the warning as the
- * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
- * the situation clears. If the situation clears, we'll note that as well.
- */
-#define SGE_IDMA_WARN_THRESH (1 * HZ)
-#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
-
/*
* Max number of Tx descriptors to be reclaimed by the Tx timer.
*/
@@ -532,14 +522,17 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
- u32 val;
if (q->pend_cred >= 8) {
+ u32 val = adap->params.arch.sge_fl_db;
+
if (is_t4(adap->params.chip))
- val = PIDX_V(q->pend_cred / 8);
+ val |= PIDX_V(q->pend_cred / 8);
else
- val = PIDX_T5_V(q->pend_cred / 8) |
- DBTYPE_F;
- val |= DBPRIO_F;
+ val |= PIDX_T5_V(q->pend_cred / 8);
+
+ /* Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
wmb();
/* If we don't have access to the new User Doorbell (T5+), use
@@ -594,6 +587,11 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
int node;
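+ /* Skip free lists that have been administratively blocked via the
+ * blocked_fl debugfs node (indexed relative to sge.egr_start).
+ */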
+#ifdef CONFIG_DEBUG_FS
+ if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
+ goto out;
+#endif
+
gfp |= __GFP_NOWARN;
node = dev_to_node(adap->pdev_dev);
@@ -930,7 +928,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
- wmb(); /* write descriptors before telling HW */
+ /* Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
+ */
+ wmb();
/* If we don't have access to the new User Doorbell (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism.
@@ -1032,7 +1033,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
@@ -1047,7 +1048,7 @@ nocsum: /*
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
- return TXPKT_L4CSUM_DIS;
+ return TXPKT_L4CSUM_DIS_F;
}
} else {
/*
@@ -1063,15 +1064,21 @@ nocsum: /*
goto nocsum;
}
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
- else {
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+ int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+ if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+ hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ else
+ hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+ } else {
int start = skb_transport_offset(skb);
- return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_CSUM_START_V(start) |
+ TXPKT_CSUM_LOC_V(start + skb->csum_offset);
}
}
@@ -1112,11 +1119,11 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
return -ENOTSUPP;
/* FC CRC offload */
- *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
- TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
- TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
- TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
- TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+ *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
+ TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
+ TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
+ TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
+ TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1130,7 +1137,6 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len;
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
@@ -1143,6 +1149,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
bool immediate = false;
+ int len, max_pkt_len;
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1156,13 +1163,20 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb_get_queue_mapping(skb);
q = &adap->sge.ethtxq[qidx + pi->first_qset];
reclaim_completed_tx(adap, &q->q, true);
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
@@ -1213,23 +1227,29 @@ out_free: dev_kfree_skb_any(skb);
len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE | LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len / 4) |
- LSO_IPHDR_LEN(l3hdr_len / 4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->c.ipid_ofst = htons(0);
lso->c.mss = htons(ssi->gso_size);
lso->c.seqno_offset = htonl(0);
if (is_t4(adap->params.chip))
lso->c.len = htonl(skb->len);
else
- lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
+ lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
- cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
@@ -1238,23 +1258,25 @@ out_free: dev_kfree_skb_any(skb);
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ cntrl = hwcsum(adap->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
q->tx_cso++;
}
}
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
if (skb->protocol == htons(ETH_P_FCOE))
- cntrl |= TXPKT_VLAN(
+ cntrl |= TXPKT_VLAN_V(
((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
}
- cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1964,7 +1986,7 @@ static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
static inline bool is_new_response(const struct rsp_ctrl *r,
const struct sge_rspq *q)
{
- return RSPD_GEN(r->type_gen) == q->gen;
+ return (r->type_gen >> RSPD_GEN_S) == q->gen;
}
/**
@@ -2011,19 +2033,19 @@ static int process_responses(struct sge_rspq *q, int budget)
break;
dma_rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ rsp_type = RSPD_TYPE_G(rc->type_gen);
+ if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
struct page_frag *fp;
struct pkt_gl si;
const struct rx_sw_desc *rsd;
u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
- if (len & RSPD_NEWBUF) {
+ if (len & RSPD_NEWBUF_F) {
if (likely(q->offset > 0)) {
free_rx_bufs(q->adap, &rxq->fl, 1);
q->offset = 0;
}
- len = RSPD_LEN(len);
+ len = RSPD_LEN_G(len);
}
si.tot_len = len;
@@ -2058,7 +2080,7 @@ static int process_responses(struct sge_rspq *q, int budget)
q->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&si, &rxq->fl, frags);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
ret = q->handler(q, q->cur_desc, NULL);
} else {
ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
@@ -2066,7 +2088,7 @@ static int process_responses(struct sge_rspq *q, int budget)
if (unlikely(ret)) {
/* couldn't process descriptor, back off for recovery */
- q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
break;
}
@@ -2090,7 +2112,7 @@ int cxgb_busy_poll(struct napi_struct *napi)
return LL_FLUSH_BUSY;
work_done = process_responses(q, 4);
- params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+ params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
q->next_intr_params = params;
val = CIDXINC_V(work_done) | SEINTARM_V(params);
@@ -2137,7 +2159,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
int timer_index;
napi_complete(napi);
- timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
+ timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
if (q->adaptive_rx) {
if (work_done > max(timer_pkt_quota[timer_index],
@@ -2147,15 +2169,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
timer_index = timer_index - 1;
timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
- q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
- V_QINTR_CNT_EN;
+ q->next_intr_params =
+ QINTR_TIMER_IDX_V(timer_index) |
+ QINTR_CNT_EN_V(0);
params = q->next_intr_params;
} else {
params = q->next_intr_params;
q->next_intr_params = q->intr_params;
}
} else
- params = QINTR_TIMER_IDX(7);
+ params = QINTR_TIMER_IDX_V(7);
val = CIDXINC_V(work_done) | SEINTARM_V(params);
@@ -2203,7 +2226,7 @@ static unsigned int process_intrq(struct adapter *adap)
break;
dma_rmb();
- if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+ if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
qid -= adap->sge.ingr_start;
@@ -2279,7 +2302,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
static void sge_rx_timer_cb(unsigned long data)
{
unsigned long m;
- unsigned int i, idma_same_state_cnt[2];
+ unsigned int i;
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
@@ -2300,67 +2323,16 @@ static void sge_rx_timer_cb(unsigned long data)
set_bit(id, s->starving_fl);
}
}
+ /* The remainder of the SGE RX Timer Callback routine is dedicated to
+ * global Master PF activities like checking for chip ingress stalls,
+ * etc.
+ */
+ if (!(adap->flags & MASTER_PF))
+ goto done;
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
- idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
- idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
-
- for (i = 0; i < 2; i++) {
- u32 debug0, debug11;
-
- /* If the Ingress DMA Same State Counter ("timer") is less
- * than 1s, then we can reset our synthesized Stall Timer and
- * continue. If we have previously emitted warnings about a
- * potential stalled Ingress Queue, issue a note indicating
- * that the Ingress Queue has resumed forward progress.
- */
- if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
- if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
- CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
- i, s->idma_qid[i],
- s->idma_stalled[i]/HZ);
- s->idma_stalled[i] = 0;
- continue;
- }
-
- /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
- * domain. The first time we get here it'll be because we
- * passed the 1s Threshold; each additional time it'll be
- * because the RX Timer Callback is being fired on its regular
- * schedule.
- *
- * If the stall is below our Potential Hung Ingress Queue
- * Warning Threshold, continue.
- */
- if (s->idma_stalled[i] == 0)
- s->idma_stalled[i] = HZ;
- else
- s->idma_stalled[i] += RX_QCHECK_PERIOD;
-
- if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
- continue;
-
- /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
- if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
- continue;
-
- /* Read and save the SGE IDMA State and Queue ID information.
- * We do this every time in case it changes across time ...
- */
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
- debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
- s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
-
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
- debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
- s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
-
- CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
- i, s->idma_qid[i], s->idma_state[i],
- s->idma_stalled[i]/HZ, debug0, debug11);
- t4_sge_decode_idma_state(adap, s->idma_state[i]);
- }
+ t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
+done:
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
@@ -2429,7 +2401,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
u64 bar2_qoffset;
int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
&bar2_qoffset, pbar2_qid);
if (ret)
return NULL;
@@ -2437,9 +2409,12 @@ static void __iomem *bar2_address(struct adapter *adapter,
return adapter->bar2 + bar2_qoffset;
}
+/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
+ */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd)
+ struct sge_fl *fl, rspq_handler_t hnd, int cong)
{
int ret, flsz = 0;
struct fw_iq_cmd c;
@@ -2457,12 +2432,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
+ FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
FW_LEN16(c));
c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
- FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+ FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
+ FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
@@ -2471,8 +2447,21 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
+ if (cong >= 0)
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
if (fl) {
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /* Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+		 * than the SGE's Egress Congestion Threshold
+ * (fl_starve_thres - 1).
+ */
+ if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+ fl->size = s->fl_starve_thres - 1 + 2 * 8;
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
@@ -2481,17 +2470,25 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
- FW_IQ_CMD_FL0FETCHRO_F |
- FW_IQ_CMD_FL0DATARO_F |
- FW_IQ_CMD_FL0PADEN_F);
- c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
- FW_IQ_CMD_FL0FBMAX_V(3));
+ c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0FETCHRO_F |
+ FW_IQ_CMD_FL0DATARO_F |
+ FW_IQ_CMD_FL0PADEN_F);
+ if (cong >= 0)
+ c.iqns_to_fl0congen |=
+ htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
+ FW_IQ_CMD_FL0CONGCIF_F |
+ FW_IQ_CMD_FL0CONGEN_F);
+ c.fl0dcaen_to_fl0cidxfthresh =
+ htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret)
goto err;
@@ -2532,6 +2529,41 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
&fl->bar2_qid);
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
+
+ /* For T5 and later we attempt to set up the Congestion Manager values
+	 * of the new RX Ethernet Queue. This should really be handled by
+	 * firmware because it's more complex than any host driver wants to
+	 * get involved with, it differs per chip, and what we do here is
+	 * almost certainly wrong. Firmware would be wrong as well, but it
+	 * would be a lot easier to fix in one place ... For now we do
+	 * something very simple (and hopefully less wrong).
+ */
+ if (!is_t4(adap->params.chip) && cong >= 0) {
+ u32 param, val;
+ int i;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+ FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
+ if (cong == 0) {
+ val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
+ } else {
+ val =
+ CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
+ for (i = 0; i < 4; i++) {
+ if (cong & (1 << i))
+ val |=
+ CONMCTXT_CNGCHMAP_V(1 << (i << 2));
+ }
+ }
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret)
+ dev_warn(adap->pdev_dev, "Failed to set Congestion"
+ " Manager Context for Ingress Queue %d: %d\n",
+ iq->cntxt_id, -ret);
+ }
+
return 0;
fl_nomem:
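Illustrative aside, not part of the patch: a hypothetical caller showing the @intr_idx and @cong encodings documented above; the queue names and handler symbols are placeholders.

	/* Interrupt-driven queue on MSI-X vector 2, free list attached,
	 * congestion feedback disabled.
	 */
	err = t4_sge_alloc_rxq(adap, &rxq->rspq, false, dev,
			       2, &rxq->fl, t4_ethrx_handler, -1);

	/* Forwarded-interrupt queue whose responses are delivered to the
	 * queue with absolute qid 16, no free list, congestion channel
	 * map 0x3.
	 */
	err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
			       -(16 + 1), NULL, fwevtq_handler, 0x3);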
@@ -2589,23 +2621,24 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_ETH_CMD_PFN_V(adap->fn) |
+ FW_EQ_ETH_CMD_PFN_V(adap->pf) |
FW_EQ_ETH_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
- c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
- FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_ETH_CMD_FETCHRO_V(1) |
- FW_EQ_ETH_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
- FW_EQ_ETH_CMD_FBMAX_V(3) |
- FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -2637,29 +2670,30 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
sizeof(struct tx_desc), 0, &txq->q.phys_addr,
- NULL, 0, NUMA_NO_NODE);
+ NULL, 0, dev_to_node(adap->pdev_dev));
if (!txq->q.desc)
return -ENOMEM;
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
+ FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
FW_EQ_CTRL_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
c.physeqid_pkd = htonl(0);
- c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
- FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_CTRL_CMD_FETCHRO_F |
- FW_EQ_CTRL_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
- FW_EQ_CTRL_CMD_FBMAX_V(3) |
- FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
@@ -2697,21 +2731,22 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
+ FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
FW_EQ_OFLD_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
- c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
- FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_OFLD_CMD_FETCHRO_F |
- FW_EQ_OFLD_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
- FW_EQ_OFLD_CMD_FBMAX_V(3) |
- FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -2750,7 +2785,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
- t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
@@ -2805,7 +2840,7 @@ void t4_free_sge_resources(struct adapter *adap)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
if (etq->q.desc) {
- t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
kfree(etq->q.sdesc);
@@ -2824,7 +2859,7 @@ void t4_free_sge_resources(struct adapter *adap)
if (q->q.desc) {
tasklet_kill(&q->qresume_tsk);
- t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
q->q.cntxt_id);
free_tx_desc(adap, &q->q, q->q.in_use, false);
kfree(q->q.sdesc);
@@ -2839,7 +2874,7 @@ void t4_free_sge_resources(struct adapter *adap)
if (cq->q.desc) {
tasklet_kill(&cq->qresume_tsk);
- t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
cq->q.cntxt_id);
__skb_queue_purge(&cq->sendq);
free_txq(adap, &cq->q);
@@ -3023,7 +3058,11 @@ int t4_sge_init(struct adapter *adap)
* Packing Boundary. T5 introduced the ability to specify these
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
- * specifications.
+ * specifications. (Note that it makes no real practical sense to
+	 * have the Padding Boundary be larger than the Packing Boundary, but you
+ * could set the chip up that way and, in fact, legacy T4 code would
+	 * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
*/
ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
INGPADBOUNDARY_SHIFT_X);
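Illustrative arithmetic, not part of the patch: assuming the usual INGPADBOUNDARY_SHIFT_X of 5, a Padding Boundary field value of 0 decodes to 1 << (0 + 5) = 32 bytes; combined with, say, a 64-byte Packing Boundary, the effective Ingress Packet Data alignment in Packed Buffer Mode is max(32, 64) = 64 bytes, which is the value the driver ends up using as its free-list alignment.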
@@ -3067,11 +3106,14 @@ int t4_sge_init(struct adapter *adap)
egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
s->fl_starve_thres = 2*egress_threshold + 1;
+ t4_idma_monitor_init(adap, &s->idma_monitor);
+
+	/* Set up timers used for recurring callbacks to process RX and TX
+ * administrative tasks.
+ */
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
- s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
- s->idma_stalled[0] = 0;
- s->idma_stalled[1] = 0;
+
spin_lock_init(&s->intrq_lock);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8578a742..2b52aae7e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -150,7 +150,12 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
*/
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
- u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
+ u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ req |= ENABLE_F;
+ else
+ req |= T6_ENABLE_F;
if (is_t4(adap->params.chip))
req |= LOCALCFG_F;
@@ -214,8 +219,8 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
dev_alert(adap->pdev_dev,
"FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
- asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
- ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+ asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+ be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
@@ -233,13 +238,14 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
}
/**
- * t4_wr_mbox_meat - send a command to FW through the given mailbox
+ * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
* @adap: the adapter
* @mbox: index of the mailbox to use
* @cmd: the command to write
* @size: command length in bytes
* @rpl: where to optionally store the reply
* @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
*
* Sends the given command to FW through the selected mailbox and waits
* for the FW to execute the command. If @rpl is not %NULL it is used to
@@ -254,8 +260,8 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
* command or FW executes it but signals an error. In the latter case
* the return value is the error code indicated by FW (negated).
*/
-int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
- void *rpl, bool sleep_ok)
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout)
{
static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
@@ -294,7 +300,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+ for (i = 0; i < timeout; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -332,114 +338,11 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
return -ETIMEDOUT;
}
-/**
- * t4_mc_read - read from MC through backdoor accesses
- * @adap: the adapter
- * @addr: address of first byte requested
- * @idx: which MC to access
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from MC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
-{
- int i;
- u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
- u32 mc_bist_status_rdata, mc_bist_data_pattern;
-
- if (is_t4(adap->params.chip)) {
- mc_bist_cmd = MC_BIST_CMD_A;
- mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
- mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
- mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
- mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
- } else {
- mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
- mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
- mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
- mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
- mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
- }
-
- if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
- return -EBUSY;
- t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
- t4_write_reg(adap, mc_bist_cmd_len, 64);
- t4_write_reg(adap, mc_bist_data_pattern, 0xc);
- t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
- BIST_CMD_GAP_V(1));
- i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
- if (i)
- return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
- if (ecc)
- *ecc = t4_read_reg64(adap, MC_DATA(16));
-#undef MC_DATA
- return 0;
-}
-
-/**
- * t4_edc_read - read from EDC through backdoor accesses
- * @adap: the adapter
- * @idx: which EDC to access
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
{
- int i;
- u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
- u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
-
- if (is_t4(adap->params.chip)) {
- edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
- edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
- edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
- edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
- idx);
- edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
- idx);
- } else {
- edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
- edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
- edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
- edc_bist_cmd_data_pattern =
- EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
- edc_bist_status_rdata =
- EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
- }
-
- if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
- return -EBUSY;
- t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
- t4_write_reg(adap, edc_bist_cmd_len, 64);
- t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
- t4_write_reg(adap, edc_bist_cmd,
- BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
- i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
- if (i)
- return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
- if (ecc)
- *ecc = t4_read_reg64(adap, EDC_DATA(16));
-#undef EDC_DATA
- return 0;
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+ FW_CMD_MAX_TIMEOUT);
}
/**
@@ -483,9 +386,8 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
- * MEM_MC = 2 -- T4
- * MEM_MC0 = 2 -- For T5
- * MEM_MC1 = 3 -- For T5
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
@@ -514,7 +416,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
if (is_t4(adap->params.chip))
mem_base -= adap->t4_bar0;
- win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
+ win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
/* Calculate our initial PCI-E Memory Window Position and Offset into
* that Window.
@@ -625,6 +527,102 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
return 0;
}
+/* Return the specified PCI-E Configuration Space register from our Physical
+ * Function. We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
+{
+ u32 val, ldst_addrspace;
+
+	/* Construct and send the Firmware LDST Command to
+ * retrieve the specified PCI-E Configuration Space register.
+ */
+ struct fw_ldst_cmd ldst_cmd;
+ int ret;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
+ ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ ldst_addrspace);
+ ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
+ ldst_cmd.u.pcie.ctrl_to_fn =
+ (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
+ ldst_cmd.u.pcie.r = reg;
+
+ /* If the LDST Command succeeds, return the result, otherwise
+ * fall through to reading it directly ourselves ...
+ */
+ ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+ &ldst_cmd);
+ if (ret == 0)
+ val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
+ else
+ /* Read the desired Configuration Space register via the PCI-E
+ * Backdoor mechanism.
+ */
+ t4_hw_pci_read_cfg4(adap, reg, &val);
+ return val;
+}
+
+/* Get the window offset based on the PCI base register passed to it.
+ * The window aperture is currently unhandled, but there is no use case for
+ * it right now.
+ */
+static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
+ u32 memwin_base)
+{
+ u32 ret;
+
+ if (is_t4(adap->params.chip)) {
+ u32 bar0;
+
+ /* Truncation intentional: we only read the bottom 32-bits of
+ * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
+ * mechanism to read BAR0 instead of using
+ * pci_resource_start() because we could be operating from
+ * within a Virtual Machine which is trapping our accesses to
+ * our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will
+ * be coming across the PCI-E link.
+ */
+ bar0 = t4_read_pcie_cfg4(adap, pci_base);
+ bar0 &= pci_mask;
+ adap->t4_bar0 = bar0;
+
+ ret = bar0 + memwin_base;
+ } else {
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ ret = memwin_base;
+ }
+ return ret;
+}
+
+/* Get the default utility window (win0) used by everyone */
+u32 t4_get_util_window(struct adapter *adap)
+{
+ return t4_get_window(adap, PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
+}
+
+/* Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
+{
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
+ memwin_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
+}
+
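Illustrative aside, not part of the patch: a typical adapter-init path would pair the two helpers above; MEMWIN_NIC is assumed here to be the driver's utility window index.

	/* Point the utility memory window at its default base; the helper
	 * reads the register back so the new decoder value takes effect.
	 */
	t4_setup_memwin(adap, t4_get_util_window(adap), MEMWIN_NIC);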
/**
* t4_get_regs_len - return the size of the chips register set
* @adapter: the adapter
@@ -640,6 +638,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
return T4_REGMAP_SIZE;
case CHELSIO_T5:
+ case CHELSIO_T6:
return T5_REGMAP_SIZE;
}
@@ -666,7 +665,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x11fc, 0x123c,
0x1300, 0x173c,
0x1800, 0x18fc,
- 0x3000, 0x30d8,
+ 0x3000, 0x305c,
+ 0x3068, 0x30d8,
0x30e0, 0x5924,
0x5960, 0x59d4,
0x5a00, 0x5af8,
@@ -729,7 +729,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x19238, 0x1924c,
0x193f8, 0x19474,
0x19490, 0x194f8,
- 0x19800, 0x19f30,
+ 0x19800, 0x19f4c,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
@@ -878,7 +878,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x27780, 0x2778c,
0x27800, 0x27c38,
0x27c80, 0x27d7c,
- 0x27e00, 0x27e04
+ 0x27e00, 0x27e04,
};
static const unsigned int t5_reg_ranges[] = {
@@ -888,7 +888,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1280, 0x173c,
0x1800, 0x18fc,
0x3000, 0x3028,
- 0x3060, 0x30d8,
+ 0x3068, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
@@ -900,7 +900,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x5940, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a9c,
- 0x5b9c, 0x5bfc,
+ 0x5b94, 0x5bfc,
0x6000, 0x6040,
0x6058, 0x614c,
0x7700, 0x7798,
@@ -1014,27 +1014,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x30800, 0x30834,
0x308c0, 0x30908,
0x30910, 0x309ac,
- 0x30a00, 0x30a04,
- 0x30a0c, 0x30a2c,
+ 0x30a00, 0x30a2c,
0x30a44, 0x30a50,
0x30a74, 0x30c24,
+ 0x30d00, 0x30d00,
0x30d08, 0x30d14,
0x30d1c, 0x30d20,
0x30d3c, 0x30d50,
0x31200, 0x3120c,
0x31220, 0x31220,
0x31240, 0x31240,
- 0x31600, 0x31600,
- 0x31608, 0x3160c,
+ 0x31600, 0x3160c,
0x31a00, 0x31a1c,
- 0x31e04, 0x31e20,
+ 0x31e00, 0x31e20,
0x31e38, 0x31e3c,
0x31e80, 0x31e80,
0x31e88, 0x31ea8,
0x31eb0, 0x31eb4,
0x31ec8, 0x31ed4,
0x31fb8, 0x32004,
- 0x32208, 0x3223c,
+ 0x32200, 0x32200,
+ 0x32208, 0x32240,
+ 0x32248, 0x32280,
+ 0x32288, 0x322c0,
+ 0x322c8, 0x322fc,
0x32600, 0x32630,
0x32a00, 0x32abc,
0x32b00, 0x32b70,
@@ -1074,27 +1077,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x34800, 0x34834,
0x348c0, 0x34908,
0x34910, 0x349ac,
- 0x34a00, 0x34a04,
- 0x34a0c, 0x34a2c,
+ 0x34a00, 0x34a2c,
0x34a44, 0x34a50,
0x34a74, 0x34c24,
+ 0x34d00, 0x34d00,
0x34d08, 0x34d14,
0x34d1c, 0x34d20,
0x34d3c, 0x34d50,
0x35200, 0x3520c,
0x35220, 0x35220,
0x35240, 0x35240,
- 0x35600, 0x35600,
- 0x35608, 0x3560c,
+ 0x35600, 0x3560c,
0x35a00, 0x35a1c,
- 0x35e04, 0x35e20,
+ 0x35e00, 0x35e20,
0x35e38, 0x35e3c,
0x35e80, 0x35e80,
0x35e88, 0x35ea8,
0x35eb0, 0x35eb4,
0x35ec8, 0x35ed4,
0x35fb8, 0x36004,
- 0x36208, 0x3623c,
+ 0x36200, 0x36200,
+ 0x36208, 0x36240,
+ 0x36248, 0x36280,
+ 0x36288, 0x362c0,
+ 0x362c8, 0x362fc,
0x36600, 0x36630,
0x36a00, 0x36abc,
0x36b00, 0x36b70,
@@ -1134,27 +1140,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x38800, 0x38834,
0x388c0, 0x38908,
0x38910, 0x389ac,
- 0x38a00, 0x38a04,
- 0x38a0c, 0x38a2c,
+ 0x38a00, 0x38a2c,
0x38a44, 0x38a50,
0x38a74, 0x38c24,
+ 0x38d00, 0x38d00,
0x38d08, 0x38d14,
0x38d1c, 0x38d20,
0x38d3c, 0x38d50,
0x39200, 0x3920c,
0x39220, 0x39220,
0x39240, 0x39240,
- 0x39600, 0x39600,
- 0x39608, 0x3960c,
+ 0x39600, 0x3960c,
0x39a00, 0x39a1c,
- 0x39e04, 0x39e20,
+ 0x39e00, 0x39e20,
0x39e38, 0x39e3c,
0x39e80, 0x39e80,
0x39e88, 0x39ea8,
0x39eb0, 0x39eb4,
0x39ec8, 0x39ed4,
0x39fb8, 0x3a004,
- 0x3a208, 0x3a23c,
+ 0x3a200, 0x3a200,
+ 0x3a208, 0x3a240,
+ 0x3a248, 0x3a280,
+ 0x3a288, 0x3a2c0,
+ 0x3a2c8, 0x3a2fc,
0x3a600, 0x3a630,
0x3aa00, 0x3aabc,
0x3ab00, 0x3ab70,
@@ -1194,27 +1203,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3c800, 0x3c834,
0x3c8c0, 0x3c908,
0x3c910, 0x3c9ac,
- 0x3ca00, 0x3ca04,
- 0x3ca0c, 0x3ca2c,
+ 0x3ca00, 0x3ca2c,
0x3ca44, 0x3ca50,
0x3ca74, 0x3cc24,
+ 0x3cd00, 0x3cd00,
0x3cd08, 0x3cd14,
0x3cd1c, 0x3cd20,
0x3cd3c, 0x3cd50,
0x3d200, 0x3d20c,
0x3d220, 0x3d220,
0x3d240, 0x3d240,
- 0x3d600, 0x3d600,
- 0x3d608, 0x3d60c,
+ 0x3d600, 0x3d60c,
0x3da00, 0x3da1c,
- 0x3de04, 0x3de20,
+ 0x3de00, 0x3de20,
0x3de38, 0x3de3c,
0x3de80, 0x3de80,
0x3de88, 0x3dea8,
0x3deb0, 0x3deb4,
0x3dec8, 0x3ded4,
0x3dfb8, 0x3e004,
- 0x3e208, 0x3e23c,
+ 0x3e200, 0x3e200,
+ 0x3e208, 0x3e240,
+ 0x3e248, 0x3e280,
+ 0x3e288, 0x3e2c0,
+ 0x3e2c8, 0x3e2fc,
0x3e600, 0x3e630,
0x3ea00, 0x3eabc,
0x3eb00, 0x3eb70,
@@ -1247,7 +1259,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3fcf0, 0x3fcfc,
0x40000, 0x4000c,
0x40040, 0x40068,
- 0x40080, 0x40144,
+ 0x4007c, 0x40144,
0x40180, 0x4018c,
0x40200, 0x40298,
0x402ac, 0x4033c,
@@ -1275,7 +1287,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x47800, 0x47814,
0x48000, 0x4800c,
0x48040, 0x48068,
- 0x48080, 0x48144,
+ 0x4807c, 0x48144,
0x48180, 0x4818c,
0x48200, 0x48298,
0x482ac, 0x4833c,
@@ -1309,6 +1321,344 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x51300, 0x51308,
};
+ static const unsigned int t6_reg_ranges[] = {
+ 0x1008, 0x114c,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x1250,
+ 0x1280, 0x133c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x302c,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x595c,
+ 0x5980, 0x598c,
+ 0x59b0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5ec0,
+ 0x5ec8, 0x5ec8,
+ 0x6000, 0x6040,
+ 0x6058, 0x6154,
+ 0x7700, 0x7798,
+ 0x77c0, 0x7880,
+ 0x78cc, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f88,
+ 0x8fb8, 0x911c,
+ 0x9400, 0x9470,
+ 0x9600, 0x971c,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xf008,
+ 0x11000, 0x11014,
+ 0x11048, 0x11110,
+ 0x11118, 0x1117c,
+ 0x11190, 0x11260,
+ 0x11300, 0x1130c,
+ 0x12000, 0x1205c,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x192b8,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c80,
+ 0x19c94, 0x19cbc,
+ 0x19ce4, 0x19d28,
+ 0x19d50, 0x19d78,
+ 0x19d94, 0x19dc8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fac,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30070,
+ 0x30100, 0x3015c,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x3088c,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309b8,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d00, 0x30d3c,
+ 0x30d44, 0x30d7c,
+ 0x30de0, 0x30de0,
+ 0x30e00, 0x30ed4,
+ 0x30f00, 0x30fa4,
+ 0x30fc0, 0x30fc4,
+ 0x31000, 0x31004,
+ 0x31080, 0x310fc,
+ 0x31208, 0x31220,
+ 0x3123c, 0x31254,
+ 0x31300, 0x31300,
+ 0x31308, 0x3131c,
+ 0x31338, 0x3133c,
+ 0x31380, 0x31380,
+ 0x31388, 0x313a8,
+ 0x313b4, 0x313b4,
+ 0x31400, 0x31420,
+ 0x31438, 0x3143c,
+ 0x31480, 0x31480,
+ 0x314a8, 0x314a8,
+ 0x314b0, 0x314b4,
+ 0x314c8, 0x314d4,
+ 0x31a40, 0x31a4c,
+ 0x31af0, 0x31b20,
+ 0x31b38, 0x31b3c,
+ 0x31b80, 0x31b80,
+ 0x31ba8, 0x31ba8,
+ 0x31bb0, 0x31bb4,
+ 0x31bc8, 0x31bd4,
+ 0x32140, 0x3218c,
+ 0x321f0, 0x32200,
+ 0x32218, 0x32218,
+ 0x32400, 0x32400,
+ 0x32408, 0x3241c,
+ 0x32618, 0x32620,
+ 0x32664, 0x32664,
+ 0x326a8, 0x326a8,
+ 0x326ec, 0x326ec,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b78,
+ 0x32c00, 0x32c00,
+ 0x32c08, 0x32c3c,
+ 0x32e00, 0x32e2c,
+ 0x32f00, 0x32f2c,
+ 0x33000, 0x330ac,
+ 0x330c0, 0x331ac,
+ 0x331c0, 0x332c4,
+ 0x332e4, 0x333c4,
+ 0x333e4, 0x334ac,
+ 0x334c0, 0x335ac,
+ 0x335c0, 0x336c4,
+ 0x336e4, 0x337c4,
+ 0x337e4, 0x337fc,
+ 0x33814, 0x33814,
+ 0x33854, 0x33868,
+ 0x33880, 0x3388c,
+ 0x338c0, 0x338d0,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x339ac,
+ 0x339c0, 0x33ac4,
+ 0x33ae4, 0x33b10,
+ 0x33b24, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c24, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34070,
+ 0x34100, 0x3415c,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x3488c,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349b8,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d00, 0x34d3c,
+ 0x34d44, 0x34d7c,
+ 0x34de0, 0x34de0,
+ 0x34e00, 0x34ed4,
+ 0x34f00, 0x34fa4,
+ 0x34fc0, 0x34fc4,
+ 0x35000, 0x35004,
+ 0x35080, 0x350fc,
+ 0x35208, 0x35220,
+ 0x3523c, 0x35254,
+ 0x35300, 0x35300,
+ 0x35308, 0x3531c,
+ 0x35338, 0x3533c,
+ 0x35380, 0x35380,
+ 0x35388, 0x353a8,
+ 0x353b4, 0x353b4,
+ 0x35400, 0x35420,
+ 0x35438, 0x3543c,
+ 0x35480, 0x35480,
+ 0x354a8, 0x354a8,
+ 0x354b0, 0x354b4,
+ 0x354c8, 0x354d4,
+ 0x35a40, 0x35a4c,
+ 0x35af0, 0x35b20,
+ 0x35b38, 0x35b3c,
+ 0x35b80, 0x35b80,
+ 0x35ba8, 0x35ba8,
+ 0x35bb0, 0x35bb4,
+ 0x35bc8, 0x35bd4,
+ 0x36140, 0x3618c,
+ 0x361f0, 0x36200,
+ 0x36218, 0x36218,
+ 0x36400, 0x36400,
+ 0x36408, 0x3641c,
+ 0x36618, 0x36620,
+ 0x36664, 0x36664,
+ 0x366a8, 0x366a8,
+ 0x366ec, 0x366ec,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b78,
+ 0x36c00, 0x36c00,
+ 0x36c08, 0x36c3c,
+ 0x36e00, 0x36e2c,
+ 0x36f00, 0x36f2c,
+ 0x37000, 0x370ac,
+ 0x370c0, 0x371ac,
+ 0x371c0, 0x372c4,
+ 0x372e4, 0x373c4,
+ 0x373e4, 0x374ac,
+ 0x374c0, 0x375ac,
+ 0x375c0, 0x376c4,
+ 0x376e4, 0x377c4,
+ 0x377e4, 0x377fc,
+ 0x37814, 0x37814,
+ 0x37854, 0x37868,
+ 0x37880, 0x3788c,
+ 0x378c0, 0x378d0,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x379ac,
+ 0x379c0, 0x37ac4,
+ 0x37ae4, 0x37b10,
+ 0x37b24, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c24, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x40040, 0x40040,
+ 0x40080, 0x40084,
+ 0x40100, 0x40100,
+ 0x40140, 0x401bc,
+ 0x40200, 0x40214,
+ 0x40228, 0x40228,
+ 0x40240, 0x40258,
+ 0x40280, 0x40280,
+ 0x40304, 0x40304,
+ 0x40330, 0x4033c,
+ 0x41304, 0x413dc,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x4407c,
+ 0x440c0, 0x4427c,
+ 0x442c0, 0x4447c,
+ 0x444c0, 0x4467c,
+ 0x446c0, 0x4487c,
+ 0x448c0, 0x44a7c,
+ 0x44ac0, 0x44c7c,
+ 0x44cc0, 0x44e7c,
+ 0x44ec0, 0x4507c,
+ 0x450c0, 0x451fc,
+ 0x45800, 0x45868,
+ 0x45880, 0x45884,
+ 0x458a0, 0x458b0,
+ 0x45a00, 0x45a68,
+ 0x45a80, 0x45a84,
+ 0x45aa0, 0x45ab0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x4782c,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x510b0,
+ 0x51300, 0x51324,
+ };
+
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -1328,6 +1678,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
break;
+ case CHELSIO_T6:
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ break;
+
default:
dev_err(adap->pdev_dev,
"Unsupported chip version %d\n", chip_version);
@@ -1374,17 +1729,16 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
}
/**
- * get_vpd_params - read VPD parameters from VPD EEPROM
+ * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
* @adapter: adapter to read
* @p: where to store the parameters
*
* Reads card parameters stored in VPD EEPROM.
*/
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- u32 cclk_param, cclk_val;
- int i, ret, addr;
- int ec, sn, pn;
+ int i, ret = 0, addr;
+ int ec, sn, pn, na;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -1392,6 +1746,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
+ /* Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
@@ -1457,6 +1814,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
FIND_VPD_KW(pn, "PN");
+ FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -1469,18 +1827,42 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);
+ memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+ strim((char *)p->na);
- /*
- * Ask firmware for the Core Clock since it knows how to translate the
+out:
+ vfree(vpd);
+ return ret;
+}
+
+/**
+ * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM and retrieves the Core
+ * Clock. This can only be called after a connection to the firmware
+ * is established.
+ */
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ u32 cclk_param, cclk_val;
+ int ret;
+
+ /* Grab the raw VPD parameters.
+ */
+ ret = t4_get_raw_vpd_params(adapter, p);
+ if (ret)
+ return ret;
+
+ /* Ask firmware for the Core Clock since it knows how to translate the
* Reference Clock ('V2') VPD field into a Core Clock value ...
*/
cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
- ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1, &cclk_param, &cclk_val);
-out:
- vfree(vpd);
if (ret)
return ret;
p->cclk = cclk_val;
@@ -1618,7 +2000,7 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
if (ret)
return ret;
if (byte_oriented)
- *data = (__force __u32) (htonl(*data));
+ *data = (__force __u32)(cpu_to_be32(*data));
}
return 0;
}
@@ -1941,7 +2323,8 @@ static bool t4_fw_matches_chip(const struct adapter *adap,
* which will keep us "honest" in the future ...
*/
if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
- (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
+ (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
+ (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
return true;
dev_err(adap->pdev_dev,
@@ -1979,7 +2362,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size not multiple of 512 bytes\n");
return -EINVAL;
}
- if (ntohs(hdr->len512) * 512 != size) {
+ if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
dev_err(adap->pdev_dev,
"FW image size differs from size in FW header\n");
return -EINVAL;
@@ -1993,7 +2376,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
for (csum = 0, i = 0; i < size / sizeof(csum); i++)
- csum += ntohl(p[i]);
+ csum += be32_to_cpu(p[i]);
if (csum != 0xffffffff) {
dev_err(adap->pdev_dev,
@@ -2012,7 +2395,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
* first page with a bad version.
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
- ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
@@ -2039,6 +2422,147 @@ out:
}
/**
+ * t4_phy_fw_ver - return current PHY firmware version
+ * @adap: the adapter
+ * @phy_fw_ver: return value buffer for PHY firmware version
+ *
+ * Returns the current version of external PHY firmware on the
+ * adapter.
+ */
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
+{
+ u32 param, val;
+ int ret;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0)
+ return ret;
+ *phy_fw_ver = val;
+ return 0;
+}
+
+/**
+ * t4_load_phy_fw - download port PHY firmware
+ * @adap: the adapter
+ * @win: the PCI-E Memory Window index to use for t4_memory_rw()
+ * @win_lock: the lock to use to guard the memory copy
+ * @phy_fw_version: function to check PHY firmware versions
+ * @phy_fw_data: the PHY firmware image to write
+ * @phy_fw_size: image size
+ *
+ * Transfer the specified PHY firmware to the adapter. If a non-NULL
+ * @phy_fw_version is supplied, then it will be used to determine if
+ * it's necessary to perform the transfer by comparing the version
+ * of any existing adapter PHY firmware with that of the passed in
+ * PHY firmware image. If @win_lock is non-NULL then it will be used
+ * around the call to t4_memory_rw() which transfers the PHY firmware
+ * to the adapter.
+ *
+ * A negative error number will be returned if an error occurs. If
+ * version number support is available and there's no need to upgrade
+ * the firmware, 0 will be returned. If firmware is successfully
+ *	transferred to the adapter, 1 will be returned.
+ *
+ * NOTE: some adapters only have local RAM to store the PHY firmware. As
+ * a result, a RESET of the adapter would cause that RAM to lose its
+ * contents. Thus, loading PHY firmware on such adapters must happen
+ * after any FW_RESET_CMDs ...
+ */
+int t4_load_phy_fw(struct adapter *adap,
+ int win, spinlock_t *win_lock,
+ int (*phy_fw_version)(const u8 *, size_t),
+ const u8 *phy_fw_data, size_t phy_fw_size)
+{
+ unsigned long mtype = 0, maddr = 0;
+ u32 param, val;
+ int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
+ int ret;
+
+ /* If we have version number support, then check to see if the adapter
+ * already has up-to-date PHY firmware loaded.
+ */
+ if (phy_fw_version) {
+ new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
+ ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+ if (ret < 0)
+ return ret;
+
+ if (cur_phy_fw_ver >= new_phy_fw_vers) {
+ CH_WARN(adap, "PHY Firmware already up-to-date, "
+ "version %#x\n", cur_phy_fw_ver);
+ return 0;
+ }
+ }
+
+ /* Ask the firmware where it wants us to copy the PHY firmware image.
+	 * The size of the file requires a special version of the READ command
+	 * which passes the file size via the values field of the PARAMS command
+	 * and retrieves the firmware's reply in that same values buffer.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+ val = phy_fw_size;
+ ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val, 1);
+ if (ret < 0)
+ return ret;
+ mtype = val >> 8;
+ maddr = (val & 0xff) << 16;
+
+ /* Copy the supplied PHY Firmware image to the adapter memory location
+ * allocated by the adapter firmware.
+ */
+ if (win_lock)
+ spin_lock_bh(win_lock);
+ ret = t4_memory_rw(adap, win, mtype, maddr,
+ phy_fw_size, (__be32 *)phy_fw_data,
+ T4_MEMORY_WRITE);
+ if (win_lock)
+ spin_unlock_bh(win_lock);
+ if (ret)
+ return ret;
+
+ /* Tell the firmware that the PHY firmware image has been written to
+ * RAM and it can now start copying it over to the PHYs. The chip
+ * firmware will RESET the affected PHYs as part of this operation
+ * leaving them running the new PHY firmware image.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+ ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val, 30000);
+
+ /* If we have version number support, then check to see that the new
+ * firmware got loaded properly.
+ */
+ if (phy_fw_version) {
+ ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+ if (ret < 0)
+ return ret;
+
+ if (cur_phy_fw_ver != new_phy_fw_vers) {
+ CH_WARN(adap, "PHY Firmware did not update: "
+ "version on adapter %#x, "
+ "version flashed %#x\n",
+ cur_phy_fw_ver, new_phy_fw_vers);
+ return -ENXIO;
+ }
+ }
+
+ return 1;
+}
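Illustrative aside, not part of the patch: a hypothetical caller of t4_load_phy_fw; my_phy_fw_version, the firmware blob and the window/lock choices are placeholders.

	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     my_phy_fw_version, fw->data, fw->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY firmware load failed: %d\n", ret);
	else if (ret == 0)
		dev_info(adap->pdev_dev, "PHY firmware already current\n");
	else
		dev_info(adap->pdev_dev, "PHY firmware updated\n");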
+
+/**
* t4_fwcache - firmware cache operation
* @adap: the adapter
* @op : the operation (flush or flush and invalidate)
@@ -2051,7 +2575,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.op_to_vfn =
cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
- FW_PARAMS_CMD_PFN_V(adap->fn) |
+ FW_PARAMS_CMD_PFN_V(adap->pf) |
FW_PARAMS_CMD_VFN_V(0));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.param[0].mnem =
@@ -2062,6 +2586,61 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+ unsigned int *pif_req_wrptr,
+ unsigned int *pif_rsp_wrptr)
+{
+ int i, j;
+ u32 cfg, val, req, rsp;
+
+ cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+ if (cfg & LADBGEN_F)
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+ val = t4_read_reg(adap, CIM_DEBUGSTS_A);
+ req = POLADBGWRPTR_G(val);
+ rsp = PILADBGWRPTR_G(val);
+ if (pif_req_wrptr)
+ *pif_req_wrptr = req;
+ if (pif_rsp_wrptr)
+ *pif_rsp_wrptr = rsp;
+
+ for (i = 0; i < CIM_PIFLA_SIZE; i++) {
+ for (j = 0; j < 6; j++) {
+ t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
+ PILADBGRDPTR_V(rsp));
+ *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
+ *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
+ req++;
+ rsp++;
+ }
+ req = (req + 2) & POLADBGRDPTR_M;
+ rsp = (rsp + 2) & PILADBGRDPTR_M;
+ }
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
+{
+ u32 cfg;
+ int i, j, idx;
+
+ cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+ if (cfg & LADBGEN_F)
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+ for (i = 0; i < CIM_MALA_SIZE; i++) {
+ for (j = 0; j < 5; j++) {
+ idx = 8 * i + j;
+ t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
+ PILADBGRDPTR_V(idx));
+ *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
+ *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
+ }
+ }
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
unsigned int i, j;
@@ -2082,7 +2661,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
FW_PORT_CAP_ANEG)
/**
- * t4_link_start - apply link configuration to MAC/PHY
+ * t4_link_l1cfg - apply link configuration to MAC/PHY
* @phy: the PHY to setup
* @mac: the MAC to setup
* @lc: the requested link configuration
@@ -2094,7 +2673,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
* - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
* otherwise do it later based on the outcome of auto-negotiation.
*/
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
@@ -2107,19 +2686,22 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
fc |= FW_PORT_CAP_FC_TX;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
- c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+ c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+ fc);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else
- c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2137,11 +2719,13 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
- c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+ c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2335,6 +2919,7 @@ static void tp_intr_handler(struct adapter *adapter)
static void sge_intr_handler(struct adapter *adapter)
{
u64 v;
+ u32 err;
static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE_F,
@@ -2343,8 +2928,6 @@ static void sge_intr_handler(struct adapter *adapter)
"SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
- { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
- { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
@@ -2357,13 +2940,19 @@ static void sge_intr_handler(struct adapter *adapter)
0 },
{ ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
- { ERR_EGR_CTXT_PRIO_F,
- "SGE too many priority egress contexts", -1, 0 },
{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 }
};
+ static struct intr_info t4t5_sge_intr_info[] = {
+ { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+ { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+ { ERR_EGR_CTXT_PRIO_F,
+ "SGE too many priority egress contexts", -1, 0 },
+ { 0 }
+ };
+
v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) {
@@ -2373,8 +2962,23 @@ static void sge_intr_handler(struct adapter *adapter)
t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
}
- if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
- v != 0)
+ v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
+ t4t5_sge_intr_info);
+
+ err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
+ if (err & ERROR_QID_VALID_F) {
+ dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
+ ERROR_QID_G(err));
+ if (err & UNCAPTURED_ERROR_F)
+ dev_err(adapter->pdev_dev,
+ "SGE UNCAPTURED_ERROR set (clearing)\n");
+ t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
+ UNCAPTURED_ERROR_F);
+ }
+
+ if (v != 0)
t4_fatal_err(adapter);
}
@@ -2547,6 +3151,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
*/
static void le_intr_handler(struct adapter *adap)
{
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
static const struct intr_info le_intr_info[] = {
{ LIPMISS_F, "LE LIP miss", -1, 0 },
{ LIP0_F, "LE 0 LIP error", -1, 0 },
@@ -2556,7 +3161,18 @@ static void le_intr_handler(struct adapter *adap)
{ 0 }
};
- if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
+ static struct intr_info t6_le_intr_info[] = {
+ { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+ { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { TCAMINTPERR_F, "LE parity error", -1, 1 },
+ { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
+ (chip <= CHELSIO_T5) ?
+ le_intr_info : t6_le_intr_info))
t4_fatal_err(adap);
}
@@ -2825,7 +3441,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
pcie_intr_handler(adapter);
if (cause & MC_F)
mem_intr_handler(adapter, MEM_MC);
- if (!is_t4(adapter->params.chip) && (cause & MC1_S))
+ if (is_t5(adapter->params.chip) && (cause & MC1_F))
mem_intr_handler(adapter, MEM_MC1);
if (cause & EDC0_F)
mem_intr_handler(adapter, MEM_EDC0);
@@ -2871,17 +3487,18 @@ int t4_slow_intr_handler(struct adapter *adapter)
*/
void t4_intr_enable(struct adapter *adapter)
{
+ u32 val = 0;
u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
- ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
- ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
- EGRESS_SIZE_ERR_F);
+ DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
@@ -2945,18 +3562,18 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
struct fw_rss_ind_tbl_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_RSS_IND_TBL_CMD_VIID_V(viid));
- cmd.retval_len16 = htonl(FW_LEN16(cmd));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
while (n > 0) {
int nq = min(n, 32);
__be32 *qp = &cmd.iq0_to_iq2;
- cmd.niqid = htons(nq);
- cmd.startidx = htons(start);
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
start += nq;
n -= nq;
@@ -2974,7 +3591,7 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
if (++rsp >= rsp_end)
rsp = rspq;
- *qp++ = htonl(v);
+ *qp++ = cpu_to_be32(v);
nq -= 3;
}
@@ -3000,20 +3617,46 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
struct fw_rss_glb_config_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- c.retval_len16 = htonl(FW_LEN16(c));
+ c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
- c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+ c.u.manual.mode_pkd =
+ cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
c.u.basicvirtual.mode_pkd =
- htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
- c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+ c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
} else
return -EINVAL;
return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
+/**
+ * t4_config_vi_rss - configure per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: RSS flags
+ * @defq: id of the default RSS queue for the VI.
+ *
+ * Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq)
+{
+ struct fw_rss_vi_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
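/* Editorial sketch, not part of this patch: pointing a VI's RSS default
 * queue at a chosen ingress queue with the new t4_config_vi_rss() helper.
 * The wrapper below is hypothetical; a real caller would normally OR the
 * desired FW_RSS_VI_CONFIG_CMD_* enables into the flags argument, which is
 * left as 0 here to keep the example minimal.
 */
static int example_set_vi_defaultq(struct adapter *adap,
				   const struct port_info *pi,
				   unsigned int defq)
{
	return t4_config_vi_rss(adap, adap->mbox, pi->viid, 0, defq);
}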
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
@@ -3045,6 +3688,40 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
}
/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int ret, i;
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+
+ for (i = 0 ; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ (rw ? FW_CMD_READ_F :
+ FW_CMD_WRITE_F) |
+ FW_LDST_CMD_ADDRSPACE_V(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (!ret && rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+}
+
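/* Editorial sketch, not part of this patch: reading a single TP indirect
 * register through the firmware LDST path added above.  The helper is
 * static to this file, so the sketch assumes it lives in t4_hw.c as well;
 * rw == 1 asks for the value to be read back into the buffer, and the
 * direct-PIO fallback mirrors the pattern used by the callers below.
 */
static u32 example_read_tp_reg(struct adapter *adap, unsigned int reg_addr)
{
	u32 val = 0;

	if (adap->flags & FW_OK)		/* firmware is usable */
		t4_fw_tp_pio_rw(adap, &val, 1, reg_addr, 1);
	else					/* fall back to direct PIO */
		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
				 &val, 1, reg_addr);
	return val;
}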
+/**
* t4_read_rss_key - read the global RSS key
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
@@ -3053,8 +3730,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
+ if (adap->flags & FW_OK)
+ t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
+ else
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
}
/**
@@ -3069,11 +3749,32 @@ void t4_read_rss_key(struct adapter *adap, u32 *key)
*/
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
{
- t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
- if (idx >= 0 && idx < 16)
- t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
- KEYWRADDR_V(idx) | KEYWREN_F);
+ u8 rss_key_addr_cnt = 16;
+ u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
+
+ /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
+ * allows access to key addresses 16-63 by using KeyWrAddrX
+ * as index[5:4](upper 2) into key table
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
+ if (adap->flags & FW_OK)
+ t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
+ else
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDRX_V(idx >> 4) |
+ T6_VFWRADDR_V(idx) | KEYWREN_F);
+ else
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDR_V(idx) | KEYWREN_F);
+ }
}
/**
@@ -3088,8 +3789,12 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
{
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- valp, 1, TP_RSS_PF0_CONFIG_A + index);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, valp, 1,
+ TP_RSS_PF0_CONFIG_A + index, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ valp, 1, TP_RSS_PF0_CONFIG_A + index);
}
/**
@@ -3107,8 +3812,13 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
{
u32 vrt, mask, data;
- mask = VFWRADDR_V(VFWRADDR_M);
- data = VFWRADDR_V(index);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ mask = VFWRADDR_V(VFWRADDR_M);
+ data = VFWRADDR_V(index);
+ } else {
+ mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
+ data = T6_VFWRADDR_V(index);
+ }
/* Request that the index'th VF Table values be read into VFL/VFH.
*/
@@ -3119,10 +3829,15 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfl, 1, TP_RSS_VFL_CONFIG_A);
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfh, 1, TP_RSS_VFH_CONFIG_A);
+ if (adapter->flags & FW_OK) {
+ t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
+ t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
+ } else {
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfl, 1, TP_RSS_VFL_CONFIG_A);
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfh, 1, TP_RSS_VFH_CONFIG_A);
+ }
}
/**
@@ -3135,8 +3850,11 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
{
u32 pfmap;
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmap, 1, TP_RSS_PF_MAP_A);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmap, 1, TP_RSS_PF_MAP_A);
return pfmap;
}
@@ -3150,8 +3868,11 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
u32 pfmask;
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmask, 1, TP_RSS_PF_MSK_A);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmask, 1, TP_RSS_PF_MSK_A);
return pfmask;
}
@@ -3176,18 +3897,18 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
if (v4) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
- v4->tcpOutRsts = STAT(OUT_RST);
- v4->tcpInSegs = STAT64(IN_SEG);
- v4->tcpOutSegs = STAT64(OUT_SEG);
- v4->tcpRetransSegs = STAT64(RXT_SEG);
+ v4->tcp_out_rsts = STAT(OUT_RST);
+ v4->tcp_in_segs = STAT64(IN_SEG);
+ v4->tcp_out_segs = STAT64(OUT_SEG);
+ v4->tcp_retrans_segs = STAT64(RXT_SEG);
}
if (v6) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
- v6->tcpOutRsts = STAT(OUT_RST);
- v6->tcpInSegs = STAT64(IN_SEG);
- v6->tcpOutSegs = STAT64(OUT_SEG);
- v6->tcpRetransSegs = STAT64(RXT_SEG);
+ v6->tcp_out_rsts = STAT(OUT_RST);
+ v6->tcp_in_segs = STAT64(IN_SEG);
+ v6->tcp_out_segs = STAT64(OUT_SEG);
+ v6->tcp_retrans_segs = STAT64(RXT_SEG);
}
#undef STAT64
#undef STAT
@@ -3195,6 +3916,130 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
}
/**
+ * t4_tp_get_err_stats - read TP's error MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+	/* T6 and later have 2 channels */
+ if (adap->params.arch.nchan == NCHAN) {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, 8,
+ TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, 4,
+ TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, 4,
+ TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, 4,
+ TP_MIB_TCP_V6IN_ERR_0_A);
+ } else {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, 2,
+ TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_chan_drops, 2,
+ TP_MIB_OFD_CHN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, 2,
+ TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
+ }
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
+}
+
+/**
+ * t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's CPL counters.
+ */
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+	/* T6 and later have 2 channels */
+ if (adap->params.arch.nchan == NCHAN) {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ 8, TP_MIB_CPL_IN_REQ_0_A);
+ } else {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ 2, TP_MIB_CPL_IN_REQ_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+ 2, TP_MIB_CPL_OUT_RSP_0_A);
+ }
+}
+
+/**
+ * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's RDMA counters.
+ */
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
+ 2, TP_MIB_RQE_DFR_PKT_A);
+}
+
+/**
+ * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ * @adap: the adapter
+ * @idx: the port index
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's FCoE counters for the selected port.
+ */
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st)
+{
+ u32 val[2];
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
+ 1, TP_MIB_FCOE_DDP_0_A + idx);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
+ 1, TP_MIB_FCOE_DROP_0_A + idx);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+ 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
+ st->octets_ddp = ((u64)val[0] << 32) | val[1];
+}
+
+/**
+ * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+ u32 val[4];
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
+ TP_MIB_USM_PKTS_A);
+ st->frames = val[0];
+ st->drops = val[1];
+ st->octets = ((u64)val[2] << 32) | val[3];
+}
+
+/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
* @mtus: where to store the MTU values
@@ -3346,6 +4191,52 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
}
}
+/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
+ * clocks. The formula is
+ *
+ * bytes/s = bytes256 * 256 * ClkFreq / 4096
+ *
+ * which is equivalent to
+ *
+ * bytes/s = 62.5 * bytes256 * ClkFreq_ms
+ */
+static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
+{
+ u64 v = bytes256 * adap->params.vpd.cclk;
+
+ return v * 62 + v / 2;
+}
+
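/* Worked example (editorial, not part of this patch): with a 250,000 kHz
 * core clock (cclk is stored in kHz) and bytes256 == 4, the exact rate is
 *
 *	4 * 256 bytes every 4096 clocks
 *	  = 0.25 bytes/clock * 250,000,000 clocks/s
 *	  = 62,500,000 bytes/s
 *
 * and chan_rate() computes v = 4 * 250000 = 1,000,000, then
 * v * 62 + v / 2 = 62,000,000 + 500,000 = 62,500,000, i.e. the integer
 * form of 62.5 * v that avoids floating point in the kernel.
 */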
+/**
+ * t4_get_chan_txrate - get the current per channel Tx rates
+ * @adap: the adapter
+ * @nic_rate: rates for NIC traffic
+ * @ofld_rate: rates for offloaded traffic
+ *
+ * Return the current Tx rates in bytes/s for NIC and offloaded traffic
+ * for each channel.
+ */
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
+{
+ u32 v;
+
+ v = t4_read_reg(adap, TP_TX_TRATE_A);
+ nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
+ nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
+ if (adap->params.arch.nchan == NCHAN) {
+ nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
+ nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
+ }
+
+ v = t4_read_reg(adap, TP_TX_ORATE_A);
+ ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
+ ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
+ if (adap->params.arch.nchan == NCHAN) {
+ ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
+ ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
+ }
+}
+
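/* Editorial sketch, not part of this patch: sampling the per-channel Tx
 * rates.  Both arrays must hold NCHAN (4) entries since T4/T5 report four
 * channels; on chips with params.arch.nchan == 2 only the first two slots
 * are written.
 */
static void example_dump_txrates(struct adapter *adap)
{
	u64 nic_rate[NCHAN], ofld_rate[NCHAN];
	unsigned int i;

	t4_get_chan_txrate(adap, nic_rate, ofld_rate);
	for (i = 0; i < adap->params.arch.nchan; i++)
		dev_info(adap->pdev_dev,
			 "chan %u: NIC %llu B/s, offload %llu B/s\n", i,
			 (unsigned long long)nic_rate[i],
			 (unsigned long long)ofld_rate[i]);
}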
/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
@@ -3401,7 +4292,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
- * get_mps_bg_map - return the buffer groups associated with a port
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
*
@@ -3409,7 +4300,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
* with the given port. Bit i is set if buffer group i is used by the
* port.
*/
-static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
@@ -3451,6 +4342,28 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
}
/**
+ * t4_get_port_stats_offset - collect port stats relative to a previous
+ * snapshot
+ * @adap: The adapter
+ * @idx: The port
+ * @stats: Current stats to fill
+ * @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset)
+{
+ u64 *s, *o;
+ int i;
+
+ t4_get_port_stats(adap, idx, stats);
+ for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+ i < (sizeof(struct port_stats) / sizeof(u64));
+ i++, s++, o++)
+ *s -= *o;
+}
+
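/* Editorial sketch, not part of this patch: using the offset variant to
 * report only the traffic seen since a baseline snapshot.  The two
 * port_stats buffers are hypothetical; in practice the baseline would
 * typically be captured when the interface is brought up.
 */
static void example_stats_since_baseline(struct adapter *adap, int port,
					 struct port_stats *baseline,
					 struct port_stats *delta)
{
	/* First call records the baseline ... */
	t4_get_port_stats(adap, port, baseline);

	/* ... later calls return counters relative to that baseline. */
	t4_get_port_stats_offset(adap, port, delta, baseline);
}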
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -3460,7 +4373,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
*/
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
- u32 bgmap = get_mps_bg_map(adap, idx);
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
t4_read_reg64(adap, \
@@ -3534,103 +4447,51 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
}
/**
- * t4_wol_magic_enable - enable/disable magic packet WoL
- * @adap: the adapter
- * @port: the physical port index
- * @addr: MAC address expected in magic packets, %NULL to disable
- *
- * Enables/disables magic packet wake-on-LAN for the selected port.
- */
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
- const u8 *addr)
-{
- u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
-
- if (is_t4(adap->params.chip)) {
- mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
- mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
- } else {
- mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
- mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
- }
-
- if (addr) {
- t4_write_reg(adap, mag_id_reg_l,
- (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | addr[5]);
- t4_write_reg(adap, mag_id_reg_h,
- (addr[0] << 8) | addr[1]);
- }
- t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
- addr ? MAGICEN_F : 0);
-}
-
-/**
- * t4_wol_pat_enable - enable/disable pattern-based WoL
+ * t4_get_lb_stats - collect loopback port statistics
* @adap: the adapter
- * @port: the physical port index
- * @map: bitmap of which HW pattern filters to set
- * @mask0: byte mask for bytes 0-63 of a packet
- * @mask1: byte mask for bytes 64-127 of a packet
- * @crc: Ethernet CRC for selected bytes
- * @enable: enable/disable switch
+ * @idx: the loopback port index
+ * @p: the stats structure to fill
*
- * Sets the pattern filters indicated in @map to mask out the bytes
- * specified in @mask0/@mask1 in received packets and compare the CRC of
- * the resulting packet against @crc. If @enable is %true pattern-based
- * WoL is enabled, otherwise disabled.
+ * Return HW statistics for the given loopback port.
*/
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
- u64 mask0, u64 mask1, unsigned int crc, bool enable)
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
- int i;
- u32 port_cfg_reg;
-
- if (is_t4(adap->params.chip))
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
- else
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
-
- if (!enable) {
- t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
- return 0;
- }
- if (map > 0xff)
- return -EINVAL;
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
-#define EPIO_REG(name) \
+#define GET_STAT(name) \
+ t4_read_reg64(adap, \
(is_t4(adap->params.chip) ? \
- PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
- T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
-
- t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
- t4_write_reg(adap, EPIO_REG(DATA2), mask1);
- t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
-
- for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
- if (!(map & 1))
- continue;
+ PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
+ T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
- /* write byte masks */
- t4_write_reg(adap, EPIO_REG(DATA0), mask0);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
- t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
- return -ETIMEDOUT;
-
- /* write CRC */
- t4_write_reg(adap, EPIO_REG(DATA0), crc);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
- t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
- return -ETIMEDOUT;
- }
-#undef EPIO_REG
+ p->octets = GET_STAT(BYTES);
+ p->frames = GET_STAT(FRAMES);
+ p->bcast_frames = GET_STAT(BCAST);
+ p->mcast_frames = GET_STAT(MCAST);
+ p->ucast_frames = GET_STAT(UCAST);
+ p->error_frames = GET_STAT(ERROR);
+
+ p->frames_64 = GET_STAT(64B);
+ p->frames_65_127 = GET_STAT(65B_127B);
+ p->frames_128_255 = GET_STAT(128B_255B);
+ p->frames_256_511 = GET_STAT(256B_511B);
+ p->frames_512_1023 = GET_STAT(512B_1023B);
+ p->frames_1024_1518 = GET_STAT(1024B_1518B);
+ p->frames_1519_max = GET_STAT(1519B_MAX);
+ p->drop = GET_STAT(DROP_FRAMES);
+
+ p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+ p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+ p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+ p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+ p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+ p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+ p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+ p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
- return 0;
+#undef GET_STAT
+#undef GET_STAT_COM
}
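/* Editorial sketch, not part of this patch: reporting loopback drop counts
 * for every port.  It assumes, as elsewhere in this file, that loopback
 * ports are indexed the same way as the physical ports.
 */
static void example_dump_lb_drops(struct adapter *adap)
{
	struct lb_port_stats s;
	int i;

	for_each_port(adap, i) {
		t4_get_lb_stats(adap, i, &s);
		dev_info(adap->pdev_dev, "loopback %d: %llu dropped frames\n",
			 i, (unsigned long long)s.drop);
	}
}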
/* t4_mk_filtdelwr - create a delete filter WR
@@ -3644,33 +4505,38 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
memset(wr, 0, sizeof(*wr));
- wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
- wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
- wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
- FW_FILTER_WR_NOREPLY_V(qid < 0));
- wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
+ wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
+ wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
+ wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
+ FW_FILTER_WR_NOREPLY_V(qid < 0));
+ wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
if (qid >= 0)
- wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
+ wr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
#define INIT_CMD(var, cmd, rd_wr) do { \
- (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
- FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
- (var).retval_len16 = htonl(FW_LEN16(var)); \
+ (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
+ FW_CMD_REQUEST_F | \
+ FW_CMD_##rd_wr##_F); \
+ (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val)
{
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F |
- FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.addrval.addr = htonl(addr);
- c.u.addrval.val = htonl(val);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.addrval.addr = cpu_to_be32(addr);
+ c.u.addrval.val = cpu_to_be32(val);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3690,19 +4556,22 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp)
{
int ret;
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
- FW_LDST_CMD_MMD_V(mmd));
- c.u.mdio.raddr = htons(reg);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
+ c.u.mdio.raddr = cpu_to_be16(reg);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0)
- *valp = ntohs(c.u.mdio.rval);
+ *valp = be16_to_cpu(c.u.mdio.rval);
return ret;
}
@@ -3720,16 +4589,19 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val)
{
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
- FW_LDST_CMD_MMD_V(mmd));
- c.u.mdio.raddr = htons(reg);
- c.u.mdio.rval = htons(val);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
+ c.u.mdio.raddr = cpu_to_be16(reg);
+ c.u.mdio.rval = cpu_to_be16(val);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3841,6 +4713,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
}
/**
+ * t4_sge_ctxt_flush - flush the SGE context cache
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a FW command through the given mailbox to flush the
+ * SGE context cache.
+ */
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+{
+ int ret;
+ u32 ldst_addrspace;
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ return ret;
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -3863,11 +4761,11 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
- c.err_to_clearinit = htonl(
+ c.err_to_clearinit = cpu_to_be32(
FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
- FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
- FW_HELLO_CMD_MBMASTER_M) |
+ FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
+ mbox : FW_HELLO_CMD_MBMASTER_M) |
FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
FW_HELLO_CMD_CLEARINIT_F);
@@ -3888,7 +4786,7 @@ retry:
return ret;
}
- v = ntohl(c.err_to_clearinit);
+ v = be32_to_cpu(c.err_to_clearinit);
master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
if (state) {
if (v & FW_HELLO_CMD_ERR_F)
@@ -4017,7 +4915,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
- c.val = htonl(reset);
+ c.val = cpu_to_be32(reset);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4050,8 +4948,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
- c.val = htonl(PIORST_F | PIORSTMODE_F);
- c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
+ c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
+ c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4190,7 +5088,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
* the newly loaded firmware will handle this right by checking
* its header flags to see if it advertises the capability.
*/
- reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+ reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
return t4_fw_restart(adap, mbox, reset);
}
@@ -4321,7 +5219,7 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
}
/**
- * t4_query_params - query FW or device parameters
+ * t4_query_params_rw - query FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
@@ -4329,13 +5227,14 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
+ * @rw: when set, send the current @val contents along with the query
*
* Reads the value of FW or device parameters. Up to 7 parameters can be
* queried at once.
*/
-int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
- unsigned int vf, unsigned int nparams, const u32 *params,
- u32 *val)
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val, int rw)
{
int i, ret;
struct fw_params_cmd c;
@@ -4345,22 +5244,35 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- for (i = 0; i < nparams; i++, p += 2)
- *p = htonl(*params++);
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ for (i = 0; i < nparams; i++) {
+ *p++ = cpu_to_be32(*params++);
+ if (rw)
+ *p = cpu_to_be32(*(val + i));
+ p++;
+ }
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0)
for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
- *val++ = ntohl(*p);
+ *val++ = be32_to_cpu(*p);
return ret;
}
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
+
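/* Editorial sketch, not part of this patch: querying one firmware
 * parameter through the read-only wrapper.  "encoded_param" stands in for
 * a mnemonic already built with the FW_PARAMS_* macros from t4fw_api.h;
 * adap->pf selects our physical function and vf 0 means the query is not
 * VF-scoped.
 */
static int example_query_one_param(struct adapter *adap, u32 encoded_param,
				   u32 *val)
{
	u32 param = encoded_param;

	return t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, val);
}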
/**
- * t4_set_params_nosleep - sets FW or device parameters
+ * t4_set_params_timeout - sets FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
@@ -4368,15 +5280,15 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
+ * @timeout: timeout for the mailbox command
*
- * Does not ever sleep
* Sets the value of FW or device parameters. Up to 7 parameters can be
* specified at once.
*/
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
- const u32 *val)
+ const u32 *val, int timeout)
{
struct fw_params_cmd c;
__be32 *p = &c.param[0].mnem;
@@ -4386,9 +5298,9 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
- FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
while (nparams--) {
@@ -4396,7 +5308,7 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
*p++ = cpu_to_be32(*val++);
}
- return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}
/**
@@ -4416,23 +5328,8 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
const u32 *val)
{
- struct fw_params_cmd c;
- __be32 *p = &c.param[0].mnem;
-
- if (nparams > 7)
- return -EINVAL;
-
- memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- while (nparams--) {
- *p++ = htonl(*params++);
- *p++ = htonl(*val++);
- }
-
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+ FW_CMD_MAX_TIMEOUT);
}
/**
@@ -4465,20 +5362,21 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_pfvf_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
- FW_PFVF_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
- FW_PFVF_CMD_NIQ_V(rxq));
- c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
- FW_PFVF_CMD_PMASK_V(pmask) |
- FW_PFVF_CMD_NEQ_V(txq));
- c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
- FW_PFVF_CMD_NEXACTF_V(nexact));
- c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
- FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
- FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
+ FW_PFVF_CMD_VFN_V(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
+ FW_PFVF_CMD_NIQ_V(rxq));
+ c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
+ FW_PFVF_CMD_PMASK_V(pmask) |
+ FW_PFVF_CMD_NEQ_V(txq));
+ c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
+ FW_PFVF_CMD_NVI_V(vi) |
+ FW_PFVF_CMD_NEXACTF_V(nexact));
+ c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
+ FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4507,10 +5405,10 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
struct fw_vi_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
c.portid_pkd = FW_VI_CMD_PORTID_V(port);
c.nmac = nmac - 1;
@@ -4532,8 +5430,35 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
}
if (rss_size)
- *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
- return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
+ *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+ return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) |
+ FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
+ c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
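/* Editorial sketch, not part of this patch: releasing a VI that was
 * obtained earlier with t4_alloc_vi().  The pf/vf pair must match the one
 * used at allocation time and viid is the value t4_alloc_vi() returned.
 */
static int example_release_vi(struct adapter *adap, unsigned int pf,
			      unsigned int vf, unsigned int viid)
{
	return t4_free_vi(adap, adap->mbox, pf, vf, viid);
}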
/**
@@ -4569,14 +5494,16 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
- c.retval_len16 = htonl(FW_LEN16(c));
- c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
- FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_RXMODE_CMD_VIID_V(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.mtu_to_vlanexen =
+ cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -4606,43 +5533,71 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
- int i, ret;
+ int offset, ret = 0;
struct fw_vi_mac_cmd c;
- struct fw_vi_mac_exact *p;
- unsigned int max_naddr = is_t4(adap->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int nfilters = 0;
+ unsigned int max_naddr = adap->params.arch.mps_tcam_size;
+ unsigned int rem = naddr;
- if (naddr > 7)
+ if (naddr > max_naddr)
return -EINVAL;
- memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
- FW_VI_MAC_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
- FW_CMD_LEN16_V((naddr + 2) / 2));
-
- for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
- FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
- memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
- }
+ for (offset = 0; offset < naddr ; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
+ rem : ARRAY_SIZE(c.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
- ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
- if (ret)
- return ret;
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(free) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+ FW_CMD_LEN16_V(len16));
+
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx =
+ cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(
+ FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset + i],
+ sizeof(p->macaddr));
+ }
- for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+ /* It's okay if we run out of space in our MAC address arena.
+ * Some of the addresses we submit may get stored so we need
+ * to run through the reply to see what the results were ...
+ */
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret && ret != -FW_ENOMEM)
+ break;
- if (idx)
- idx[i] = index >= max_naddr ? 0xffff : index;
- if (index < max_naddr)
- ret++;
- else if (hash)
- *hash |= (1ULL << hash_mac_addr(addr[i]));
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_G(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset + i] = (index >= max_naddr ?
+ 0xffff : index);
+ if (index < max_naddr)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL <<
+ hash_mac_addr(addr[offset + i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
}
+
+ if (ret == 0 || ret == -FW_ENOMEM)
+ ret = nfilters;
return ret;
}
@@ -4671,26 +5626,25 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
- unsigned int max_mac_addr = is_t4(adap->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
- FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
- FW_VI_MAC_CMD_IDX_V(idx));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
+ FW_VI_MAC_CMD_IDX_V(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
- ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+ ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
}
@@ -4714,11 +5668,12 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_mac_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
- FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
- FW_CMD_LEN16_V(1));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+ FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+ FW_CMD_LEN16_V(1));
c.u.hash.hashvec = cpu_to_be64(vec);
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -4741,12 +5696,13 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
- FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
- FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+ FW_VI_ENABLE_CMD_EEN_V(tx_en) |
+ FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
+ FW_LEN16(c));
return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4781,10 +5737,11 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
- c.blinkdur = htons(nblinks);
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
+ c.blinkdur = cpu_to_be16(nblinks);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4808,14 +5765,14 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_iq_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
- FW_IQ_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
- c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
- c.iqid = htons(iqid);
- c.fl0id = htons(fl0id);
- c.fl1id = htons(fl1id);
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4835,11 +5792,12 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_eth_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
- FW_EQ_ETH_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_ETH_CMD_PFN_V(pf) |
+ FW_EQ_ETH_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4859,11 +5817,12 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ctrl_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
- FW_EQ_CTRL_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
- c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_CTRL_CMD_PFN_V(pf) |
+ FW_EQ_CTRL_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
+ c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4883,11 +5842,12 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ofld_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
- FW_EQ_OFLD_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(pf) |
+ FW_EQ_OFLD_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4905,11 +5865,11 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (opcode == FW_PORT_CMD) { /* link/module state change message */
int speed = 0, fc = 0;
const struct fw_port_cmd *p = (void *)rpl;
- int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
+ int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
int port = adap->chan_map[chan];
struct port_info *pi = adap2pinfo(adap, port);
struct link_config *lc = &pi->link_cfg;
- u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+ u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
@@ -5043,6 +6003,22 @@ static int get_flash_params(struct adapter *adap)
return 0;
}
+static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
+{
+ u16 val;
+ u32 pcie_cap;
+
+ pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ pci_read_config_word(adapter->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
+ val |= range;
+ pci_write_config_word(adapter->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -5075,9 +6051,30 @@ int t4_prep_adapter(struct adapter *adapter)
switch (ver) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+ adapter->params.arch.sge_fl_db = DBPRIO_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
break;
case CHELSIO_T5:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 256;
+ adapter->params.arch.nchan = 2;
+ adapter->params.arch.vfcount = 256;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -5094,14 +6091,18 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.nports = 1;
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
+
+ /* Set pci completion timeout value to 4 seconds. */
+ set_pcie_completion_timeout(adapter, 0xd);
return 0;
}
/**
- * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
+ * @user: true if this request is for a user mode queue
* @pbar2_qoffset: BAR2 Queue Offset
* @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
*
@@ -5122,9 +6123,10 @@ int t4_prep_adapter(struct adapter *adapter)
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
* then these "Inferred Queue ID" register may not be used.
*/
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
+ int user,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid)
{
@@ -5132,9 +6134,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
u64 bar2_page_offset, bar2_qoffset;
unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
- /* T4 doesn't support BAR2 SGE Queue registers.
- */
- if (is_t4(adapter->params.chip))
+ /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
+ if (!user && is_t4(adapter->params.chip))
return -EINVAL;
/* Get our SGE Page Size parameters.
@@ -5154,7 +6155,7 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
* o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page.
*/
- bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
@@ -5223,18 +6224,19 @@ int t4_init_devlog_params(struct adapter *adap)
/* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
- devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+ devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
&devlog_cmd);
if (ret)
return ret;
- devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ devlog_meminfo =
+ be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
- dparams->size = ntohl(devlog_cmd.memsize_devlog);
+ dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
return 0;
}
@@ -5255,13 +6257,13 @@ int t4_init_sge_params(struct adapter *adapter)
*/
hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
s_hps = (HOSTPAGESIZEPF0_S +
- (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
*/
s_qpp = (QUEUESPERPAGEPF0_S +
- (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
@@ -5292,12 +6294,19 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP_A);
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.ingress_config, 1,
- TP_INGRESS_CONFIG_A);
+ if (adap->flags & FW_OK) {
+ t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A, 1);
+ t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A, 1);
+ } else {
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A);
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A);
+ }
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
@@ -5373,6 +6382,29 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
return field_shift;
}
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+ int i, ret;
+ struct fw_rss_vi_config_cmd rvc;
+
+ memset(&rvc, 0, sizeof(rvc));
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ rvc.op_to_viid =
+ cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
+ rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
+ ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+ if (ret)
+ return ret;
+ p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
+ }
+ return 0;
+}
+
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
@@ -5390,10 +6422,10 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_PORT_CMD_PORTID_V(j));
- c.action_to_len16 = htonl(
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(j));
+ c.action_to_len16 = cpu_to_be32(
FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(c));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -5411,22 +6443,23 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
adap->port[i]->dev_port = j;
- ret = ntohl(c.u.info.lstatus_to_modtype);
+ ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
FW_PORT_CMD_MDIOADDR_G(ret) : -1;
p->port_type = FW_PORT_CMD_PTYPE_G(ret);
p->mod_type = FW_PORT_MOD_TYPE_NA;
- rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
- rvc.retval_len16 = htonl(FW_LEN16(rvc));
+ rvc.op_to_viid =
+ cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+ rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
if (ret)
return ret;
- p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+ p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
- init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+ init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
j++;
}
return 0;
@@ -5717,3 +6750,130 @@ void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
cfg | adap->params.tp.la_mask);
}
+
+/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
+ * seconds). If we find one of the SGE Ingress DMA State Machines in the same
+ * state for more than the Warning Threshold then we'll issue a warning about
+ * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
+ * appears to be hung every Warning Repeat seconds until the situation clears.
+ * If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH 1
+#define SGE_IDMA_WARN_REPEAT 300
+
+/**
+ * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
+ * @adapter: the adapter
+ * @idma: the adapter IDMA Monitor state
+ *
+ * Initialize the state of an SGE Ingress DMA Monitor.
+ */
+void t4_idma_monitor_init(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma)
+{
+ /* Initialize the state variables for detecting an SGE Ingress DMA
+ * hang. The SGE has internal counters which count up on each clock
+ * tick whenever the SGE finds its Ingress DMA State Engines in the
+ * same state they were on the previous clock tick. The clock used is
+ * the Core Clock so we have a limit on the maximum "time" they can
+ * record; typically a very small number of seconds. For instance,
+ * with a 600MHz Core Clock, we can only count up to a bit more than
+ * 7s. So we'll synthesize a larger counter in order to not run the
+ * risk of having the "timers" overflow and give us the flexibility to
+ * maintain a Hung SGE State Machine of our own which operates across
+ * a longer time frame.
+ */
+ idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
+ idma->idma_stalled[0] = 0;
+ idma->idma_stalled[1] = 0;
+}
+
+/**
+ * t4_idma_monitor - monitor SGE Ingress DMA state
+ * @adapter: the adapter
+ * @idma: the adapter IDMA Monitor state
+ * @hz: number of ticks/second
+ * @ticks: number of ticks since the last IDMA Monitor call
+ */
+void t4_idma_monitor(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma,
+ int hz, int ticks)
+{
+ int i, idma_same_state_cnt[2];
+
+ /* Read the SGE Debug Ingress DMA Same State Count registers. These
+ * are counters inside the SGE which count up on each clock when the
+ * SGE finds its Ingress DMA State Engines in the same states they
+ * were in the previous clock. The counters will peg out at
+ * 0xffffffff without wrapping around so once they pass the 1s
+ * threshold they'll stay above that till the IDMA state changes.
+ */
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
+ idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
+ idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+
+ for (i = 0; i < 2; i++) {
+ u32 debug0, debug11;
+
+ /* If the Ingress DMA Same State Counter ("timer") is less
+ * than 1s, then we can reset our synthesized Stall Timer and
+ * continue. If we have previously emitted warnings about a
+ * potential stalled Ingress Queue, issue a note indicating
+ * that the Ingress Queue has resumed forward progress.
+ */
+ if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
+ if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
+ dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
+ "resumed after %d seconds\n",
+ i, idma->idma_qid[i],
+ idma->idma_stalled[i] / hz);
+ idma->idma_stalled[i] = 0;
+ continue;
+ }
+
+ /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+ * domain. The first time we get here it'll be because we
+ * passed the 1s Threshold; each additional time it'll be
+ * because the RX Timer Callback is being fired on its regular
+ * schedule.
+ *
+ * If the stall is below our Potential Hung Ingress Queue
+ * Warning Threshold, continue.
+ */
+ if (idma->idma_stalled[i] == 0) {
+ idma->idma_stalled[i] = hz;
+ idma->idma_warn[i] = 0;
+ } else {
+ idma->idma_stalled[i] += ticks;
+ idma->idma_warn[i] -= ticks;
+ }
+
+ if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
+ continue;
+
+ /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
+ */
+ if (idma->idma_warn[i] > 0)
+ continue;
+ idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
+
+ /* Read and save the SGE IDMA State and Queue ID information.
+ * We do this every time in case it changes across time ...
+ * can't be too careful ...
+ */
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
+ debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+ idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
+ debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+ idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+ dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
+ "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
+ i, idma->idma_qid[i], idma->idma_state[i],
+ idma->idma_stalled[i] / hz,
+ debug0, debug11);
+ t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
+ }
+}
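
A hedged sketch of how a driver might drive the monitor added above: t4_idma_monitor_init() once at setup, then t4_idma_monitor() from a periodic timer with the ticks elapsed since the previous call. Only the two function signatures come from this patch; the timer period and the function names are assumptions for illustration.

#define EXAMPLE_RX_TIMER_PERIOD	(2 * HZ)	/* assumed callback period */

static void example_monitor_setup(struct adapter *adap,
				  struct sge_idma_monitor_state *idma)
{
	t4_idma_monitor_init(adap, idma);	/* once, before arming the timer */
}

static void example_rx_timer_cb(struct adapter *adap,
				struct sge_idma_monitor_state *idma)
{
	/* hz = ticks per second, ticks = ticks elapsed since the last call */
	t4_idma_monitor(adap, idma, HZ, EXAMPLE_RX_TIMER_PERIOD);
}
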
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 380b15c04..c8488f430 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -52,8 +52,6 @@ enum {
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
- NWOL_PAT = 8, /* # of WoL patterns */
- WOL_PAT_LEN = 128, /* length of WoL patterns */
};
enum {
@@ -61,6 +59,8 @@ enum {
CIM_NUM_OBQ = 6, /* # of CIM OBQs */
CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+ CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
+ CIM_MALA_SIZE = 64, /* # of 160-bit words in CIM MA LA */
CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */
CIM_OBQ_SIZE = 128, /* # of 128-bit words in a CIM OBQ */
TPLA_SIZE = 128, /* # of 64-bit words in TP LA */
@@ -152,17 +152,33 @@ struct rsp_ctrl {
};
};
-#define RSPD_NEWBUF 0x80000000U
-#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
-#define RSPD_QID(x) RSPD_LEN(x)
+#define RSPD_NEWBUF_S 31
+#define RSPD_NEWBUF_V(x) ((x) << RSPD_NEWBUF_S)
+#define RSPD_NEWBUF_F RSPD_NEWBUF_V(1U)
-#define RSPD_GEN(x) ((x) >> 7)
-#define RSPD_TYPE(x) (((x) >> 4) & 3)
+#define RSPD_LEN_S 0
+#define RSPD_LEN_M 0x7fffffff
+#define RSPD_LEN_G(x) (((x) >> RSPD_LEN_S) & RSPD_LEN_M)
-#define V_QINTR_CNT_EN 0x0
-#define QINTR_CNT_EN 0x1
-#define QINTR_TIMER_IDX(x) ((x) << 1)
-#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+#define RSPD_QID_S RSPD_LEN_S
+#define RSPD_QID_M RSPD_LEN_M
+#define RSPD_QID_G(x) RSPD_LEN_G(x)
+
+#define RSPD_GEN_S 7
+
+#define RSPD_TYPE_S 4
+#define RSPD_TYPE_M 0x3
+#define RSPD_TYPE_G(x) (((x) >> RSPD_TYPE_S) & RSPD_TYPE_M)
+
+/* Rx queue interrupt deferral fields: counter enable and timer index */
+#define QINTR_CNT_EN_S 0
+#define QINTR_CNT_EN_V(x) ((x) << QINTR_CNT_EN_S)
+#define QINTR_CNT_EN_F QINTR_CNT_EN_V(1U)
+
+#define QINTR_TIMER_IDX_S 1
+#define QINTR_TIMER_IDX_M 0x7
+#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
+#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)
/*
* Flash layout.
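
A hedged sketch of the new _S/_V/_F/_G helpers in use; both patterns mirror the cxgb4vf conversions later in this patch (RSPD_TYPE_CPL_X comes from t4_values.h below), and the function and variable names are illustrative only.

static void example_rsp_fields(const struct rsp_ctrl *rc,
			       struct sge_rspq *rspq, unsigned int timer_idx)
{
	/* Extract the 2-bit response type from bits 5:4 of type_gen. */
	unsigned int rsp_type = RSPD_TYPE_G(rc->type_gen);

	if (rsp_type == RSPD_TYPE_CPL_X) {
		/* a CPL message follows in the response descriptor */
	}

	/* Compose interrupt deferral parameters: timer index plus counter enable. */
	rspq->intr_params = QINTR_TIMER_IDX_V(timer_idx) | QINTR_CNT_EN_F;
}
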
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 30a2f56e9..132cb8fc0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -634,26 +634,9 @@ struct cpl_tid_release {
struct cpl_tx_pkt_core {
__be32 ctrl0;
-#define TXPKT_VF(x) ((x) << 0)
-#define TXPKT_PF(x) ((x) << 8)
-#define TXPKT_VF_VLD (1 << 11)
-#define TXPKT_OVLAN_IDX(x) ((x) << 12)
-#define TXPKT_INTF(x) ((x) << 16)
-#define TXPKT_INS_OVLAN (1 << 21)
-#define TXPKT_OPCODE(x) ((x) << 24)
__be16 pack;
__be16 len;
__be64 ctrl1;
-#define TXPKT_CSUM_END(x) ((x) << 12)
-#define TXPKT_CSUM_START(x) ((x) << 20)
-#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
-#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
-#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
-#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
-#define TXPKT_VLAN(x) ((u64)(x) << 44)
-#define TXPKT_VLAN_VLD (1ULL << 60)
-#define TXPKT_IPCSUM_DIS (1ULL << 62)
-#define TXPKT_L4CSUM_DIS (1ULL << 63)
};
struct cpl_tx_pkt {
@@ -663,16 +646,69 @@ struct cpl_tx_pkt {
#define cpl_tx_pkt_xt cpl_tx_pkt
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define TXPKT_VF_S 0
+#define TXPKT_VF_V(x) ((x) << TXPKT_VF_S)
+
+#define TXPKT_PF_S 8
+#define TXPKT_PF_V(x) ((x) << TXPKT_PF_S)
+
+#define TXPKT_VF_VLD_S 11
+#define TXPKT_VF_VLD_V(x) ((x) << TXPKT_VF_VLD_S)
+#define TXPKT_VF_VLD_F TXPKT_VF_VLD_V(1U)
+
+#define TXPKT_OVLAN_IDX_S 12
+#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+
+#define TXPKT_INTF_S 16
+#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
+
+#define TXPKT_INS_OVLAN_S 21
+#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
+#define TXPKT_INS_OVLAN_F TXPKT_INS_OVLAN_V(1U)
+
+#define TXPKT_OPCODE_S 24
+#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define TXPKT_CSUM_END_S 12
+#define TXPKT_CSUM_END_V(x) ((x) << TXPKT_CSUM_END_S)
+
+#define TXPKT_CSUM_START_S 20
+#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)
+
+#define TXPKT_IPHDR_LEN_S 20
+#define TXPKT_IPHDR_LEN_V(x) ((__u64)(x) << TXPKT_IPHDR_LEN_S)
+
+#define TXPKT_CSUM_LOC_S 30
+#define TXPKT_CSUM_LOC_V(x) ((__u64)(x) << TXPKT_CSUM_LOC_S)
+
+#define TXPKT_ETHHDR_LEN_S 34
+#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
+
+#define T6_TXPKT_ETHHDR_LEN_S 32
+#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S)
+
+#define TXPKT_CSUM_TYPE_S 40
+#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
+
+#define TXPKT_VLAN_S 44
+#define TXPKT_VLAN_V(x) ((__u64)(x) << TXPKT_VLAN_S)
+
+#define TXPKT_VLAN_VLD_S 60
+#define TXPKT_VLAN_VLD_V(x) ((__u64)(x) << TXPKT_VLAN_VLD_S)
+#define TXPKT_VLAN_VLD_F TXPKT_VLAN_VLD_V(1ULL)
+
+#define TXPKT_IPCSUM_DIS_S 62
+#define TXPKT_IPCSUM_DIS_V(x) ((__u64)(x) << TXPKT_IPCSUM_DIS_S)
+#define TXPKT_IPCSUM_DIS_F TXPKT_IPCSUM_DIS_V(1ULL)
+
+#define TXPKT_L4CSUM_DIS_S 63
+#define TXPKT_L4CSUM_DIS_V(x) ((__u64)(x) << TXPKT_L4CSUM_DIS_S)
+#define TXPKT_L4CSUM_DIS_F TXPKT_L4CSUM_DIS_V(1ULL)
+
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
-#define LSO_TCPHDR_LEN(x) ((x) << 0)
-#define LSO_IPHDR_LEN(x) ((x) << 4)
-#define LSO_ETHHDR_LEN(x) ((x) << 16)
-#define LSO_IPV6(x) ((x) << 20)
-#define LSO_LAST_SLICE (1 << 22)
-#define LSO_FIRST_SLICE (1 << 23)
-#define LSO_OPCODE(x) ((x) << 24)
-#define LSO_T5_XFER_SIZE(x) ((x) << 0)
__be16 ipid_ofst;
__be16 mss;
__be32 seqno_offset;
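
A hedged sketch of building cpl_tx_pkt_core.ctrl1 with the new helpers. The chip check mirrors the cxgb4vf hwcsum() change later in this patch: T6 places the Ethernet-header-length field at bit 32 instead of 34, hence the separate T6_TXPKT_ETHHDR_LEN_V(). The function name is illustrative.

static u64 example_csum_ctrl(enum chip_type chip, unsigned int csum_type,
			     unsigned int l3hdr_len, unsigned int eth_xtra_len)
{
	u64 cntrl = TXPKT_CSUM_TYPE_V(csum_type) |
		    TXPKT_IPHDR_LEN_V(l3hdr_len);

	/* T6 moved the Ethernet header length field, so pick the right shift. */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		cntrl |= TXPKT_ETHHDR_LEN_V(eth_xtra_len);
	else
		cntrl |= T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

	return cntrl;
}
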
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 1a9a6f334..d7ca10692 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -153,6 +153,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 326674b19..375a82557 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -418,6 +418,20 @@
#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+#define SGE_ERROR_STATS_A 0x1100
+
+#define UNCAPTURED_ERROR_S 18
+#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S)
+#define UNCAPTURED_ERROR_F UNCAPTURED_ERROR_V(1U)
+
+#define ERROR_QID_VALID_S 17
+#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S)
+#define ERROR_QID_VALID_F ERROR_QID_VALID_V(1U)
+
+#define ERROR_QID_S 0
+#define ERROR_QID_M 0x1ffffU
+#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
@@ -448,8 +462,13 @@
#define SGE_STAT_MATCH_A 0x10e8
#define SGE_STAT_CFG_A 0x10ec
+#define STATMODE_S 2
+#define STATMODE_V(x) ((x) << STATMODE_S)
+
#define STATSOURCE_T5_S 9
+#define STATSOURCE_T5_M 0xfU
#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
#define SGE_DBFIFO_STATUS2_A 0x1118
@@ -705,6 +724,10 @@
#define REGISTER_S 0
#define REGISTER_V(x) ((x) << REGISTER_S)
+#define T6_ENABLE_S 31
+#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S)
+#define T6_ENABLE_F T6_ENABLE_V(1U)
+
#define PFNUM_S 0
#define PFNUM_V(x) ((x) << PFNUM_S)
@@ -1338,6 +1361,42 @@
#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
+#define TP_TX_ORATE_A 0x7ebc
+
+#define OFDRATE3_S 24
+#define OFDRATE3_M 0xffU
+#define OFDRATE3_G(x) (((x) >> OFDRATE3_S) & OFDRATE3_M)
+
+#define OFDRATE2_S 16
+#define OFDRATE2_M 0xffU
+#define OFDRATE2_G(x) (((x) >> OFDRATE2_S) & OFDRATE2_M)
+
+#define OFDRATE1_S 8
+#define OFDRATE1_M 0xffU
+#define OFDRATE1_G(x) (((x) >> OFDRATE1_S) & OFDRATE1_M)
+
+#define OFDRATE0_S 0
+#define OFDRATE0_M 0xffU
+#define OFDRATE0_G(x) (((x) >> OFDRATE0_S) & OFDRATE0_M)
+
+#define TP_TX_TRATE_A 0x7ed0
+
+#define TNLRATE3_S 24
+#define TNLRATE3_M 0xffU
+#define TNLRATE3_G(x) (((x) >> TNLRATE3_S) & TNLRATE3_M)
+
+#define TNLRATE2_S 16
+#define TNLRATE2_M 0xffU
+#define TNLRATE2_G(x) (((x) >> TNLRATE2_S) & TNLRATE2_M)
+
+#define TNLRATE1_S 8
+#define TNLRATE1_M 0xffU
+#define TNLRATE1_G(x) (((x) >> TNLRATE1_S) & TNLRATE1_M)
+
+#define TNLRATE0_S 0
+#define TNLRATE0_M 0xffU
+#define TNLRATE0_G(x) (((x) >> TNLRATE0_S) & TNLRATE0_M)
+
#define TP_VLAN_PRI_MAP_A 0x140
#define FRAGMENTATION_S 9
@@ -1399,6 +1458,8 @@
#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
#define TP_MIB_MAC_IN_ERR_0_A 0x0
+#define TP_MIB_HDR_IN_ERR_0_A 0x4
+#define TP_MIB_TCP_IN_ERR_0_A 0x8
#define TP_MIB_TCP_OUT_RST_A 0xc
#define TP_MIB_TCP_IN_SEG_HI_A 0x10
#define TP_MIB_TCP_IN_SEG_LO_A 0x11
@@ -1407,11 +1468,19 @@
#define TP_MIB_TCP_RXT_SEG_HI_A 0x14
#define TP_MIB_TCP_RXT_SEG_LO_A 0x15
#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_OFD_CHN_DROP_0_A 0x1c
#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
#define TP_MIB_TCP_V6OUT_RST_A 0x2c
#define TP_MIB_OFD_ARP_DROP_A 0x36
+#define TP_MIB_CPL_IN_REQ_0_A 0x38
+#define TP_MIB_CPL_OUT_RSP_0_A 0x3c
#define TP_MIB_TNL_DROP_0_A 0x44
+#define TP_MIB_FCOE_DDP_0_A 0x48
+#define TP_MIB_FCOE_DROP_0_A 0x4c
+#define TP_MIB_FCOE_BYTE_0_HI_A 0x50
#define TP_MIB_OFD_VLN_DROP_0_A 0x58
+#define TP_MIB_USM_PKTS_A 0x5c
+#define TP_MIB_RQE_DFR_PKT_A 0x64
#define ULP_TX_INT_CAUSE_A 0x8dcc
@@ -1572,6 +1641,7 @@
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
@@ -2054,6 +2124,11 @@
#define VFLKPIDX_M 0xffU
#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
+#define T6_VFWRADDR_S 8
+#define T6_VFWRADDR_M 0xffU
+#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S)
+#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M)
+
#define TP_RSS_CONFIG_CNG_A 0x7e04
#define TP_RSS_SECRET_KEY0_A 0x40
#define TP_RSS_PF0_CONFIG_A 0x30
@@ -2175,7 +2250,28 @@
#define MPS_RX_PERR_INT_CAUSE_A 0x11074
#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_DATA0_A 0xf000
+#define MPS_CLS_TCAM_DATA1_A 0xf004
+
+#define DMACH_S 0
+#define DMACH_M 0xffffU
+#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
+
#define MPS_CLS_TCAM_X_L_A 0xf008
+#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008
+
+#define CTLCMDTYPE_S 31
+#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S)
+#define CTLCMDTYPE_F CTLCMDTYPE_V(1U)
+
+#define CTLTCAMSEL_S 25
+#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S)
+
+#define CTLTCAMINDEX_S 17
+#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
+
+#define CTLXYBITSEL_S 16
+#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S)
#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
@@ -2184,6 +2280,45 @@
#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
#define MPS_CLS_SRAM_L_A 0xe000
+
+#define T6_MULTILISTEN0_S 26
+
+#define T6_SRAM_PRIO3_S 23
+#define T6_SRAM_PRIO3_M 0x7U
+#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M)
+
+#define T6_SRAM_PRIO2_S 20
+#define T6_SRAM_PRIO2_M 0x7U
+#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M)
+
+#define T6_SRAM_PRIO1_S 17
+#define T6_SRAM_PRIO1_M 0x7U
+#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M)
+
+#define T6_SRAM_PRIO0_S 14
+#define T6_SRAM_PRIO0_M 0x7U
+#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M)
+
+#define T6_SRAM_VLD_S 13
+#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S)
+#define T6_SRAM_VLD_F T6_SRAM_VLD_V(1U)
+
+#define T6_REPLICATE_S 12
+#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S)
+#define T6_REPLICATE_F T6_REPLICATE_V(1U)
+
+#define T6_PF_S 9
+#define T6_PF_M 0x7U
+#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M)
+
+#define T6_VF_VALID_S 8
+#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S)
+#define T6_VF_VALID_F T6_VF_VALID_V(1U)
+
+#define T6_VF_S 0
+#define T6_VF_M 0xffU
+#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M)
+
#define MPS_CLS_SRAM_H_A 0xe004
#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
@@ -2433,6 +2568,8 @@
#define CIM_F CIM_V(1U)
#define MC1_S 31
+#define MC1_V(x) ((x) << MC1_S)
+#define MC1_F MC1_V(1U)
#define PL_INT_ENABLE_A 0x19410
#define PL_INT_MAP0_A 0x19414
@@ -2463,6 +2600,18 @@
#define REV_V(x) ((x) << REV_S)
#define REV_G(x) (((x) >> REV_S) & REV_M)
+#define T6_UNKNOWNCMD_S 3
+#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
+#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)
+
+#define T6_LIP0_S 2
+#define T6_LIP0_V(x) ((x) << T6_LIP0_S)
+#define T6_LIP0_F T6_LIP0_V(1U)
+
+#define T6_LIPMISS_S 1
+#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
+#define T6_LIPMISS_F T6_LIPMISS_V(1U)
+
#define LE_DB_INT_CAUSE_A 0x19c3c
#define REQQPARERR_S 16
@@ -2485,6 +2634,14 @@
#define LIP0_V(x) ((x) << LIP0_S)
#define LIP0_F LIP0_V(1U)
+#define TCAMINTPERR_S 13
+#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
+#define TCAMINTPERR_F TCAMINTPERR_V(1U)
+
+#define SSRAMINTPERR_S 10
+#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
+#define SSRAMINTPERR_F SSRAMINTPERR_V(1U)
+
#define NCSI_INT_CAUSE_A 0x1a0d8
#define CIM_DM_PRTY_ERR_S 8
@@ -2638,6 +2795,33 @@
#define CIM_IBQ_DBG_DATA_A 0x7b68
#define CIM_OBQ_DBG_DATA_A 0x7b6c
+#define CIM_DEBUGCFG_A 0x7b70
+#define CIM_DEBUGSTS_A 0x7b74
+
+#define POLADBGRDPTR_S 23
+#define POLADBGRDPTR_M 0x1ffU
+#define POLADBGRDPTR_V(x) ((x) << POLADBGRDPTR_S)
+
+#define POLADBGWRPTR_S 16
+#define POLADBGWRPTR_M 0x1ffU
+#define POLADBGWRPTR_G(x) (((x) >> POLADBGWRPTR_S) & POLADBGWRPTR_M)
+
+#define PILADBGRDPTR_S 14
+#define PILADBGRDPTR_M 0x1ffU
+#define PILADBGRDPTR_V(x) ((x) << PILADBGRDPTR_S)
+
+#define PILADBGWRPTR_S 0
+#define PILADBGWRPTR_M 0x1ffU
+#define PILADBGWRPTR_G(x) (((x) >> PILADBGWRPTR_S) & PILADBGWRPTR_M)
+
+#define LADBGEN_S 12
+#define LADBGEN_V(x) ((x) << LADBGEN_S)
+#define LADBGEN_F LADBGEN_V(1U)
+
+#define CIM_PO_LA_DEBUGDATA_A 0x7b78
+#define CIM_PI_LA_DEBUGDATA_A 0x7b7c
+#define CIM_PO_LA_MADEBUGDATA_A 0x7b80
+#define CIM_PI_LA_MADEBUGDATA_A 0x7b84
#define UPDBGLARDEN_S 1
#define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S)
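
All of these additions follow the driver's _S/_M/_V/_F/_G naming convention (shift, mask, make-value, single-bit flag, get-field). A hedged sketch using the new SGE_ERROR_STATS_A fields; reading the register as a latched error queue ID is inferred from the field names, not stated by this patch, and the function name is illustrative.

static void example_sge_error_stats(struct adapter *adap)
{
	u32 v = t4_read_reg(adap, SGE_ERROR_STATS_A);

	if (v & ERROR_QID_VALID_F)		/* _F: test a single-bit flag */
		dev_info(adap->pdev_dev, "SGE error on queue %u\n",
			 ERROR_QID_G(v));	/* _G: extract a multi-bit field */
	if (v & UNCAPTURED_ERROR_F)
		dev_info(adap->pdev_dev, "additional SGE errors not captured\n");
}
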
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 19b2dcf6a..7bdee3bf7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -61,6 +61,30 @@
#define SGE_TIMERREGS 6
#define TIMERREG_COUNTER0_X 0
+#define FETCHBURSTMIN_64B_X 2
+
+#define FETCHBURSTMAX_256B_X 2
+#define FETCHBURSTMAX_512B_X 3
+
+#define HOSTFCMODE_STATUS_PAGE_X 2
+
+#define CIDXFLUSHTHRESH_32_X 5
+
+#define UPDATEDELIVERY_INTERRUPT_X 1
+
+#define RSPD_TYPE_FLBUF_X 0
+#define RSPD_TYPE_CPL_X 1
+#define RSPD_TYPE_INTR_X 2
+
+/* Congestion Manager Definitions.
+ */
+#define CONMCTXT_CNGTPMODE_S 19
+#define CONMCTXT_CNGTPMODE_V(x) ((x) << CONMCTXT_CNGTPMODE_S)
+#define CONMCTXT_CNGCHMAP_S 0
+#define CONMCTXT_CNGCHMAP_V(x) ((x) << CONMCTXT_CNGCHMAP_S)
+#define CONMCTXT_CNGTPMODE_CHANNEL_X 2
+#define CONMCTXT_CNGTPMODE_QUEUE_X 1
+
/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
* The User Doorbells are each 128 bytes in length with a Simple Doorbell at
* offsets 8x and a Write Combining single 64-byte Egress Queue Unit
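
A hedged sketch of composing a Congestion Manager context value with the new helpers; mapping in a single TX channel here is purely illustrative, and how the value reaches firmware (presumably via the FW_PARAMS_PARAM_DMAQ_CONM_CTXT parameter added later in this patch) is an assumption.

static u32 example_conm_ctxt(unsigned int tx_chan)
{
	/* Congestion is tracked per channel; map in just this queue's channel. */
	return CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X) |
	       CONMCTXT_CNGCHMAP_V(1 << tx_chan);
}
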
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 03fbfd1fb..ab4674684 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -772,7 +772,7 @@ struct fw_ldst_cmd {
} addrval;
struct fw_ldst_idctxt {
__be32 physid;
- __be32 msg_pkd;
+ __be32 msg_ctxtflush;
__be32 ctxt_data7;
__be32 ctxt_data6;
__be32 ctxt_data5;
@@ -788,15 +788,27 @@ struct fw_ldst_cmd {
__be16 vctl;
__be16 rval;
} mdio;
- struct fw_ldst_mps {
- __be16 fid_ctl;
- __be16 rplcpf_pkd;
- __be32 rplc127_96;
- __be32 rplc95_64;
- __be32 rplc63_32;
- __be32 rplc31_0;
- __be32 atrb;
- __be16 vlan[16];
+ union fw_ldst_mps {
+ struct fw_ldst_mps_rplc {
+ __be16 fid_idx;
+ __be16 rplcpf_pkd;
+ __be32 rplc255_224;
+ __be32 rplc223_192;
+ __be32 rplc191_160;
+ __be32 rplc159_128;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ } rplc;
+ struct fw_ldst_mps_atrb {
+ __be16 fid_mpsid;
+ __be16 r2[3];
+ __be32 r3[2];
+ __be32 r4;
+ __be32 atrb;
+ __be16 vlan[16];
+ } atrb;
} mps;
struct fw_ldst_func {
u8 access_ctl;
@@ -822,6 +834,10 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_MSG_S 31
#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
+#define FW_LDST_CMD_CTXTFLUSH_S 30
+#define FW_LDST_CMD_CTXTFLUSH_V(x) ((x) << FW_LDST_CMD_CTXTFLUSH_S)
+#define FW_LDST_CMD_CTXTFLUSH_F FW_LDST_CMD_CTXTFLUSH_V(1U)
+
#define FW_LDST_CMD_PADDR_S 8
#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S)
@@ -831,8 +847,8 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID_S 15
#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S)
-#define FW_LDST_CMD_CTL_S 0
-#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S)
+#define FW_LDST_CMD_IDX_S 0
+#define FW_LDST_CMD_IDX_V(x) ((x) << FW_LDST_CMD_IDX_S)
#define FW_LDST_CMD_RPLCPF_S 0
#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S)
@@ -1061,6 +1077,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_PHYFW = 0x0F,
FW_PARAMS_PARAM_DEV_DIAG = 0x11,
FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
@@ -1123,6 +1140,12 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+ FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+enum fw_params_param_dev_phyfw {
+ FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
+ FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
};
enum fw_params_param_dev_diag {
@@ -1377,6 +1400,7 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_IQFLINTCONGEN_S 27
#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S)
+#define FW_IQ_CMD_IQFLINTCONGEN_F FW_IQ_CMD_IQFLINTCONGEN_V(1U)
#define FW_IQ_CMD_IQFLINTISCSIC_S 26
#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
@@ -1399,6 +1423,7 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_FL0CONGCIF_S 11
#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S)
+#define FW_IQ_CMD_FL0CONGCIF_F FW_IQ_CMD_FL0CONGCIF_V(1U)
#define FW_IQ_CMD_FL0ONCHIP_S 10
#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S)
@@ -1589,6 +1614,7 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_FETCHRO_S 22
#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S)
+#define FW_EQ_ETH_CMD_FETCHRO_F FW_EQ_ETH_CMD_FETCHRO_V(1U)
#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20
#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
@@ -2526,13 +2552,8 @@ enum fw_port_mod_sub_type {
FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
};
-/* port stats */
-#define FW_NUM_PORT_STATS 50
-#define FW_NUM_PORT_TX_STATS 23
-#define FW_NUM_PORT_RX_STATS 27
-
enum fw_port_stats_tx_index {
- FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_BYTES_IX = 0,
FW_STAT_TX_PORT_FRAMES_IX,
FW_STAT_TX_PORT_BCAST_IX,
FW_STAT_TX_PORT_MCAST_IX,
@@ -2554,11 +2575,12 @@ enum fw_port_stats_tx_index {
FW_STAT_TX_PORT_PPP4_IX,
FW_STAT_TX_PORT_PPP5_IX,
FW_STAT_TX_PORT_PPP6_IX,
- FW_STAT_TX_PORT_PPP7_IX
+ FW_STAT_TX_PORT_PPP7_IX,
+ FW_NUM_PORT_TX_STATS
};
enum fw_port_stat_rx_index {
- FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_BYTES_IX = 0,
FW_STAT_RX_PORT_FRAMES_IX,
FW_STAT_RX_PORT_BCAST_IX,
FW_STAT_RX_PORT_MCAST_IX,
@@ -2584,9 +2606,14 @@ enum fw_port_stat_rx_index {
FW_STAT_RX_PORT_PPP5_IX,
FW_STAT_RX_PORT_PPP6_IX,
FW_STAT_RX_PORT_PPP7_IX,
- FW_STAT_RX_PORT_LESS_64B_IX
+ FW_STAT_RX_PORT_LESS_64B_IX,
+ FW_STAT_RX_PORT_MAC_ERROR_IX,
+ FW_NUM_PORT_RX_STATS
};
+/* port stats */
+#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
+
struct fw_port_stats_cmd {
__be32 op_to_portid;
__be32 retval_len16;
@@ -3015,7 +3042,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
- FW_HDR_CHIP_T5
+ FW_HDR_CHIP_T5,
+ FW_HDR_CHIP_T6
};
#define FW_HDR_FW_VER_MAJOR_S 24
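
The reworked stats enums use the sentinel-count pattern: FW_NUM_PORT_TX_STATS and FW_NUM_PORT_RX_STATS become the final enumerators, so they track the lists automatically and FW_NUM_PORT_STATS is derived rather than hard-coded. A minimal sketch of why that helps; the array names are illustrative.

/* Arrays sized from the sentinels can never fall out of step with the enums:
 * adding FW_STAT_RX_PORT_MAC_ERROR_IX above grew FW_NUM_PORT_RX_STATS (and
 * hence FW_NUM_PORT_STATS) without touching any other definition.
 */
static u64 example_tx_stats[FW_NUM_PORT_TX_STATS];
static u64 example_rx_stats[FW_NUM_PORT_RX_STATS];
static u64 example_all_stats[FW_NUM_PORT_STATS];
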
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index b9d1cbac0..32b213559 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -45,4 +45,9 @@
#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00
+#define T6FW_VERSION_MAJOR 0x01
+#define T6FW_VERSION_MINOR 0x0D
+#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_BUILD 0x00
+
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1d893b0b7..b2b5e5bbe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1021,7 +1021,7 @@ static int closest_thres(const struct sge *s, int thres)
static unsigned int qtimer_val(const struct adapter *adapter,
const struct sge_rspq *rspq)
{
- unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+ unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
return timer_idx < SGE_NTIMERS
? adapter->sge.timer_val[timer_idx]
@@ -1086,8 +1086,8 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
* Update the response queue's interrupt coalescing parameters and
* return success.
*/
- rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
- (cnt > 0 ? QINTR_CNT_EN : 0));
+ rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+ QINTR_CNT_EN_V(cnt > 0));
return 0;
}
@@ -1439,7 +1439,7 @@ static int cxgb4vf_get_coalesce(struct net_device *dev,
coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
coalesce->rx_max_coalesced_frames =
- ((rspq->intr_params & QINTR_CNT_EN)
+ ((rspq->intr_params & QINTR_CNT_EN_F)
? adapter->sge.counter_val[rspq->pktcnt_idx]
: 0);
return 0;
@@ -2393,8 +2393,9 @@ static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
u8 pkt_cnt_idx, unsigned int size,
unsigned int iqe_size)
{
- rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
- (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+ rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+ (pkt_cnt_idx < SGE_NCOUNTERS ?
+ QINTR_CNT_EN_F : 0));
rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
? pkt_cnt_idx
: 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 482f6de68..ad53e5ad2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -524,7 +524,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
*/
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
- u32 val;
+ u32 val = adapter->params.arch.sge_fl_db;
/* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
@@ -532,11 +532,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip))
- val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
+ val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else
- val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
- DBTYPE_F;
- val |= DBPRIO_F;
+ val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
@@ -1084,7 +1082,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
@@ -1100,7 +1098,7 @@ nocsum:
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
- return TXPKT_L4CSUM_DIS;
+ return TXPKT_L4CSUM_DIS_F;
}
} else {
/*
@@ -1116,16 +1114,21 @@ nocsum:
goto nocsum;
}
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
- else {
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+ int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+ if (chip <= CHELSIO_T5)
+ hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ else
+ hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+ } else {
int start = skb_transport_offset(skb);
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_CSUM_START_V(start) |
+ TXPKT_CSUM_LOC_V(start + skb->csum_offset);
}
}
@@ -1160,7 +1163,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid;
u64 cntrl, *end;
- int qidx, credits;
+ int qidx, credits, max_pkt_len;
unsigned int flits, ndesc;
struct adapter *adapter;
struct sge_eth_txq *txq;
@@ -1183,6 +1186,13 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len < fw_hdr_copy_len))
goto out_free;
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
/*
* Figure out which TX Queue we're going to use.
*/
@@ -1281,29 +1291,35 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* Fill in the LSO CPL message.
*/
lso->lso_ctrl =
- cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE |
- LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len/4) |
- LSO_IPHDR_LEN(l3hdr_len/4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F |
+ LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->ipid_ofst = cpu_to_be16(0);
lso->mss = cpu_to_be16(ssi->gso_size);
lso->seqno_offset = cpu_to_be32(0);
if (is_t4(adapter->params.chip))
lso->len = cpu_to_be32(skb->len);
else
- lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
/*
* Set up TX Packet CPL pointer, control word and perform
* accounting.
*/
cpl = (void *)(lso + 1);
- cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len));
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
txq->tso++;
txq->tx_cso += ssi->gso_segs;
} else {
@@ -1320,10 +1336,11 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ cntrl = hwcsum(adapter->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
txq->tx_cso++;
} else
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
}
/*
@@ -1332,15 +1349,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (skb_vlan_tag_present(skb)) {
txq->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
}
/*
* Fill in the TX Packet CPL message header.
*/
- cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->port_id) |
- TXPKT_PF(0));
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->port_id) |
+ TXPKT_PF_V(0));
cpl->pack = cpu_to_be16(0);
cpl->len = cpu_to_be16(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1663,7 +1680,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
static inline bool is_new_response(const struct rsp_ctrl *rc,
const struct sge_rspq *rspq)
{
- return RSPD_GEN(rc->type_gen) == rspq->gen;
+ return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
}
/**
@@ -1752,8 +1769,8 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* SGE.
*/
dma_rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ rsp_type = RSPD_TYPE_G(rc->type_gen);
+ if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
struct page_frag *fp;
struct pkt_gl gl;
const struct rx_sw_desc *sdesc;
@@ -1764,7 +1781,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* If we get a "new buffer" message from the SGE we
* need to move on to the next Free List buffer.
*/
- if (len & RSPD_NEWBUF) {
+ if (len & RSPD_NEWBUF_F) {
/*
* We get one "new buffer" message when we
* first start up a queue so we need to ignore
@@ -1775,7 +1792,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1);
rspq->offset = 0;
}
- len = RSPD_LEN(len);
+ len = RSPD_LEN_G(len);
}
gl.tot_len = len;
@@ -1818,10 +1835,10 @@ static int process_responses(struct sge_rspq *rspq, int budget)
rspq->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&gl, &rxq->fl, frag);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
ret = rspq->handler(rspq, rspq->cur_desc, NULL);
} else {
- WARN_ON(rsp_type > RSP_TYPE_CPL);
+ WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
ret = 0;
}
@@ -1833,7 +1850,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
*/
const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
rspq->next_intr_params =
- QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+ QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
break;
}
@@ -1875,7 +1892,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
intr_params = rspq->next_intr_params;
rspq->next_intr_params = rspq->intr_params;
} else
- intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+ intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
@@ -1936,10 +1953,10 @@ static unsigned int process_intrq(struct adapter *adapter)
* never happen ...
*/
dma_rmb();
- if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+ if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
dev_err(adapter->pdev_dev,
"Unexpected INTRQ response type %d\n",
- RSPD_TYPE(rc->type_gen));
+ RSPD_TYPE_G(rc->type_gen));
continue;
}
@@ -1951,7 +1968,7 @@ static unsigned int process_intrq(struct adapter *adapter)
* want to either make them fatal and/or conditionalized under
* DEBUG.
*/
- qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+ qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
iq_idx = IQ_IDX(s, qid);
if (unlikely(iq_idx >= MAX_INGQ)) {
dev_err(adapter->pdev_dev,
@@ -2154,8 +2171,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
u64 bar2_qoffset;
int ret;
- ret = t4_bar2_sge_qregs(adapter, qid, qtype,
- &bar2_qoffset, pbar2_qid);
+ ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
+ &bar2_qoffset, pbar2_qid);
if (ret)
return NULL;
@@ -2239,12 +2256,18 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
if (fl) {
+ enum chip_type chip =
+ CHELSIO_CHIP_VERSION(adapter->params.chip);
/*
* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
* descriptor ring. The free list size needs to be a multiple
- * of the Egress Queue Unit.
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+ * than the SGE's Egress Congestion Threshold
+ * (fl_starve_thres - 1).
*/
+ if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
+ fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
sizeof(__be64), sizeof(struct rx_sw_desc),
@@ -2274,7 +2297,9 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
- FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
+ FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
cmd.fl0size = cpu_to_be16(flsz);
cmd.fl0addr = cpu_to_be64(fl->addr);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index b9debb4f2..88b8981b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -51,6 +51,7 @@
*/
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -156,6 +157,12 @@ struct vpd_params {
u32 cclk; /* Core Clock (KHz) */
};
+/* Stores chip specific parameters */
+struct arch_specific_params {
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
/*
* Global Receive Side Scaling (RSS) parameters in host-native format.
*/
@@ -215,6 +222,7 @@ struct adapter_params {
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
+ struct arch_specific_params arch; /* chip specific params */
enum chip_type chip; /* chip code */
u8 nports; /* # of Ethernet "ports" */
};
@@ -284,11 +292,11 @@ int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int t4_bar2_sge_qregs(struct adapter *adapter,
- unsigned int qid,
- enum t4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid);
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 966ee900e..0db6dc9e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -428,7 +428,7 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
}
/**
- * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
@@ -452,11 +452,11 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
* then these "Inferred Queue ID" register may not be used.
*/
-int t4_bar2_sge_qregs(struct adapter *adapter,
- unsigned int qid,
- enum t4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid)
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
{
unsigned int page_shift, page_size, qpp_shift, qpp_mask;
u64 bar2_page_offset, bar2_qoffset;
@@ -1191,9 +1191,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- unsigned int max_naddr = is_t4(adapter->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
if (naddr > max_naddr)
return -EINVAL;
@@ -1285,9 +1283,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
- unsigned int max_naddr = is_t4(adapter->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
/*
* If this is a new allocation, determine whether it should be
@@ -1310,7 +1306,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
- if (ret >= max_naddr)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
@@ -1590,11 +1586,25 @@ int t4vf_prep_adapter(struct adapter *adapter)
switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ adapter->params.arch.sge_fl_db = DBPRIO_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_CLS_SRAM_L_INSTANCES;
break;
case CHELSIO_T5:
chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+ adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+
+ case CHELSIO_T6:
+ chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
break;
}
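
A hedged recap of the design choice: chip differences are captured once at probe time in params.arch instead of being re-derived with is_t4()/is_t5() checks on hot paths. The ring_fl_db() hunk earlier in this patch consumes sge_fl_db exactly this way; the snippet below restates that pattern with illustrative names.

static u32 example_fl_doorbell(const struct adapter *adapter, u32 pidx_units)
{
	/* T4: DBPRIO_F, T5: DBPRIO_F | DBTYPE_F, T6: 0 -- all set at probe time */
	u32 val = adapter->params.arch.sge_fl_db;

	if (is_t4(adapter->params.chip))
		val |= PIDX_V(pidx_units);
	else
		val |= PIDX_T5_V(pidx_units);

	return val;
}
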
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 905ac5f5d..5ab912937 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_CIRRUS
default y
depends on ISA || EISA || ARM || MAC
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -23,9 +21,7 @@ config CS89x0
depends on ISA || EISA || ARM
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
- network (Ethernet) card of this type, say Y and read the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto> as well as
+ network (Ethernet) card of this type, say Y and read the file
<file:Documentation/networking/cs89x0.txt>.
To compile this driver as a module, choose M here. The module
@@ -55,9 +51,7 @@ config MAC89x0
depends on MAC
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
- Nubus or LC-PDS network (Ethernet) card of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ Nubus or LC-PDS network (Ethernet) card of this type, say Y here.
To compile this driver as a module, choose M here. This module will
be called mac89x0.
diff --git a/drivers/net/ethernet/cisco/Kconfig b/drivers/net/ethernet/cisco/Kconfig
index 1c7b884e3..15b713a89 100644
--- a/drivers/net/ethernet/cisco/Kconfig
+++ b/drivers/net/ethernet/cisco/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_CISCO
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 0be6850be..d106186f4 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -5,7 +5,7 @@
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#include "enic_res.h"
#include "enic_clsf.h"
@@ -15,14 +15,14 @@
* @rq: rq number to steer to
*
* This function returns filter_id(hardware_id) of the filter
- * added. In case of error it returns an negative number.
+ * added. In case of error it returns a negative number.
*/
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
int res;
struct filter data;
- switch (keys->ip_proto) {
+ switch (keys->basic.ip_proto) {
case IPPROTO_TCP:
data.u.ipv4.protocol = PROTO_TCP;
break;
@@ -33,10 +33,10 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
return -EPROTONOSUPPORT;
};
data.type = FILTER_IPV4_5TUPLE;
- data.u.ipv4.src_addr = ntohl(keys->src);
- data.u.ipv4.dst_addr = ntohl(keys->dst);
- data.u.ipv4.src_port = ntohs(keys->port16[0]);
- data.u.ipv4.dst_port = ntohs(keys->port16[1]);
+ data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
+ data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
+ data.u.ipv4.src_port = ntohs(keys->ports.src);
+ data.u.ipv4.dst_port = ntohs(keys->ports.dst);
data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
spin_lock_bh(&enic->devcmd_lock);
@@ -158,11 +158,11 @@ static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
struct enic_rfs_fltr_node *tpos;
hlist_for_each_entry(tpos, h, node)
- if (tpos->keys.src == k->src &&
- tpos->keys.dst == k->dst &&
- tpos->keys.ports == k->ports &&
- tpos->keys.ip_proto == k->ip_proto &&
- tpos->keys.n_proto == k->n_proto)
+ if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
+ tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
+ tpos->keys.ports.ports == k->ports.ports &&
+ tpos->keys.basic.ip_proto == k->basic.ip_proto &&
+ tpos->keys.basic.n_proto == k->basic.n_proto)
return tpos;
return NULL;
}
@@ -177,9 +177,10 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int res, i;
enic = netdev_priv(dev);
- res = skb_flow_dissect(skb, &keys);
- if (!res || keys.n_proto != htons(ETH_P_IP) ||
- (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+ res = skb_flow_dissect_flow_keys(skb, &keys);
+ if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
+ (keys.basic.ip_proto != IPPROTO_TCP &&
+ keys.basic.ip_proto != IPPROTO_UDP))
return -EPROTONOSUPPORT;
tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
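
A hedged sketch of the new flow_keys layout the converted enic code relies on: the flat src/dst/port16[]/ip_proto members are replaced by nested basic/addrs/ports groups, and skb_flow_dissect() becomes skb_flow_dissect_flow_keys(). The helper below mirrors the IPv4 TCP/UDP filtering done above; its name is illustrative.

static bool example_dissect_ipv4(const struct sk_buff *skb,
				 struct flow_keys *keys)
{
	if (!skb_flow_dissect_flow_keys(skb, keys))
		return false;

	if (keys->basic.n_proto != htons(ETH_P_IP))
		return false;	/* only IPv4 flows are steered */

	return keys->basic.ip_proto == IPPROTO_TCP ||
	       keys->basic.ip_proto == IPPROTO_UDP;
}
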
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 68d47b196..f3f1601a7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -348,7 +348,7 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
n = htbl_fltr_search(enic, (u16)fsp->location);
if (!n)
return -EINVAL;
- switch (n->keys.ip_proto) {
+ switch (n->keys.basic.ip_proto) {
case IPPROTO_TCP:
fsp->flow_type = TCP_V4_FLOW;
break;
@@ -360,16 +360,16 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
break;
}
- fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src;
+ fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
- fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst;
+ fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
- fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0];
+ fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
- fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1];
+ fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
fsp->ring_cookie = n->rq_id;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index eadae1b41..918a8e421 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1170,7 +1170,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
wq_work_done,
0 /* dont unmask intr */,
0 /* dont reset intr timer */);
- return rq_work_done;
+ return budget;
}
if (budget > 0)
@@ -1191,6 +1191,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't reset intr timer */);
err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1208,7 +1209,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
vnic_intr_unmask(&enic->intr[intr]);
}
- enic_poll_unlock_napi(&enic->rq[cq_rq]);
return rq_work_done;
}
@@ -1414,7 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
- enic_poll_unlock_napi(&enic->rq[rq]);
+ enic_poll_unlock_napi(&enic->rq[rq], napi);
if (work_done < work_to_do) {
/* Some work done, but not enough to stay in polling,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 8111d5202..b9c82f143 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -21,6 +21,7 @@
#define _VNIC_RQ_H_
#include <linux/pci.h>
+#include <linux/netdevice.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
@@ -75,6 +76,12 @@ struct vnic_rq_buf {
uint64_t wr_id;
};
+enum enic_poll_state {
+ ENIC_POLL_STATE_IDLE,
+ ENIC_POLL_STATE_NAPI,
+ ENIC_POLL_STATE_POLL
+};
+
struct vnic_rq {
unsigned int index;
struct vnic_dev *vdev;
@@ -86,19 +93,7 @@ struct vnic_rq {
void *os_buf_head;
unsigned int pkts_outstanding;
#ifdef CONFIG_NET_RX_BUSY_POLL
-#define ENIC_POLL_STATE_IDLE 0
-#define ENIC_POLL_STATE_NAPI (1 << 0) /* NAPI owns this poll */
-#define ENIC_POLL_STATE_POLL (1 << 1) /* poll owns this poll */
-#define ENIC_POLL_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this poll */
-#define ENIC_POLL_STATE_POLL_YIELD (1 << 3) /* poll yielded this poll */
-#define ENIC_POLL_YIELD (ENIC_POLL_STATE_NAPI_YIELD | \
- ENIC_POLL_STATE_POLL_YIELD)
-#define ENIC_POLL_LOCKED (ENIC_POLL_STATE_NAPI | \
- ENIC_POLL_STATE_POLL)
-#define ENIC_POLL_USER_PEND (ENIC_POLL_STATE_POLL | \
- ENIC_POLL_STATE_POLL_YIELD)
- unsigned int bpoll_state;
- spinlock_t bpoll_lock;
+ atomic_t bpoll_state;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
@@ -215,76 +210,43 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
- spin_lock_init(&rq->bpoll_lock);
- rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+ atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
}
static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
- bool rc = true;
-
- spin_lock(&rq->bpoll_lock);
- if (rq->bpoll_state & ENIC_POLL_LOCKED) {
- WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
- rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
- rc = false;
- } else {
- rq->bpoll_state = ENIC_POLL_STATE_NAPI;
- }
- spin_unlock(&rq->bpoll_lock);
+ int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
+ ENIC_POLL_STATE_NAPI);
- return rc;
+ return (rc == ENIC_POLL_STATE_IDLE);
}
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+static inline void enic_poll_unlock_napi(struct vnic_rq *rq,
+ struct napi_struct *napi)
{
- bool rc = false;
-
- spin_lock(&rq->bpoll_lock);
- WARN_ON(rq->bpoll_state &
- (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
- if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
- rc = true;
- rq->bpoll_state = ENIC_POLL_STATE_IDLE;
- spin_unlock(&rq->bpoll_lock);
-
- return rc;
+ WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI);
+ napi_gro_flush(napi, false);
+ atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
}
static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
- bool rc = true;
-
- spin_lock_bh(&rq->bpoll_lock);
- if (rq->bpoll_state & ENIC_POLL_LOCKED) {
- rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
- rc = false;
- } else {
- rq->bpoll_state |= ENIC_POLL_STATE_POLL;
- }
- spin_unlock_bh(&rq->bpoll_lock);
+ int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
+ ENIC_POLL_STATE_POLL);
- return rc;
+ return (rc == ENIC_POLL_STATE_IDLE);
}
-static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
-{
- bool rc = false;
- spin_lock_bh(&rq->bpoll_lock);
- WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
- if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
- rc = true;
- rq->bpoll_state = ENIC_POLL_STATE_IDLE;
- spin_unlock_bh(&rq->bpoll_lock);
-
- return rc;
+static inline void enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+ WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL);
+ atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
}
static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
- WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
- return rq->bpoll_state & ENIC_POLL_USER_PEND;
+ return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL;
}
#else
@@ -298,7 +260,8 @@ static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
return true;
}
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq,
+ struct napi_struct *napi)
{
return false;
}
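
The enic change above replaces the spinlock-plus-bitfield ownership scheme with a single atomic state word: a cmpxchg from IDLE to the caller's state either takes ownership of the receive queue or fails immediately, with no yield flags left to track. A minimal, self-contained sketch of that pattern using C11 atomics (the names are illustrative, not the driver's):

/* Sketch of the cmpxchg-based ownership scheme used above.  The state
 * names mirror the driver's, but this is a standalone illustration,
 * not the enic code itself. */
#include <stdatomic.h>
#include <stdbool.h>

enum { POLL_IDLE = 0, POLL_OWNED_NAPI = 1, POLL_OWNED_BUSY = 2 };

struct rx_queue {
	atomic_int poll_state;
};

/* Returns true only if we moved IDLE -> OWNED_NAPI; a concurrent
 * busy-poller that already owns the queue makes the exchange fail. */
static bool napi_try_lock(struct rx_queue *rq)
{
	int expected = POLL_IDLE;

	return atomic_compare_exchange_strong(&rq->poll_state, &expected,
					      POLL_OWNED_NAPI);
}

static void napi_unlock(struct rx_queue *rq)
{
	/* Only the owner writes IDLE back, so a plain store suffices. */
	atomic_store(&rq->poll_state, POLL_IDLE);
}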
diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig
index 68262aa57..740bbad5e 100644
--- a/drivers/net/ethernet/dec/Kconfig
+++ b/drivers/net/ethernet/dec/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_DEC
default y
depends on PCI || EISA || CARDBUS
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index eb9ba6e97..1003201b5 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -21,8 +21,7 @@ config DE2104X
of this type. (If your card is NOT SMC EtherPower 10/100 PCI
(smc9332dst), you can also try the driver for "Generic DECchip"
cards, below. However, most people with a network card of this type
- will say Y here.) Do read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ will say Y here.)
To compile this driver as a module, choose M here. The module will
be called de2104x.
@@ -50,8 +49,7 @@ config TULIP
of this type. (If your card is NOT SMC EtherPower 10/100 PCI
(smc9332dst), you can also try the driver for "Generic DECchip"
cards, above. However, most people with a network card of this type
- will say Y here.) Do read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ will say Y here.)
To compile this driver as a module, choose M here. The module will
be called tulip.
@@ -113,9 +111,7 @@ config DE4X5
---help---
This is support for the DIGITAL series of PCI/EISA Ethernet cards.
These include the DE425, DE434, DE435, DE450 and DE500 models. If
- you have a network card of this type, say Y and read the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. More specific
+ you have a network card of this type, say Y. More specific
information is contained in
<file:Documentation/networking/de4x5.txt>.
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index badff181e..8966f3159 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -5189,16 +5189,16 @@ de4x5_parse_params(struct net_device *dev)
if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
- if (strstr(p, "TP")) {
- lp->params.autosense = TP;
- } else if (strstr(p, "TP_NW")) {
+ if (strstr(p, "TP_NW")) {
lp->params.autosense = TP_NW;
+ } else if (strstr(p, "TP")) {
+ lp->params.autosense = TP;
+ } else if (strstr(p, "BNC_AUI")) {
+ lp->params.autosense = BNC;
} else if (strstr(p, "BNC")) {
lp->params.autosense = BNC;
} else if (strstr(p, "AUI")) {
lp->params.autosense = AUI;
- } else if (strstr(p, "BNC_AUI")) {
- lp->params.autosense = BNC;
} else if (strstr(p, "10Mb")) {
lp->params.autosense = _10Mb;
} else if (strstr(p, "100Mb")) {
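
The de4x5 hunk above reorders the checks because strstr() matches substrings: "TP" matches inside "TP_NW" and "BNC"/"AUI" match inside "BNC_AUI", so the longer, more specific tokens must be tested first or they can never be selected. A small stand-alone demonstration of the pitfall:

/* Demonstrates why the longer token must be checked before the shorter
 * one when strstr() is used for keyword matching. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *p = "autosense=TP_NW";

	/* Wrong order: "TP" matches inside "TP_NW", so TP_NW never wins. */
	if (strstr(p, "TP"))
		printf("matched TP first (wrong for a TP_NW input)\n");

	/* Correct order: test the more specific token first. */
	if (strstr(p, "TP_NW"))
		printf("matched TP_NW\n");
	else if (strstr(p, "TP"))
		printf("matched TP\n");

	return 0;
}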
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 2c30c0c83..447d09272 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1115,7 +1115,7 @@ static void uli526x_timer(unsigned long data)
netif_carrier_off(dev);
}
}
- db->init=0;
+ db->init = 0;
/* Timer active again */
db->timer.expires = ULI526X_TIMER_WUT;
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index c543ac11c..f6e858d0b 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_DLINK
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1274b6fda..cf0a5fcda 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -463,10 +463,8 @@ rio_open (struct net_device *dev)
dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
}
- init_timer (&np->timer);
+ setup_timer(&np->timer, rio_timer, (unsigned long)dev);
np->timer.expires = jiffies + 1*HZ;
- np->timer.data = (unsigned long) dev;
- np->timer.function = rio_timer;
add_timer (&np->timer);
/* Start Tx/Rx */
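
The dl2k hunk above is a straightforward conversion to the setup_timer() helper, which folds the three open-coded field assignments into a single call; the expiry time is still programmed and the timer armed separately. A sketch of the equivalence (driver context, shown only as an illustration):

/* What the conversion above amounts to: setup_timer() is the kernel
 * helper that performs the same three initialisation steps. */

/* Before: open-coded initialisation */
init_timer(&np->timer);
np->timer.data     = (unsigned long)dev;
np->timer.function = rio_timer;

/* After: one helper call */
setup_timer(&np->timer, rio_timer, (unsigned long)dev);

/* In both cases the expiry is programmed and the timer armed afterwards */
np->timer.expires = jiffies + 1 * HZ;
add_timer(&np->timer);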
diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig
index 1b8d638c6..fdbb27ceb 100644
--- a/drivers/net/ethernet/emulex/Kconfig
+++ b/drivers/net/ethernet/emulex/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_EMULEX
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index ea94a8eb6..710856326 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -5,6 +5,15 @@ config BE2NET
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
+config BE2NET_HWMON
+ bool "HWMON support for be2net driver"
+ depends on BE2NET && HWMON
+ depends on !(BE2NET=y && HWMON=m)
+ default y
+ ---help---
+ Say Y here if you want to expose thermal sensor data on the
+ be2net network adapter.
+
config BE2NET_VXLAN
bool "VXLAN offload support on be2net driver"
default y
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 1bf1cdce7..8d12b41b3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -31,11 +31,13 @@
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>
#include <linux/cpumask.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.6.0.1"
+#define DRV_VER "10.6.0.2"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -314,7 +316,6 @@ struct be_rx_obj {
} ____cacheline_aligned_in_smp;
struct be_drv_stats {
- u32 be_on_die_temperature;
u32 eth_red_drops;
u32 dma_map_errors;
u32 rx_drops_no_pbuf;
@@ -366,6 +367,7 @@ struct be_vf_cfg {
u32 tx_rate;
u32 plink_tracking;
u32 privileges;
+ bool spoofchk;
};
enum vf_state {
@@ -382,6 +384,7 @@ enum vf_state {
#define BE_FLAGS_SETUP_DONE BIT(9)
#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10)
#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
+#define BE_FLAGS_OS2BMC BIT(12)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
@@ -426,6 +429,8 @@ struct be_resources {
u32 vf_if_cap_flags; /* VF if capability flags */
};
+#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
+
struct rss_info {
u64 rss_flags;
u8 rsstable[RSS_INDIR_TABLE_LEN];
@@ -433,6 +438,12 @@ struct rss_info {
u8 rss_hkey[RSS_HASH_KEY_LEN];
};
+#define BE_INVALID_DIE_TEMP 0xFF
+struct be_hwmon {
+ struct device *hwmon_dev;
+ u8 be_on_die_temp; /* Unit: millidegree Celsius */
+};
+
/* Macros to read/write the 'features' word of be_wrb_params structure.
*/
#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT
@@ -453,7 +464,8 @@ enum {
BE_WRB_F_LSO_BIT, /* LSO */
BE_WRB_F_LSO6_BIT, /* LSO6 */
BE_WRB_F_VLAN_BIT, /* VLAN */
- BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */
+ BE_WRB_F_VLAN_SKIP_HW_BIT, /* Skip VLAN tag (workaround) */
+ BE_WRB_F_OS2BMC_BIT /* Send packet to the management ring */
};
/* The structure below provides a HW-agnostic abstraction of WRB params
@@ -514,6 +526,7 @@ struct be_adapter {
u16 work_counter;
struct delayed_work be_err_detection_work;
+ u8 err_flags;
u32 flags;
u32 cmd_privileges;
/* Ethtool knobs and info */
@@ -572,8 +585,11 @@ struct be_adapter {
u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
+ struct be_hwmon hwmon_info;
u8 pf_number;
struct rss_info rss_info;
+ /* Filters for packets that need to be sent to BMC */
+ u32 bmc_filt_mask;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -772,26 +788,36 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
-static inline bool be_multi_rxq(const struct be_adapter *adapter)
+#define BE_ERROR_EEH 1
+#define BE_ERROR_UE BIT(1)
+#define BE_ERROR_FW BIT(2)
+#define BE_ERROR_HW (BE_ERROR_EEH | BE_ERROR_UE)
+#define BE_ERROR_ANY (BE_ERROR_EEH | BE_ERROR_UE | BE_ERROR_FW)
+#define BE_CLEAR_ALL 0xFF
+
+static inline u8 be_check_error(struct be_adapter *adapter, u32 err_type)
{
- return adapter->num_rx_qs > 1;
+ return (adapter->err_flags & err_type);
}
-static inline bool be_error(struct be_adapter *adapter)
+static inline void be_set_error(struct be_adapter *adapter, int err_type)
{
- return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->err_flags |= err_type;
+ netif_carrier_off(netdev);
+
+ dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
}
-static inline bool be_hw_error(struct be_adapter *adapter)
+static inline void be_clear_error(struct be_adapter *adapter, int err_type)
{
- return adapter->eeh_error || adapter->hw_error;
+ adapter->err_flags &= ~err_type;
}
-static inline void be_clear_all_error(struct be_adapter *adapter)
+static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
- adapter->eeh_error = false;
- adapter->hw_error = false;
- adapter->fw_timeout = false;
+ return adapter->num_rx_qs > 1;
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
@@ -804,6 +830,7 @@ bool be_pause_supported(struct be_adapter *adapter);
u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
+void be_eqd_update(struct be_adapter *adapter, bool force_update);
/*
* internal function to initialize-cleanup roce device.
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index c5e1d0ac7..9eac3227d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -93,7 +93,7 @@ static void be_mcc_notify(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return;
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
@@ -140,6 +140,7 @@ static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
if (base_status == MCC_STATUS_NOT_SUPPORTED ||
base_status == MCC_STATUS_ILLEGAL_REQUEST ||
addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+ addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
(opcode == OPCODE_COMMON_WRITE_FLASHROM &&
(base_status == MCC_STATUS_ILLEGAL_FIELD ||
addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
@@ -191,10 +192,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
if (base_status == MCC_STATUS_SUCCESS) {
struct be_cmd_resp_get_cntl_addnl_attribs *resp =
(void *)resp_hdr;
- adapter->drv_stats.be_on_die_temperature =
+ adapter->hwmon_info.be_on_die_temp =
resp->on_die_temperature;
} else {
adapter->be_get_temp_freq = 0;
+ adapter->hwmon_info.be_on_die_temp =
+ BE_INVALID_DIE_TEMP;
}
return;
}
@@ -330,6 +333,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
}
}
+#define MGMT_ENABLE_MASK 0x4
+static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
+ u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
+
+ if (evt_dw1 & MGMT_ENABLE_MASK) {
+ adapter->flags |= BE_FLAGS_OS2BMC;
+ adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
+ } else {
+ adapter->flags &= ~BE_FLAGS_OS2BMC;
+ }
+}
+
static void be_async_grp5_evt_process(struct be_adapter *adapter,
struct be_mcc_compl *compl)
{
@@ -346,6 +364,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
case ASYNC_EVENT_PVID_STATE:
be_async_grp5_pvid_state_process(adapter, compl);
break;
+ /* Async event to disable/enable os2bmc and/or mac-learning */
+ case ASYNC_EVENT_FW_CONTROL:
+ be_async_grp5_fw_control_process(adapter, compl);
+ break;
default:
break;
}
@@ -486,7 +508,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
for (i = 0; i < mcc_timeout; i++) {
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
local_bh_disable();
@@ -499,7 +521,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
- adapter->fw_timeout = true;
+ be_set_error(adapter, BE_ERROR_FW);
return -EIO;
}
return status;
@@ -538,7 +560,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
u32 ready;
do {
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
ready = ioread32(db);
@@ -551,7 +573,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
- adapter->fw_timeout = true;
+ be_set_error(adapter, BE_ERROR_FW);
be_detect_error(adapter);
return -1;
}
@@ -1457,7 +1479,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
*if_handle = le32_to_cpu(resp->interface_id);
/* Hack to retrieve VF's pmac-id on BE3 */
- if (BE3_chip(adapter) && !be_physfn(adapter))
+ if (BE3_chip(adapter) && be_virtfn(adapter))
adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
}
return status;
@@ -3156,7 +3178,7 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
}
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id, u16 hsw_mode)
+ u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_hsw_config *req;
@@ -3192,6 +3214,14 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
ctxt, hsw_mode);
}
+ /* Enable/disable both mac and vlan spoof checking */
+ if (!BEx_chip(adapter) && spoofchk) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
+ ctxt, spoofchk);
+ AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
+ ctxt, spoofchk);
+ }
+
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -3202,7 +3232,7 @@ err:
/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id, u8 *mode)
+ u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_hsw_config *req;
@@ -3250,6 +3280,10 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (mode)
*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
port_fwd_type, &resp->context);
+ if (spoofchk)
+ *spoofchk =
+ AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ spoofchk, &resp->context);
}
err:
@@ -3261,7 +3295,7 @@ static bool be_is_wol_excluded(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return true;
switch (pdev->subsystem_device) {
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1ec22300e..00e3a6b6b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -65,7 +65,8 @@ enum mcc_base_status {
enum mcc_addl_status {
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
- MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+ MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
+ MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab
};
#define CQE_BASE_STATUS_MASK 0xFFFF
@@ -104,6 +105,7 @@ struct be_mcc_compl {
#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
#define ASYNC_EVENT_CODE_SLIPORT 0x11
#define ASYNC_EVENT_PORT_MISCONFIG 0x9
+#define ASYNC_EVENT_FW_CONTROL 0x5
enum {
LINK_DOWN = 0x0,
@@ -180,6 +182,22 @@ struct be_async_event_misconfig_port {
u32 flags;
} __packed;
+#define BMC_FILT_BROADCAST_ARP BIT(0)
+#define BMC_FILT_BROADCAST_DHCP_CLIENT BIT(1)
+#define BMC_FILT_BROADCAST_DHCP_SERVER BIT(2)
+#define BMC_FILT_BROADCAST_NET_BIOS BIT(3)
+#define BMC_FILT_BROADCAST BIT(7)
+#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER BIT(8)
+#define BMC_FILT_MULTICAST_IPV6_RA BIT(9)
+#define BMC_FILT_MULTICAST_IPV6_RAS BIT(10)
+#define BMC_FILT_MULTICAST BIT(15)
+struct be_async_fw_control {
+ u32 event_data_word1;
+ u32 event_data_word2;
+ u32 evt_tag;
+ u32 event_data_word4;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -602,6 +620,11 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS)
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
@@ -1109,10 +1132,6 @@ struct be_cmd_req_query_fw_cfg {
u32 rsvd[31];
};
-/* ASIC revisions */
-#define ASIC_REV_B0 0x10
-#define ASIC_REV_P2 0x11
-
struct be_cmd_resp_query_fw_cfg {
struct be_cmd_resp_hdr hdr;
u32 be_config_number;
@@ -1745,18 +1764,24 @@ struct be_cmd_req_set_mac_list {
#define PORT_FWD_TYPE_VEPA 0x3
#define PORT_FWD_TYPE_VEB 0x2
+#define ENABLE_MAC_SPOOFCHK 0x2
+#define DISABLE_MAC_SPOOFCHK 0x3
+
struct amap_set_hsw_context {
u8 interface_id[16];
- u8 rsvd0[14];
+ u8 rsvd0[8];
+ u8 mac_spoofchk[2];
+ u8 rsvd1[4];
u8 pvid_valid;
u8 pport;
- u8 rsvd1[6];
+ u8 rsvd2[6];
u8 port_fwd_type[3];
- u8 rsvd2[7];
+ u8 rsvd3[5];
+ u8 vlan_spoofchk[2];
u8 pvid[16];
- u8 rsvd3[32];
u8 rsvd4[32];
u8 rsvd5[32];
+ u8 rsvd6[32];
} __packed;
struct be_cmd_req_set_hsw_config {
@@ -1774,11 +1799,13 @@ struct amap_get_hsw_req_context {
struct amap_get_hsw_resp_context {
u8 rsvd0[6];
u8 port_fwd_type[3];
- u8 rsvd1[7];
+ u8 rsvd1[5];
+ u8 spoofchk;
+ u8 rsvd2;
u8 pvid[16];
- u8 rsvd2[32];
u8 rsvd3[32];
u8 rsvd4[32];
+ u8 rsvd5[32];
} __packed;
struct be_cmd_req_get_hsw_config {
@@ -2334,9 +2361,9 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
u32 domain);
int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
- u16 intf_id, u16 hsw_mode);
+ u16 intf_id, u16 hsw_mode, u8 spoofchk);
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
- u16 intf_id, u8 *mode);
+ u16 intf_id, u8 *mode, bool *spoofchk);
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
int be_cmd_get_fw_log_level(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2835dee5d..b2476dbfd 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -123,7 +123,6 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(dma_map_errors)},
/* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
- {DRVSTAT_INFO(be_on_die_temperature)},
{DRVSTAT_INFO(rx_roce_bytes_lsd)},
{DRVSTAT_INFO(rx_roce_bytes_msd)},
{DRVSTAT_INFO(rx_roce_frames)},
@@ -368,6 +367,14 @@ static int be_set_coalesce(struct net_device *netdev,
aic++;
}
+ /* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
+ * When AIC is disabled, persistently force-set the EQD value via the
+ * FW cmd, so that we don't have to calculate the delay multiplier
+ * encode value each time EQ_DB is rung
+ */
+ if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
+ be_eqd_update(adapter, true);
+
return 0;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 48840889d..c684bb32b 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -132,6 +132,18 @@
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
+/* Rearm to interrupt delay encoding */
+#define DB_EQ_R2I_DLY_SHIFT (30) /* bits 30 - 31 */
+
+/* Rearm to interrupt (R2I) delay multiplier encoding represents 3 different
+ * values configured in CEV_REARM2IRPT_DLY_MULT_CSR register. This value is
+ * programmed by the host driver while ringing an EQ doorbell (EQ_DB) if a delay
+ * between rearming the EQ and next interrupt on this EQ is desired.
+ */
+#define R2I_DLY_ENC_0 0 /* No delay */
+#define R2I_DLY_ENC_1 1 /* maps to 160us EQ delay */
+#define R2I_DLY_ENC_2 2 /* maps to 96us EQ delay */
+#define R2I_DLY_ENC_3 3 /* maps to 48us EQ delay */
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
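
The new DB_EQ_R2I_DLY_SHIFT field occupies the top two bits of the EQ doorbell word, alongside the existing ring-id, num-popped and rearm fields. The self-contained sketch below shows how such a doorbell value is composed; the shift values follow the defines above, while the ring-id mask and the sample qid/num_popped numbers are illustrative only:

/* Illustration of how the new R2I delay encoding fits into the EQ
 * doorbell word.  Shifts mirror the be_hw.h defines shown above; the
 * ring-id mask and sample values are placeholders, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define EQ_RING_ID_MASK      0x1FF  /* illustrative */
#define EQ_NUM_POPPED_SHIFT  16
#define EQ_REARM_SHIFT       29
#define EQ_R2I_DLY_SHIFT     30     /* new in this patch: bits 30 - 31 */
#define R2I_DLY_ENC_2        2      /* ~96us rearm-to-interrupt delay  */

int main(void)
{
	uint32_t qid = 5, num_popped = 12, val = 0;

	val |= qid & EQ_RING_ID_MASK;
	val |= num_popped << EQ_NUM_POPPED_SHIFT;
	val |= 1u << EQ_REARM_SHIFT;
	val |= (uint32_t)R2I_DLY_ENC_2 << EQ_R2I_DLY_SHIFT;

	printf("EQ doorbell value: 0x%08x\n", val);
	return 0;
}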
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e43cc8a73..6ca693b03 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -179,7 +179,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
if (lancer_chip(adapter))
return;
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_EEH))
return;
status = be_cmd_intr_set(adapter, enable);
@@ -191,6 +191,9 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
u32 val = 0;
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
val |= qid & DB_RQ_RING_ID_MASK;
val |= posted << DB_RQ_NUM_POSTED_SHIFT;
@@ -203,6 +206,9 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
{
u32 val = 0;
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
val |= txo->q.id & DB_TXULP_RING_ID_MASK;
val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
@@ -211,14 +217,15 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
+ bool arm, bool clear_int, u16 num_popped,
+ u32 eq_delay_mult_enc)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (arm)
@@ -227,6 +234,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
val |= 1 << DB_EQ_CLR_SHIFT;
val |= 1 << DB_EQ_EVNT_SHIFT;
val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+ val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
@@ -238,7 +246,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (arm)
@@ -265,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
return 0;
+ /* if device is not running, copy MAC to netdev->dev_addr */
+ if (!netif_running(netdev))
+ goto done;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -299,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM;
goto err;
}
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -662,6 +674,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
+
+ netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -810,6 +824,8 @@ static void wrb_fill_hdr(struct be_adapter *adapter,
SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
+ SET_TX_WRB_HDR_BITS(mgmt, hdr,
+ BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -1146,6 +1162,130 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
txo->pend_wrb_cnt = 0;
}
+/* OS2BMC related */
+
+#define DHCP_CLIENT_PORT 68
+#define DHCP_SERVER_PORT 67
+#define NET_BIOS_PORT1 137
+#define NET_BIOS_PORT2 138
+#define DHCPV6_RAS_PORT 547
+
+#define is_mc_allowed_on_bmc(adapter, eh) \
+ (!is_multicast_filt_enabled(adapter) && \
+ is_multicast_ether_addr(eh->h_dest) && \
+ !is_broadcast_ether_addr(eh->h_dest))
+
+#define is_bc_allowed_on_bmc(adapter, eh) \
+ (!is_broadcast_filt_enabled(adapter) && \
+ is_broadcast_ether_addr(eh->h_dest))
+
+#define is_arp_allowed_on_bmc(adapter, skb) \
+ (is_arp(skb) && is_arp_filt_enabled(adapter))
+
+#define is_broadcast_packet(eh, adapter) \
+ (is_multicast_ether_addr(eh->h_dest) && \
+ !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
+
+#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
+
+#define is_arp_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
+
+#define is_dhcp_client_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
+
+#define is_dhcp_srvr_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
+
+#define is_nbios_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
+
+#define is_ipv6_na_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & \
+ BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
+
+#define is_ipv6_ra_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
+
+#define is_ipv6_ras_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
+
+#define is_broadcast_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
+
+#define is_multicast_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
+
+static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
+ struct sk_buff **skb)
+{
+ struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
+ bool os2bmc = false;
+
+ if (!be_is_os2bmc_enabled(adapter))
+ goto done;
+
+ if (!is_multicast_ether_addr(eh->h_dest))
+ goto done;
+
+ if (is_mc_allowed_on_bmc(adapter, eh) ||
+ is_bc_allowed_on_bmc(adapter, eh) ||
+ is_arp_allowed_on_bmc(adapter, (*skb))) {
+ os2bmc = true;
+ goto done;
+ }
+
+ if ((*skb)->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *hdr = ipv6_hdr((*skb));
+ u8 nexthdr = hdr->nexthdr;
+
+ if (nexthdr == IPPROTO_ICMPV6) {
+ struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
+
+ switch (icmp6->icmp6_type) {
+ case NDISC_ROUTER_ADVERTISEMENT:
+ os2bmc = is_ipv6_ra_filt_enabled(adapter);
+ goto done;
+ case NDISC_NEIGHBOUR_ADVERTISEMENT:
+ os2bmc = is_ipv6_na_filt_enabled(adapter);
+ goto done;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (is_udp_pkt((*skb))) {
+ struct udphdr *udp = udp_hdr((*skb));
+
+ switch (ntohs(udp->dest)) {
+ case DHCP_CLIENT_PORT:
+ os2bmc = is_dhcp_client_filt_enabled(adapter);
+ goto done;
+ case DHCP_SERVER_PORT:
+ os2bmc = is_dhcp_srvr_filt_enabled(adapter);
+ goto done;
+ case NET_BIOS_PORT1:
+ case NET_BIOS_PORT2:
+ os2bmc = is_nbios_filt_enabled(adapter);
+ goto done;
+ case DHCPV6_RAS_PORT:
+ os2bmc = is_ipv6_ras_filt_enabled(adapter);
+ goto done;
+ default:
+ break;
+ }
+ }
+done:
+ /* For packets destined to the BMC and sent over a VLAN, the ASIC
+ * expects the VLAN tag to be inline in the packet.
+ */
+ if (os2bmc)
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+
+ return os2bmc;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1167,6 +1307,18 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
goto drop;
}
+ /* if os2bmc is enabled and if the pkt is destined to bmc,
+ * enqueue the pkt a 2nd time with mgmt bit set.
+ */
+ if (be_send_pkt_to_bmc(adapter, &skb)) {
+ BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+ if (unlikely(!wrb_cnt))
+ goto drop;
+ else
+ skb_get(skb);
+ }
+
if (be_is_txq_full(txo)) {
netif_stop_subqueue(netdev, q_idx);
tx_stats(txo)->tx_stops++;
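
In the be_xmit() hunk above, a packet that also has to reach the BMC is enqueued a second time with the OS2BMC/mgmt bit set, and skb_get() takes an extra reference so that the first TX completion cannot free the buffer while the second descriptor still points at it. A minimal sketch of that reference-counting pattern; queue_for_bmc() and queue_for_wire() are hypothetical helpers, while skb_get() is the real kernel refcount API:

/* Sketch of the "enqueue twice, hold an extra reference" pattern.
 * queue_for_bmc()/queue_for_wire() are hypothetical; each completion
 * path is assumed to drop one reference on the skb. */
static netdev_tx_t xmit_with_bmc_copy(struct sk_buff *skb, bool to_bmc)
{
	if (to_bmc) {
		/* The second descriptor references the same skb data, so
		 * take an extra reference before handing it out again. */
		skb_get(skb);
		queue_for_bmc(skb);	/* its completion drops one ref  */
	}

	queue_for_wire(skb);		/* completion drops the last ref */
	return NETDEV_TX_OK;
}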
@@ -1265,7 +1417,8 @@ static int be_vid_config(struct be_adapter *adapter)
if (status) {
dev_err(dev, "Setting HW VLAN filtering failed\n");
/* Set to VLAN promisc mode as setting VLAN filter failed */
- if (addl_status(status) ==
+ if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
+ addl_status(status) ==
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
return be_set_vlan_promisc(adapter);
} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
@@ -1466,6 +1619,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
+ vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
return 0;
}
@@ -1478,7 +1632,7 @@ static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
int status;
/* Enable Transparent VLAN Tagging */
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
if (status)
return status;
@@ -1507,7 +1661,7 @@ static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
/* Reset Transparent VLAN Tagging. */
status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
- vf_cfg->if_handle, 0);
+ vf_cfg->if_handle, 0, 0);
if (status)
return status;
@@ -1642,6 +1796,39 @@ static int be_set_vf_link_state(struct net_device *netdev, int vf,
return 0;
}
+static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ u8 spoofchk;
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ if (BEx_chip(adapter))
+ return -EOPNOTSUPP;
+
+ if (enable == vf_cfg->spoofchk)
+ return 0;
+
+ spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
+
+ status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
+ 0, spoofchk);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Spoofchk change on VF %d failed: %#x\n", vf, status);
+ return be_cmd_status(status);
+ }
+
+ vf_cfg->spoofchk = enable;
+ return 0;
+}
+
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
ulong now)
{
@@ -1650,61 +1837,110 @@ static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
aic->jiffies = now;
}
-static void be_eqd_update(struct be_adapter *adapter)
+static int be_get_new_eqd(struct be_eq_obj *eqo)
{
- struct be_set_eqd set_eqd[MAX_EVT_QS];
- int eqd, i, num = 0, start;
+ struct be_adapter *adapter = eqo->adapter;
+ int eqd, start;
struct be_aic_obj *aic;
- struct be_eq_obj *eqo;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
- u64 rx_pkts, tx_pkts;
+ u64 rx_pkts = 0, tx_pkts = 0;
ulong now;
u32 pps, delta;
+ int i;
- for_all_evt_queues(adapter, eqo, i) {
- aic = &adapter->aic_obj[eqo->idx];
- if (!aic->enable) {
- if (aic->jiffies)
- aic->jiffies = 0;
- eqd = aic->et_eqd;
- goto modify_eqd;
- }
+ aic = &adapter->aic_obj[eqo->idx];
+ if (!aic->enable) {
+ if (aic->jiffies)
+ aic->jiffies = 0;
+ eqd = aic->et_eqd;
+ return eqd;
+ }
- rxo = &adapter->rx_obj[eqo->idx];
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
- rx_pkts = rxo->stats.rx_pkts;
+ rx_pkts += rxo->stats.rx_pkts;
} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+ }
- txo = &adapter->tx_obj[eqo->idx];
+ for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
start = u64_stats_fetch_begin_irq(&txo->stats.sync);
- tx_pkts = txo->stats.tx_reqs;
+ tx_pkts += txo->stats.tx_reqs;
} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+ }
- /* Skip, if wrapped around or first calculation */
- now = jiffies;
- if (!aic->jiffies || time_before(now, aic->jiffies) ||
- rx_pkts < aic->rx_pkts_prev ||
- tx_pkts < aic->tx_reqs_prev) {
- be_aic_update(aic, rx_pkts, tx_pkts, now);
- continue;
- }
+ /* Skip, if wrapped around or first calculation */
+ now = jiffies;
+ if (!aic->jiffies || time_before(now, aic->jiffies) ||
+ rx_pkts < aic->rx_pkts_prev ||
+ tx_pkts < aic->tx_reqs_prev) {
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+ return aic->prev_eqd;
+ }
- delta = jiffies_to_msecs(now - aic->jiffies);
- pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
- (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
- eqd = (pps / 15000) << 2;
+ delta = jiffies_to_msecs(now - aic->jiffies);
+ if (delta == 0)
+ return aic->prev_eqd;
- if (eqd < 8)
- eqd = 0;
- eqd = min_t(u32, eqd, aic->max_eqd);
- eqd = max_t(u32, eqd, aic->min_eqd);
+ pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+ (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+ eqd = (pps / 15000) << 2;
- be_aic_update(aic, rx_pkts, tx_pkts, now);
-modify_eqd:
- if (eqd != aic->prev_eqd) {
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, aic->max_eqd);
+ eqd = max_t(u32, eqd, aic->min_eqd);
+
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+
+ return eqd;
+}
+
+/* For Skyhawk-R only */
+static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
+{
+ struct be_adapter *adapter = eqo->adapter;
+ struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
+ ulong now = jiffies;
+ int eqd;
+ u32 mult_enc;
+
+ if (!aic->enable)
+ return 0;
+
+ if (time_before_eq(now, aic->jiffies) ||
+ jiffies_to_msecs(now - aic->jiffies) < 1)
+ eqd = aic->prev_eqd;
+ else
+ eqd = be_get_new_eqd(eqo);
+
+ if (eqd > 100)
+ mult_enc = R2I_DLY_ENC_1;
+ else if (eqd > 60)
+ mult_enc = R2I_DLY_ENC_2;
+ else if (eqd > 20)
+ mult_enc = R2I_DLY_ENC_3;
+ else
+ mult_enc = R2I_DLY_ENC_0;
+
+ aic->prev_eqd = eqd;
+
+ return mult_enc;
+}
+
+void be_eqd_update(struct be_adapter *adapter, bool force_update)
+{
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
+ struct be_aic_obj *aic;
+ struct be_eq_obj *eqo;
+ int i, num = 0, eqd;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ aic = &adapter->aic_obj[eqo->idx];
+ eqd = be_get_new_eqd(eqo);
+ if (force_update || eqd != aic->prev_eqd) {
set_eqd[num].delay_multiplier = (eqd * 65)/100;
set_eqd[num].eq_id = eqo->q.id;
aic->prev_eqd = eqd;
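
be_get_eq_delay_mult_enc() above simply buckets the computed EQ delay into one of the R2I doorbell encodings from be_hw.h. A standalone restatement of that mapping, handy for sanity-checking the thresholds:

/* Restatement of the eqd -> delay-multiplier bucketing above, e.g.
 * eqd 120 -> ENC_1, 80 -> ENC_2, 30 -> ENC_3, 10 -> ENC_0. */
static unsigned int eqd_to_r2i_enc(int eqd)
{
	if (eqd > 100)
		return 1;	/* R2I_DLY_ENC_1: ~160us */
	if (eqd > 60)
		return 2;	/* R2I_DLY_ENC_2: ~96us  */
	if (eqd > 20)
		return 3;	/* R2I_DLY_ENC_3: ~48us  */
	return 0;		/* R2I_DLY_ENC_0: no delay */
}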
@@ -2212,13 +2448,27 @@ static void be_eq_clean(struct be_eq_obj *eqo)
{
int num = events_get(eqo);
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
{
- struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = 0;
+ rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
@@ -2236,7 +2486,9 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
if (lancer_chip(adapter))
break;
- if (flush_wait++ > 10 || be_hw_error(adapter)) {
+ if (flush_wait++ > 50 ||
+ be_check_error(adapter,
+ BE_ERROR_HW)) {
dev_warn(&adapter->pdev->dev,
"did not receive flush compl\n");
break;
@@ -2253,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
/* After cleanup, leave the CQ in unarmed state */
be_cq_notify(adapter, rx_cq->id, false, 0);
-
- /* Then free posted rx buffers that were not used */
- while (atomic_read(&rxq->used) > 0) {
- page_info = get_rx_page_info(rxo);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- }
- BUG_ON(atomic_read(&rxq->used));
- rxq->tail = 0;
- rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2297,7 +2539,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
pending_txqs--;
}
- if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
+ if (pending_txqs == 0 || ++timeo > 10 ||
+ be_check_error(adapter, BE_ERROR_HW))
break;
mdelay(1);
@@ -2341,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
+ free_cpumask_var(eqo->affinity_mask);
}
- free_cpumask_var(eqo->affinity_mask);
be_queue_free(adapter, &eqo->q);
}
}
@@ -2359,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
- if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_set_cpu(cpumask_local_spread(i, numa_node),
- eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
- napi_hash_add(&eqo->napi);
+
aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->idx = i;
@@ -2381,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
+
+ if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ eqo->affinity_mask);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
}
return 0;
}
@@ -2573,7 +2818,7 @@ static irqreturn_t be_intx(int irq, void *dev)
if (num_evts)
eqo->spurious_intr = 0;
}
- be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
+ be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
/* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
@@ -2589,7 +2834,7 @@ static irqreturn_t be_msix(int irq, void *dev)
{
struct be_eq_obj *eqo = dev;
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
napi_schedule(&eqo->napi);
return IRQ_HANDLED;
}
@@ -2838,6 +3083,7 @@ int be_poll(struct napi_struct *napi, int budget)
int max_work = 0, work, i, num_evts;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
+ u32 mult_enc = 0;
num_evts = events_get(eqo);
@@ -2863,10 +3109,18 @@ int be_poll(struct napi_struct *napi, int budget)
if (max_work < budget) {
napi_complete(napi);
- be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
+
+ /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
+ * delay via a delay multiplier encoding value
+ */
+ if (skyhawk_chip(adapter))
+ mult_enc = be_get_eq_delay_mult_enc(eqo);
+
+ be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
+ mult_enc);
} else {
/* As we'll continue in polling mode, count and clear events */
- be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
+ be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
}
return max_work;
}
@@ -2898,22 +3152,19 @@ void be_detect_error(struct be_adapter *adapter)
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- bool error_detected = false;
struct device *dev = &adapter->pdev->dev;
- struct net_device *netdev = adapter->netdev;
- if (be_hw_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (lancer_chip(adapter)) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ be_set_error(adapter, BE_ERROR_UE);
sliport_err1 = ioread32(adapter->db +
SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
SLIPORT_ERROR2_OFFSET);
- adapter->hw_error = true;
- error_detected = true;
/* Do not log error messages if its a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
@@ -2945,12 +3196,12 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (ue_lo || ue_hi) {
- error_detected = true;
dev_err(dev,
"Unrecoverable Error detected in the adapter");
dev_err(dev, "Please reboot server to recover");
if (skyhawk_chip(adapter))
- adapter->hw_error = true;
+ be_set_error(adapter, BE_ERROR_UE);
+
for (i = 0; ue_lo; ue_lo >>= 1, i++) {
if (ue_lo & 1)
dev_err(dev, "UE: %s bit set\n",
@@ -2963,8 +3214,6 @@ void be_detect_error(struct be_adapter *adapter)
}
}
}
- if (error_detected)
- netif_carrier_off(netdev);
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -3015,7 +3264,7 @@ fail:
dev_warn(dev, "MSIx enable failed\n");
/* INTx is not supported in VFs, so fail probe if enable_msix fails */
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return num_vec;
return 0;
}
@@ -3062,7 +3311,7 @@ static int be_irq_register(struct be_adapter *adapter)
if (status == 0)
goto done;
/* INTx is not supported for VF */
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return status;
}
@@ -3115,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
for_all_rx_queues(adapter, rxo, i) {
q = &rxo->q;
if (q->created) {
+ /* If RXQs are destroyed while in an "out of buffer"
+ * state, there is a possibility of an HW stall on
+ * Lancer. So, post 64 buffers to each queue to relieve
+ * the "out of buffer" condition.
+ * Make sure there's space in the RXQ before posting.
+ */
+ if (lancer_chip(adapter)) {
+ be_rx_cq_clean(rxo);
+ if (atomic_read(&q->used) == 0)
+ be_post_rx_frags(rxo, GFP_KERNEL,
+ MAX_RX_POST);
+ }
+
be_cmd_rxq_destroy(adapter, q);
be_rx_cq_clean(rxo);
+ be_rxq_clean(rxo);
}
be_queue_free(adapter, q);
}
}
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[0], 0);
+
+ be_clear_uc_list(adapter);
+
+ /* The IFACE flags are enabled in the open path and cleared
+ * in the close path. When a VF gets detached from the host and
+ * assigned to a VM the following happens:
+ * - VF's IFACE flags get cleared in the detach path
+ * - IFACE create is issued by the VF in the attach path
+ * Due to a bug in the BE3/Skyhawk-R FW
+ * (Lancer FW doesn't have the bug), the IFACE capability flags
+ * specified along with the IFACE create cmd issued by a VF are not
+ * honoured by FW. As a consequence, if a *new* driver
+ * (that enables/disables IFACE flags in open/close)
+ * is loaded in the host and an *old* driver is used by a VM/VF,
+ * the IFACE gets created *without* the needed flags.
+ * To avoid this, disable RX-filter flags only for Lancer.
+ */
+ if (lancer_chip(adapter)) {
+ be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+ adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+ }
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3134,6 +3424,8 @@ static int be_close(struct net_device *netdev)
if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0;
+ be_disable_if_filters(adapter);
+
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3153,7 +3445,6 @@ static int be_close(struct net_device *netdev)
be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
- be_clear_uc_list(adapter);
for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
@@ -3229,9 +3520,37 @@ static int be_rx_qs_create(struct be_adapter *adapter)
memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
- /* First time posting */
+ /* Post 1 less than RXQ-len to avoid head being equal to tail,
+ * which is a queue empty condition
+ */
for_all_rx_queues(adapter, rxo, i)
- be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
+ be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
+
+ return 0;
+}
+
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+ if (status)
+ return status;
+
+ /* For BE3 VFs, the PF programs the initial MAC address */
+ if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+ adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status)
+ return status;
+ }
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
+
+ be_set_rx_mode(adapter->netdev);
+
return 0;
}
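
The RXQ-posting comment above relies on the usual ring convention that head == tail means "empty", which caps a ring of length N at N - 1 outstanding entries: posting the full queue length would wrap the head back onto the tail and make a full queue indistinguishable from an empty one. A tiny self-contained illustration of that invariant:

/* Why only RX_Q_LEN - 1 buffers are posted above: with head == tail
 * defined as "empty", a ring of length N holds at most N - 1 entries. */
#include <assert.h>

#define RING_LEN 8

int main(void)
{
	unsigned int head = 0, tail = 0, posted;

	for (posted = 0; posted < RING_LEN - 1; posted++)
		head = (head + 1) % RING_LEN;	/* producer posts a buffer */

	assert(head != tail);			/* still distinguishable   */

	head = (head + 1) % RING_LEN;		/* one more post...        */
	assert(head == tail);			/* ...looks empty again    */
	return 0;
}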
@@ -3248,6 +3567,10 @@ static int be_open(struct net_device *netdev)
if (status)
goto err;
+ status = be_enable_if_filters(adapter);
+ if (status)
+ goto err;
+
status = be_irq_register(adapter);
if (status)
goto err;
@@ -3263,7 +3586,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
be_enable_busy_poll(eqo);
- be_eq_notify(adapter, eqo->q.id, true, true, 0);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -3444,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
-static void be_mac_clear(struct be_adapter *adapter)
-{
- if (adapter->pmac_id) {
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
- }
-}
-
#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
@@ -3528,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
#endif
- /* delete the primary mac along with the uc-mac list */
- be_mac_clear(adapter);
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -3540,30 +3853,16 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
- u32 cap_flags, u32 vf)
-{
- u32 en_flags;
-
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
- en_flags &= cap_flags;
-
- return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
static int be_vfs_if_create(struct be_adapter *adapter)
{
struct be_resources res = {0};
+ u32 cap_flags, en_flags, vf;
struct be_vf_cfg *vf_cfg;
- u32 cap_flags, vf;
int status;
/* If a FW profile exists, then cap_flags are updated */
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
@@ -3579,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
}
}
- status = be_if_create(adapter, &vf_cfg->if_handle,
- cap_flags, vf + 1);
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
return status;
}
@@ -3610,6 +3913,7 @@ static int be_vf_setup(struct be_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
+ bool spoofchk;
old_vfs = pci_num_vf(adapter->pdev);
@@ -3657,6 +3961,12 @@ static int be_vf_setup(struct be_adapter *adapter)
if (!old_vfs)
be_cmd_config_qos(adapter, 0, 0, vf + 1);
+ status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
+ vf_cfg->if_handle, NULL,
+ &spoofchk);
+ if (!status)
+ vf_cfg->spoofchk = spoofchk;
+
if (!old_vfs) {
be_cmd_enable_vf(adapter, vf + 1);
be_cmd_set_logical_link_config(adapter,
@@ -3733,8 +4043,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
* *only* if it is RSS-capable.
*/
if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
- !be_physfn(adapter) || (be_is_mc(adapter) &&
- !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
+ be_virtfn(adapter) ||
+ (be_is_mc(adapter) &&
+ !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
res->max_tx_qs = 1;
} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
struct be_resources super_nic_res = {0};
@@ -3944,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- } else {
- /* Maybe the HW was reset; dev_addr must be re-programmed */
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
}
- /* For BE3-R VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter)))
- be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
return 0;
}
@@ -4075,7 +4379,7 @@ static int be_func_init(struct be_adapter *adapter)
msleep(100);
/* We can clear all errors when function reset succeeds */
- be_clear_all_error(adapter);
+ be_clear_error(adapter, BE_CLEAR_ALL);
}
/* Tell FW we're ready to fire cmds */
@@ -4092,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 en_flags;
int status;
status = be_func_init(adapter);
@@ -4114,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- status = be_if_create(adapter, &adapter->if_handle,
- be_if_cap_flags(adapter), 0);
+ /* will enable all the needed filter flags in be_open() */
+ en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
if (status)
goto err;
@@ -4141,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
}
- if (adapter->vlans_added)
- be_vid_config(adapter);
-
- be_set_rx_mode(adapter->netdev);
-
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
if (status)
@@ -4182,7 +4485,7 @@ static void be_netpoll(struct net_device *netdev)
int i;
for_all_evt_queues(adapter, eqo, i) {
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
napi_schedule(&eqo->napi);
}
}
@@ -4666,14 +4969,11 @@ static int lancer_fw_download(struct be_adapter *adapter,
return 0;
}
-#define BE2_UFI 2
-#define BE3_UFI 3
-#define BE3R_UFI 10
-#define SH_UFI 4
-#define SH_P2_UFI 11
-
-static int be_get_ufi_type(struct be_adapter *adapter,
- struct flash_file_hdr_g3 *fhdr)
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+ struct flash_file_hdr_g3 *fhdr)
{
if (!fhdr) {
dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
@@ -4685,43 +4985,22 @@ static int be_get_ufi_type(struct be_adapter *adapter,
*/
switch (fhdr->build[0]) {
case BLD_STR_UFI_TYPE_SH:
- return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
- SH_UFI;
+ if (!skyhawk_chip(adapter))
+ return false;
+ break;
case BLD_STR_UFI_TYPE_BE3:
- return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
- BE3_UFI;
+ if (!BE3_chip(adapter))
+ return false;
+ break;
case BLD_STR_UFI_TYPE_BE2:
- return BE2_UFI;
- default:
- return -1;
- }
-}
-
-/* Check if the flash image file is compatible with the adapter that
- * is being flashed.
- * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
- * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
- */
-static bool be_check_ufi_compatibility(struct be_adapter *adapter,
- struct flash_file_hdr_g3 *fhdr)
-{
- int ufi_type = be_get_ufi_type(adapter, fhdr);
-
- switch (ufi_type) {
- case SH_P2_UFI:
- return skyhawk_chip(adapter);
- case SH_UFI:
- return (skyhawk_chip(adapter) &&
- adapter->asic_rev < ASIC_REV_P2);
- case BE3R_UFI:
- return BE3_chip(adapter);
- case BE3_UFI:
- return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
- case BE2_UFI:
- return BE2_chip(adapter);
+ if (!BE2_chip(adapter))
+ return false;
+ break;
default:
return false;
}
+
+ return (fhdr->asic_type_rev >= adapter->asic_rev);
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
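
The rewritten check above drops the per-type UFI constants: an image is compatible when its build string matches the chip family and its asic_type_rev is at least the adapter's ASIC revision. A hedged restatement, using the ASIC_REV_B0/ASIC_REV_P2 values (0x10/0x11) that this patch removes from be_cmds.h as the worked numbers:

/* Standalone restatement of the new compatibility rule.  The revision
 * numbers in the comment come from the defines removed above and are
 * used only as a worked example. */
static bool ufi_compatible(bool family_matches, unsigned int image_rev,
			   unsigned int adapter_rev)
{
	/* e.g. a Skyhawk P2 adapter (adapter_rev 0x11) accepts an image
	 * with asic_type_rev >= 0x11 but rejects an older 0x10 image. */
	return family_matches && image_rev >= adapter_rev;
}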
@@ -4829,7 +5108,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
adapter->if_handle,
mode == BRIDGE_MODE_VEPA ?
PORT_FWD_TYPE_VEPA :
- PORT_FWD_TYPE_VEB);
+ PORT_FWD_TYPE_VEB, 0);
if (status)
goto err;
@@ -4861,7 +5140,8 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
hsw_mode = PORT_FWD_TYPE_VEB;
} else {
status = be_cmd_get_hsw_config(adapter, NULL, 0,
- adapter->if_handle, &hsw_mode);
+ adapter->if_handle, &hsw_mode,
+ NULL);
if (status)
return 0;
}
@@ -4869,7 +5149,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
hsw_mode == PORT_FWD_TYPE_VEPA ?
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
- 0, 0, nlflags);
+ 0, 0, nlflags, filter_mask, NULL);
}
#ifdef CONFIG_BE2NET_VXLAN
@@ -4894,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
struct device *dev = &adapter->pdev->dev;
int status;
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -4941,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->vxlan_port != port)
@@ -5014,6 +5294,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_set_vf_rate = be_set_vf_tx_rate,
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
+ .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
@@ -5118,7 +5399,7 @@ static void be_err_detection_task(struct work_struct *work)
be_detect_error(adapter);
- if (adapter->hw_error) {
+ if (be_check_error(adapter, BE_ERROR_HW)) {
be_cleanup(adapter);
/* As of now error recovery support is in Lancer only */
@@ -5182,7 +5463,9 @@ static void be_worker(struct work_struct *work)
be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
}
- be_eqd_update(adapter);
+ /* EQ-delay update for Skyhawk is done while notifying EQ */
+ if (!skyhawk_chip(adapter))
+ be_eqd_update(adapter, false);
if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
be_log_sfp_info(adapter);
@@ -5202,7 +5485,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
static int db_bar(struct be_adapter *adapter)
{
- if (lancer_chip(adapter) || !be_physfn(adapter))
+ if (lancer_chip(adapter) || be_virtfn(adapter))
return 0;
else
return 4;
@@ -5381,6 +5664,30 @@ static void be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
+static ssize_t be_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct be_adapter *adapter = dev_get_drvdata(dev);
+
+ /* Unit: millidegree Celsius */
+ if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
+ return -EIO;
+ else
+ return sprintf(buf, "%u\n",
+ adapter->hwmon_info.be_on_die_temp * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ be_hwmon_show_temp, NULL, 1);
+
+static struct attribute *be_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(be_hwmon);
+
static char *mc_name(struct be_adapter *adapter)
{
char *str = ""; /* default */
@@ -5500,6 +5807,16 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
be_schedule_err_detection(adapter);
+ /* On Die temperature not supported for VF. */
+ if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
+ adapter->hwmon_info.hwmon_dev =
+ devm_hwmon_device_register_with_groups(&pdev->dev,
+ DRV_NAME,
+ adapter,
+ be_hwmon_groups);
+ adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
+ }
+
dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
func_name(adapter), mc_name(adapter), adapter->port_name);
@@ -5592,8 +5909,8 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
dev_err(&adapter->pdev->dev, "EEH error detected\n");
- if (!adapter->eeh_error) {
- adapter->eeh_error = true;
+ if (!be_check_error(adapter, BE_ERROR_EEH)) {
+ be_set_error(adapter, BE_ERROR_EEH);
be_cancel_err_detection(adapter);
@@ -5640,7 +5957,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
pci_cleanup_aer_uncorrect_error_status(pdev);
- be_clear_all_error(adapter);
+ be_clear_error(adapter, BE_CLEAR_ALL);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 132866433..60368207b 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index e6f7eb1a7..cde6ef905 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig
new file mode 100644
index 000000000..48ecbc8aa
--- /dev/null
+++ b/drivers/net/ethernet/ezchip/Kconfig
@@ -0,0 +1,26 @@
+#
+# EZchip network device configuration
+#
+
+config NET_VENDOR_EZCHIP
+ bool "EZchip devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about EZchip devices. If you say Y, you will be asked for
+ your specific device in the following questions.
+
+if NET_VENDOR_EZCHIP
+
+config EZCHIP_NPS_MANAGEMENT_ENET
+ tristate "EZchip NPS management enet support"
+ depends on OF_IRQ && OF_NET
+ ---help---
+ Simple LAN device for debug or management purposes.
+ The device supports interrupts for RX and TX (completion)
+ and has no DMA capability.
+
+endif
diff --git a/drivers/net/ethernet/ezchip/Makefile b/drivers/net/ethernet/ezchip/Makefile
new file mode 100644
index 000000000..e490176a8
--- /dev/null
+++ b/drivers/net/ethernet/ezchip/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_EZCHIP_NPS_MANAGEMENT_ENET) += nps_enet.o
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
new file mode 100644
index 000000000..24a85b292
--- /dev/null
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include "nps_enet.h"
+
+#define DRV_NAME "nps_mgt_enet"
+
+static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ u32 i, len = DIV_ROUND_UP(frame_len, sizeof(u32));
+
+ /* Empty Rx FIFO buffer by reading all words */
+ for (i = 0; i < len; i++)
+ nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+}
+
+static void nps_enet_read_rx_fifo(struct net_device *ndev,
+ unsigned char *dst, u32 length)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ s32 i, last = length & (sizeof(u32) - 1);
+ u32 *reg = (u32 *)dst, len = length / sizeof(u32);
+ bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));
+
+ /* In case dst is not aligned we need an intermediate buffer */
+ if (dst_is_aligned)
+ for (i = 0; i < len; i++, reg++)
+ *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+ else { /* !dst_is_aligned */
+ for (i = 0; i < len; i++, reg++) {
+ u32 buf =
+ nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+
+ /* to accommodate word-unaligned address of "reg"
+ * we have to do memcpy_toio() instead of simple "=".
+ */
+ memcpy_toio((void __iomem *)reg, &buf, sizeof(buf));
+ }
+ }
+
+ /* copy last bytes (if any) */
+ if (last) {
+ u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+
+ memcpy_toio((void __iomem *)reg, &buf, last);
+ }
+}
+
+static u32 nps_enet_rx_handler(struct net_device *ndev)
+{
+ u32 frame_len, err = 0;
+ u32 work_done = 0;
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ struct nps_enet_rx_ctl rx_ctrl;
+
+ rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ frame_len = rx_ctrl.nr;
+
+ /* Check if we got RX */
+ if (!rx_ctrl.cr)
+ return work_done;
+
+ /* If we got here there is a work for us */
+ work_done++;
+
+ /* Check Rx error */
+ if (rx_ctrl.er) {
+ ndev->stats.rx_errors++;
+ err = 1;
+ }
+
+ /* Check Rx CRC error */
+ if (rx_ctrl.crc) {
+ ndev->stats.rx_crc_errors++;
+ ndev->stats.rx_dropped++;
+ err = 1;
+ }
+
+ /* Check Frame length Min 64b */
+ if (unlikely(frame_len < ETH_ZLEN)) {
+ ndev->stats.rx_length_errors++;
+ ndev->stats.rx_dropped++;
+ err = 1;
+ }
+
+ if (err)
+ goto rx_irq_clean;
+
+ /* Skb allocation */
+ skb = netdev_alloc_skb_ip_align(ndev, frame_len);
+ if (unlikely(!skb)) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ goto rx_irq_clean;
+ }
+
+ /* Copy frame from Rx fifo into the skb */
+ nps_enet_read_rx_fifo(ndev, skb->data, frame_len);
+
+ skb_put(skb, frame_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += frame_len;
+ netif_receive_skb(skb);
+
+ goto rx_irq_frame_done;
+
+rx_irq_clean:
+ /* Clean Rx fifo */
+ nps_enet_clean_rx_fifo(ndev, frame_len);
+
+rx_irq_frame_done:
+ /* Ack Rx ctrl register */
+ nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0);
+
+ return work_done;
+}
+
+static void nps_enet_tx_handler(struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct nps_enet_tx_ctl tx_ctrl;
+
+ tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+
+ /* Check if we got TX */
+ if (!priv->tx_packet_sent || tx_ctrl.ct)
+ return;
+
+ /* Check Tx transmit error */
+ if (unlikely(tx_ctrl.et)) {
+ ndev->stats.tx_errors++;
+ } else {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += tx_ctrl.nt;
+ }
+
+ if (priv->tx_skb) {
+ dev_kfree_skb(priv->tx_skb);
+ priv->tx_skb = NULL;
+ }
+
+ priv->tx_packet_sent = false;
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+/**
+ * nps_enet_poll - NAPI poll handler.
+ * @napi: Pointer to napi_struct structure.
+ * @budget: How many frames to process on one call.
+ *
+ * returns: Number of processed frames
+ */
+static int nps_enet_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct nps_enet_buf_int_enable buf_int_enable;
+ u32 work_done;
+
+ buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+ buf_int_enable.tx_done = NPS_ENET_ENABLE;
+ nps_enet_tx_handler(ndev);
+ work_done = nps_enet_rx_handler(ndev);
+ if (work_done < budget) {
+ napi_complete(napi);
+ nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
+ buf_int_enable.value);
+ }
+
+ return work_done;
+}
+
+/**
+ * nps_enet_irq_handler - Global interrupt handler for ENET.
+ * @irq: irq number.
+ * @dev_instance: device instance.
+ *
+ * returns: IRQ_HANDLED for all cases.
+ *
+ * EZchip ENET has 2 interrupt causes; the bits raised in the
+ * interrupt cause register tell which event triggered the interrupt.
+ * One cause is for RX and the other for TX (completion).
+ */
+static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
+{
+ struct net_device *ndev = dev_instance;
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct nps_enet_buf_int_cause buf_int_cause;
+
+ buf_int_cause.value =
+ nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+
+ if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
+ __napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void nps_enet_set_hw_mac_address(struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct nps_enet_ge_mac_cfg_1 ge_mac_cfg_1;
+ struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2;
+
+ /* set MAC address in HW */
+ ge_mac_cfg_1.octet_0 = ndev->dev_addr[0];
+ ge_mac_cfg_1.octet_1 = ndev->dev_addr[1];
+ ge_mac_cfg_1.octet_2 = ndev->dev_addr[2];
+ ge_mac_cfg_1.octet_3 = ndev->dev_addr[3];
+ ge_mac_cfg_2->octet_4 = ndev->dev_addr[4];
+ ge_mac_cfg_2->octet_5 = ndev->dev_addr[5];
+
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1,
+ ge_mac_cfg_1.value);
+
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
+ ge_mac_cfg_2->value);
+}
+
+/**
+ * nps_enet_hw_reset - Reset the network device.
+ * @ndev: Pointer to the network device.
+ *
+ * This function resets the PCS and the Tx FIFO.
+ * The programming model is to set the relevant reset bits,
+ * wait for some time for this to propagate, and then clear
+ * the reset bits. This way we ensure the device completes
+ * the reset procedure successfully.
+ */
+static void nps_enet_hw_reset(struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct nps_enet_ge_rst ge_rst;
+ struct nps_enet_phase_fifo_ctl phase_fifo_ctl;
+
+ ge_rst.value = 0;
+ phase_fifo_ctl.value = 0;
+ /* PCS reset sequence */
+ ge_rst.gmac_0 = NPS_ENET_ENABLE;
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value);
+ usleep_range(10, 20);
+ ge_rst.value = 0;
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value);
+
+ /* Tx fifo reset sequence */
+ phase_fifo_ctl.rst = NPS_ENET_ENABLE;
+ phase_fifo_ctl.init = NPS_ENET_ENABLE;
+ nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
+ phase_fifo_ctl.value);
+ usleep_range(10, 20);
+ phase_fifo_ctl.value = 0;
+ nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
+ phase_fifo_ctl.value);
+}
+
+static void nps_enet_hw_enable_control(struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct nps_enet_ge_mac_cfg_0 ge_mac_cfg_0;
+ struct nps_enet_buf_int_enable buf_int_enable;
+ struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2;
+ struct nps_enet_ge_mac_cfg_3 *ge_mac_cfg_3 = &priv->ge_mac_cfg_3;
+ s32 max_frame_length;
+
+ ge_mac_cfg_0.value = 0;
+ buf_int_enable.value = 0;
+ /* Enable Rx and Tx statistics */
+ ge_mac_cfg_2->stat_en = NPS_ENET_GE_MAC_CFG_2_STAT_EN;
+
+ /* Discard packets with different MAC address */
+ ge_mac_cfg_2->disc_da = NPS_ENET_ENABLE;
+
+ /* Discard multicast packets */
+ ge_mac_cfg_2->disc_mc = NPS_ENET_ENABLE;
+
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
+ ge_mac_cfg_2->value);
+
+ /* Discard Packets bigger than max frame length */
+ max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
+ if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+ ge_mac_cfg_3->max_len = max_frame_length;
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+ ge_mac_cfg_3->value);
+ }
+
+ /* Enable interrupts */
+ buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+ buf_int_enable.tx_done = NPS_ENET_ENABLE;
+ nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
+ buf_int_enable.value);
+
+ /* Write device MAC address to HW */
+ nps_enet_set_hw_mac_address(ndev);
+
+ /* Rx and Tx HW features */
+ ge_mac_cfg_0.tx_pad_en = NPS_ENET_ENABLE;
+ ge_mac_cfg_0.tx_crc_en = NPS_ENET_ENABLE;
+ ge_mac_cfg_0.rx_crc_strip = NPS_ENET_ENABLE;
+
+ /* IFG configuration */
+ ge_mac_cfg_0.rx_ifg = NPS_ENET_GE_MAC_CFG_0_RX_IFG;
+ ge_mac_cfg_0.tx_ifg = NPS_ENET_GE_MAC_CFG_0_TX_IFG;
+
+ /* preamble configuration */
+ ge_mac_cfg_0.rx_pr_check_en = NPS_ENET_ENABLE;
+ ge_mac_cfg_0.tx_pr_len = NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN;
+
+ /* enable flow control frames */
+ ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
+ ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
+ ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+
+ /* Enable Rx and Tx */
+ ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
+ ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
+
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
+ ge_mac_cfg_0.value);
+}
+
+static void nps_enet_hw_disable_control(struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+
+ /* Disable interrupts */
+ nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
+
+ /* Disable Rx and Tx */
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, 0);
+}
+
+static void nps_enet_send_frame(struct net_device *ndev,
+ struct sk_buff *skb)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct nps_enet_tx_ctl tx_ctrl;
+ short length = skb->len;
+ u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
+ u32 *src = (u32 *)virt_to_phys(skb->data);
+ bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
+
+ tx_ctrl.value = 0;
+ /* In case src is not aligned we need an intermediate buffer */
+ if (src_is_aligned)
+ for (i = 0; i < len; i++, src++)
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
+ else { /* !src_is_aligned */
+ for (i = 0; i < len; i++, src++) {
+ u32 buf;
+
+ /* to accommodate word-unaligned address of "src"
+ * we have to do memcpy_fromio() instead of simple "="
+ */
+ memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf));
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf);
+ }
+ }
+ /* Write the length of the Frame */
+ tx_ctrl.nt = length;
+
+ /* Indicate SW is done */
+ priv->tx_packet_sent = true;
+ tx_ctrl.ct = NPS_ENET_ENABLE;
+
+ /* Send Frame */
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl.value);
+}
+
+/**
+ * nps_enet_set_mac_address - Set the MAC address for this device.
+ * @ndev: Pointer to net_device structure.
+ * @p: 6 byte Address to be written as MAC address.
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * returns: -EBUSY if the net device is busy or 0 if the address is set
+ * successfully.
+ */
+static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = p;
+ s32 res;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ res = eth_mac_addr(ndev, p);
+ if (!res) {
+ ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ nps_enet_set_hw_mac_address(ndev);
+ }
+
+ return res;
+}
+
+/**
+ * nps_enet_set_rx_mode - Change the receive filtering mode.
+ * @ndev: Pointer to the network device.
+ *
+ * This function enables/disables promiscuous mode
+ */
+static void nps_enet_set_rx_mode(struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2;
+
+ ge_mac_cfg_2.value = priv->ge_mac_cfg_2.value;
+
+ if (ndev->flags & IFF_PROMISC) {
+ ge_mac_cfg_2.disc_da = NPS_ENET_DISABLE;
+ ge_mac_cfg_2.disc_mc = NPS_ENET_DISABLE;
+ } else {
+ ge_mac_cfg_2.disc_da = NPS_ENET_ENABLE;
+ ge_mac_cfg_2.disc_mc = NPS_ENET_ENABLE;
+ }
+
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2.value);
+}
+
+/**
+ * nps_enet_open - Open the network device.
+ * @ndev: Pointer to the network device.
+ *
+ * returns: 0, on success or non-zero error value on failure.
+ *
+ * This function sets the MAC address, requests and enables an IRQ
+ * for the ENET device and starts the Tx queue.
+ */
+static s32 nps_enet_open(struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+ s32 err;
+
+ /* Reset private variables */
+ priv->tx_packet_sent = false;
+ priv->ge_mac_cfg_2.value = 0;
+ priv->ge_mac_cfg_3.value = 0;
+
+ /* ge_mac_cfg_3 default values */
+ priv->ge_mac_cfg_3.rx_ifg_th = NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH;
+ priv->ge_mac_cfg_3.max_len = NPS_ENET_GE_MAC_CFG_3_MAX_LEN;
+
+ /* Disable HW device */
+ nps_enet_hw_disable_control(ndev);
+
+ /* irq Rx allocation */
+ err = request_irq(priv->irq, nps_enet_irq_handler,
+ 0, "enet-rx-tx", ndev);
+ if (err)
+ return err;
+
+ napi_enable(&priv->napi);
+
+ /* Enable HW device */
+ nps_enet_hw_reset(ndev);
+ nps_enet_hw_enable_control(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+/**
+ * nps_enet_stop - Close the network device.
+ * @ndev: Pointer to the network device.
+ *
+ * This function stops the Tx queue and disables interrupts for the ENET device.
+ */
+static s32 nps_enet_stop(struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+
+ napi_disable(&priv->napi);
+ netif_stop_queue(ndev);
+ nps_enet_hw_disable_control(ndev);
+ free_irq(priv->irq, ndev);
+
+ return 0;
+}
+
+/**
+ * nps_enet_start_xmit - Starts the data transmission.
+ * @skb: sk_buff pointer that contains data to be Transmitted.
+ * @ndev: Pointer to net_device structure.
+ *
+ * returns: NETDEV_TX_OK on success, or
+ * NETDEV_TX_BUSY if any of the descriptors are not free.
+ *
+ * This function is invoked from upper layers to initiate transmission.
+ */
+static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+
+ /* This driver handles one frame at a time */
+ netif_stop_queue(ndev);
+
+ nps_enet_send_frame(ndev, skb);
+
+ priv->tx_skb = skb;
+
+ return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void nps_enet_poll_controller(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ nps_enet_irq_handler(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
+static const struct net_device_ops nps_netdev_ops = {
+ .ndo_open = nps_enet_open,
+ .ndo_stop = nps_enet_stop,
+ .ndo_start_xmit = nps_enet_start_xmit,
+ .ndo_set_mac_address = nps_enet_set_mac_address,
+ .ndo_set_rx_mode = nps_enet_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = nps_enet_poll_controller,
+#endif
+};
+
+static s32 nps_enet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct nps_enet_priv *priv;
+ s32 err = 0;
+ const char *mac_addr;
+ struct resource *res_regs;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ ndev = alloc_etherdev(sizeof(struct nps_enet_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ priv = netdev_priv(ndev);
+
+ /* The EZ NET specific entries in the device structure. */
+ ndev->netdev_ops = &nps_netdev_ops;
+ ndev->watchdog_timeo = (400 * HZ / 1000);
+ /* FIXME :: no multicast support yet */
+ ndev->flags &= ~IFF_MULTICAST;
+
+ res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs_base = devm_ioremap_resource(dev, res_regs);
+ if (IS_ERR(priv->regs_base)) {
+ err = PTR_ERR(priv->regs_base);
+ goto out_netdev;
+ }
+ dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);
+
+ /* set kernel MAC address to dev */
+ mac_addr = of_get_mac_address(dev->of_node);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ else
+ eth_hw_addr_random(ndev);
+
+ /* Get IRQ number */
+ priv->irq = platform_get_irq(pdev, 0);
+ if (!priv->irq) {
+ dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
+ err = -ENODEV;
+ goto out_netdev;
+ }
+
+ netif_napi_add(ndev, &priv->napi, nps_enet_poll,
+ NPS_ENET_NAPI_POLL_WEIGHT);
+
+ /* Register the driver. Should be the last thing in probe */
+ err = register_netdev(ndev);
+ if (err) {
+ dev_err(dev, "Failed to register ndev for %s, err = 0x%08x\n",
+ ndev->name, (s32)err);
+ goto out_netif_api;
+ }
+
+ dev_info(dev, "(rx/tx=%d)\n", priv->irq);
+ return 0;
+
+out_netif_api:
+ netif_napi_del(&priv->napi);
+out_netdev:
+ if (err)
+ free_netdev(ndev);
+
+ return err;
+}
+
+static s32 nps_enet_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct nps_enet_priv *priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ netif_napi_del(&priv->napi);
+
+ return 0;
+}
+
+static const struct of_device_id nps_enet_dt_ids[] = {
+ { .compatible = "ezchip,nps-mgt-enet" },
+ { /* Sentinel */ }
+};
+
+static struct platform_driver nps_enet_driver = {
+ .probe = nps_enet_probe,
+ .remove = nps_enet_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = nps_enet_dt_ids,
+ },
+};
+
+module_platform_driver(nps_enet_driver);
+
+MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
new file mode 100644
index 000000000..fc45c9daa
--- /dev/null
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#ifndef _NPS_ENET_H
+#define _NPS_ENET_H
+
+/* default values */
+#define NPS_ENET_NAPI_POLL_WEIGHT 0x2
+#define NPS_ENET_MAX_FRAME_LENGTH 0x3FFF
+#define NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR 0x7
+#define NPS_ENET_GE_MAC_CFG_0_RX_IFG 0x5
+#define NPS_ENET_GE_MAC_CFG_0_TX_IFG 0xC
+#define NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN 0x7
+#define NPS_ENET_GE_MAC_CFG_2_STAT_EN 0x3
+#define NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH 0x14
+#define NPS_ENET_GE_MAC_CFG_3_MAX_LEN 0x3FFC
+#define NPS_ENET_ENABLE 1
+#define NPS_ENET_DISABLE 0
+
+/* register definitions */
+#define NPS_ENET_REG_TX_CTL 0x800
+#define NPS_ENET_REG_TX_BUF 0x808
+#define NPS_ENET_REG_RX_CTL 0x810
+#define NPS_ENET_REG_RX_BUF 0x818
+#define NPS_ENET_REG_BUF_INT_ENABLE 0x8C0
+#define NPS_ENET_REG_BUF_INT_CAUSE 0x8C4
+#define NPS_ENET_REG_GE_MAC_CFG_0 0x1000
+#define NPS_ENET_REG_GE_MAC_CFG_1 0x1004
+#define NPS_ENET_REG_GE_MAC_CFG_2 0x1008
+#define NPS_ENET_REG_GE_MAC_CFG_3 0x100C
+#define NPS_ENET_REG_GE_RST 0x1400
+#define NPS_ENET_REG_PHASE_FIFO_CTL 0x1404
+
+/* Tx control register */
+struct nps_enet_tx_ctl {
+ union {
+ /* ct: SW sets to indicate frame ready in Tx buffer for
+ * transmission. HW resets it when transmission is done
+ * et: Transmit error
+ * nt: Length in bytes of Tx frame loaded to Tx buffer
+ */
+ struct {
+ u32
+ __reserved_1:16,
+ ct:1,
+ et:1,
+ __reserved_2:3,
+ nt:11;
+ };
+
+ u32 value;
+ };
+};
+
+/* Rx control register */
+struct nps_enet_rx_ctl {
+ union {
+ /* cr: HW sets to indicate frame ready in Rx buffer.
+ * SW resets it to indicate the host has read the received frame
+ * and new frames can be written to Rx buffer
+ * er: Rx error indication
+ * crc: Rx CRC error indication
+ * nr: Length in bytes of Rx frame loaded by MAC to Rx buffer
+ */
+ struct {
+ u32
+ __reserved_1:16,
+ cr:1,
+ er:1,
+ crc:1,
+ __reserved_2:2,
+ nr:11;
+ };
+
+ u32 value;
+ };
+};
+
+/* Interrupt enable for data buffer events register */
+struct nps_enet_buf_int_enable {
+ union {
+ /* tx_done: Interrupt generation in the case when current frame
+ * was read from TX buffer
+ * rx_rdy: Interrupt generation in the case when new frame
+ * is ready in Rx buffer
+ */
+ struct {
+ u32
+ __reserved:30,
+ tx_done:1,
+ rx_rdy:1;
+ };
+
+ u32 value;
+ };
+};
+
+/* Interrupt cause for data buffer events register */
+struct nps_enet_buf_int_cause {
+ union {
+ /* tx_done: Interrupt in the case when current frame was
+ * read from TX buffer.
+ * rx_rdy: Interrupt in the case when new frame is ready
+ * in RX buffer.
+ */
+ struct {
+ u32
+ __reserved:30,
+ tx_done:1,
+ rx_rdy:1;
+ };
+
+ u32 value;
+ };
+};
+
+/* Gbps Eth MAC Configuration 0 register */
+struct nps_enet_ge_mac_cfg_0 {
+ union {
+ /* tx_pr_len: Transmit preamble length in bytes
+ * tx_ifg_nib: Tx idle pattern
+ * nib_mode: Nibble (4-bit) Mode
+ * rx_pr_check_en: Receive preamble Check Enable
+ * tx_ifg: Transmit inter-Frame Gap
+ * rx_ifg: Receive inter-Frame Gap
+ * tx_fc_retr: Transmit Flow Control Retransmit Mode
+ * rx_length_check_en: Receive Length Check Enable
+ * rx_crc_ignore: Results of the CRC check are ignored
+ * rx_crc_strip: MAC strips the CRC from received frames
+ * rx_fc_en: Receive Flow Control Enable
+ * tx_crc_en: Transmit CRC Enabled
+ * tx_pad_en: Transmit Padding Enable
+ * tx_cf_en: Transmit Flow Control Enable
+ * tx_en: Transmit Enable
+ * rx_en: Receive Enable
+ */
+ struct {
+ u32
+ tx_pr_len:4,
+ tx_ifg_nib:4,
+ nib_mode:1,
+ rx_pr_check_en:1,
+ tx_ifg:6,
+ rx_ifg:4,
+ tx_fc_retr:3,
+ rx_length_check_en:1,
+ rx_crc_ignore:1,
+ rx_crc_strip:1,
+ rx_fc_en:1,
+ tx_crc_en:1,
+ tx_pad_en:1,
+ tx_fc_en:1,
+ tx_en:1,
+ rx_en:1;
+ };
+
+ u32 value;
+ };
+};
+
+/* Gbps Eth MAC Configuration 1 register */
+struct nps_enet_ge_mac_cfg_1 {
+ union {
+ /* octet_3: MAC address octet 3
+ * octet_2: MAC address octet 2
+ * octet_1: MAC address octet 1
+ * octet_0: MAC address octet 0
+ */
+ struct {
+ u32
+ octet_3:8,
+ octet_2:8,
+ octet_1:8,
+ octet_0:8;
+ };
+
+ u32 value;
+ };
+};
+
+/* Gbps Eth MAC Configuration 2 register */
+struct nps_enet_ge_mac_cfg_2 {
+ union {
+ /* transmit_flush_en: MAC flush enable
+ * stat_en: RMON statistics interface enable
+ * disc_da: Discard frames with DA different
+ * from MAC address
+ * disc_bc: Discard broadcast frames
+ * disc_mc: Discard multicast frames
+ * octet_5: MAC address octet 5
+ * octet_4: MAC address octet 4
+ */
+ struct {
+ u32
+ transmit_flush_en:1,
+ __reserved_1:5,
+ stat_en:2,
+ __reserved_2:1,
+ disc_da:1,
+ disc_bc:1,
+ disc_mc:1,
+ __reserved_3:4,
+ octet_5:8,
+ octet_4:8;
+ };
+
+ u32 value;
+ };
+};
+
+/* Gbps Eth MAC Configuration 3 register */
+struct nps_enet_ge_mac_cfg_3 {
+ union {
+ /* ext_oob_cbfc_sel: Selects one of the 4 profiles for
+ * extended OOB in-flow-control indication
+ * max_len: Maximum receive frame length in bytes
+ * tx_cbfc_en: Enable transmission of class-based
+ * flow control packets
+ * rx_ifg_th: Threshold for IFG status reporting via OOB
+ * cf_timeout: Configurable time to decrement FC counters
+ * cf_drop: Drop control frames
+ * redirect_cbfc_sel: Selects one of CBFC redirect profiles
+ * rx_cbfc_redir_en: Enable Rx class-based flow
+ * control redirect
+ * rx_cbfc_en: Enable Rx class-based flow control
+ * tm_hd_mode: TM header mode
+ */
+ struct {
+ u32
+ ext_oob_cbfc_sel:2,
+ max_len:14,
+ tx_cbfc_en:1,
+ rx_ifg_th:5,
+ cf_timeout:4,
+ cf_drop:1,
+ redirect_cbfc_sel:2,
+ rx_cbfc_redir_en:1,
+ rx_cbfc_en:1,
+ tm_hd_mode:1;
+ };
+
+ u32 value;
+ };
+};
+
+/* GE MAC, PCS reset control register */
+struct nps_enet_ge_rst {
+ union {
+ /* gmac_0: GE MAC reset
+ * spcs_0: SGMII PCS reset
+ */
+ struct {
+ u32
+ __reserved_1:23,
+ gmac_0:1,
+ __reserved_2:7,
+ spcs_0:1;
+ };
+
+ u32 value;
+ };
+};
+
+/* Tx phase sync FIFO control register */
+struct nps_enet_phase_fifo_ctl {
+ union {
+ /* init: initialize serdes TX phase sync FIFO pointers
+ * rst: reset serdes TX phase sync FIFO
+ */
+ struct {
+ u32
+ __reserved:30,
+ init:1,
+ rst:1;
+ };
+
+ u32 value;
+ };
+};
+
+/**
+ * struct nps_enet_priv - Storage of ENET's private information.
+ * @regs_base: Base address of ENET memory-mapped control registers.
+ * @irq: RX/TX IRQ number.
+ * @tx_packet_sent: SW indication if frame is being sent.
+ * @tx_skb: socket buffer of sent frame.
+ * @napi: Structure for NAPI.
+ */
+struct nps_enet_priv {
+ void __iomem *regs_base;
+ s32 irq;
+ bool tx_packet_sent;
+ struct sk_buff *tx_skb;
+ struct napi_struct napi;
+ struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2;
+ struct nps_enet_ge_mac_cfg_3 ge_mac_cfg_3;
+};
+
+/**
+ * nps_enet_reg_set - Sets ENET register with provided value.
+ * @priv: Pointer to EZchip ENET private data structure.
+ * @reg: Register offset from base address.
+ * @value: Value to set in register.
+ */
+static inline void nps_enet_reg_set(struct nps_enet_priv *priv,
+ s32 reg, s32 value)
+{
+ iowrite32be(value, priv->regs_base + reg);
+}
+
+/**
+ * nps_enet_reg_get - Gets value of specified ENET register.
+ * @priv: Pointer to EZchip ENET private data structure.
+ * @reg: Register offset from base address.
+ *
+ * returns: Value of requested register.
+ */
+static inline u32 nps_enet_reg_get(struct nps_enet_priv *priv, s32 reg)
+{
+ return ioread32be(priv->regs_base + reg);
+}
+
+#endif /* _NPS_ENET_H */
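As a side note for reviewers (not part of the patch itself): every register struct in nps_enet.h pairs its bit fields with a raw .value word, so the usual access pattern in nps_enet.c is to read the whole register through nps_enet_reg_get(), inspect or update individual fields, and write the word back with nps_enet_reg_set(). A minimal sketch of that pattern, using the accessors and structs from this header but a hypothetical helper name (example_rx_ack), would look like:

	static void example_rx_ack(struct nps_enet_priv *priv)
	{
		struct nps_enet_rx_ctl rx_ctl;

		/* read the whole 32-bit register into the union */
		rx_ctl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);

		/* cr is set by HW when a frame is ready in the Rx buffer */
		if (rx_ctl.cr)
			/* writing 0 acks the frame so HW can load the next one */
			nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0);
	}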
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index 5918c6891..040c7f163 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_FARADAY
default y
depends on ARM
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 25e342572..ff76d4e9d 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -9,9 +9,7 @@ config NET_VENDOR_FREESCALE
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -85,12 +83,12 @@ config UGETH_TX_ON_DEMAND
config GIANFAR
tristate "Gianfar Ethernet"
- depends on FSL_SOC
select FSL_PQ_MDIO
select PHYLIB
select CRC32
---help---
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
- and MPC86xx family of chips, and the FEC on the 8540.
+ and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
+ on the 8540.
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a86af8a74..99d33e2d3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -428,6 +428,8 @@ struct bufdesc_ex {
#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
/* Controller has only one MDIO bus */
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
+/* Controller supports RACC register */
+#define FEC_QUIRK_HAS_RACC (1 << 12)
struct fec_enet_priv_tx_q {
int index;
@@ -560,6 +562,7 @@ struct fec_enet_private {
};
void fec_ptp_init(struct platform_device *pdev);
+void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 66d47e448..b349e6f36 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_RAEM_V 0x8
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V 0xFFF0
+#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
static struct platform_device_id fec_devtype[] = {
{
@@ -85,28 +87,30 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET,
+ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
}, {
.name = "imx27-fec",
- .driver_data = 0,
+ .driver_data = FEC_QUIRK_HAS_RACC,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO,
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+ FEC_QUIRK_HAS_RACC,
}, {
.name = "mvf600-fec",
- .driver_data = FEC_QUIRK_ENET_MAC,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
}, {
.name = "imx6sx-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC,
}, {
/* sentinel */
}
@@ -970,13 +974,15 @@ fec_restart(struct net_device *ndev)
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
#if !defined(CONFIG_M5272)
- /* set RX checksum */
- val = readl(fep->hwp + FEC_RACC);
- if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
- val |= FEC_RACC_OPTIONS;
- else
- val &= ~FEC_RACC_OPTIONS;
- writel(val, fep->hwp + FEC_RACC);
+ if (fep->quirks & FEC_QUIRK_HAS_RACC) {
+ /* set RX checksum */
+ val = readl(fep->hwp + FEC_RACC);
+ if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+ val |= FEC_RACC_OPTIONS;
+ else
+ val &= ~FEC_RACC_OPTIONS;
+ writel(val, fep->hwp + FEC_RACC);
+ }
#endif
/*
@@ -1763,10 +1769,16 @@ static void fec_enet_adjust_link(struct net_device *ndev)
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
unsigned long time_left;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
fep->mii_timeout = 0;
- init_completion(&fep->mdio_done);
+ reinit_completion(&fep->mdio_done);
/* start a read op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -1779,21 +1791,33 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO read timeout\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
- /* return value */
- return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
unsigned long time_left;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
fep->mii_timeout = 0;
- init_completion(&fep->mdio_done);
+ reinit_completion(&fep->mdio_done);
/* start a write op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
@@ -1807,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (time_left == 0) {
fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO write timeout\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
}
- return 0;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
@@ -1822,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ahb);
if (ret)
return ret;
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
if (fep->clk_enet_out) {
ret = clk_prepare_enable(fep->clk_enet_out);
if (ret)
@@ -1848,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
}
} else {
clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -1870,8 +1893,6 @@ failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
clk_disable_unprepare(fep->clk_ahb);
return ret;
@@ -2118,6 +2139,82 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
+static int fec_enet_get_regs_len(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct resource *r;
+ int s = 0;
+
+ r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
+ if (r)
+ s = resource_size(r);
+
+ return s;
+}
+
+/* List of registers that can safely be read to dump them with ethtool */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+static u32 fec_enet_register_offset[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
+ FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
+ FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
+ FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
+ FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
+ FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
+ FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
+ FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
+#else
+static u32 fec_enet_register_offset[] = {
+ FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
+ FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
+ FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
+ FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
+ FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
+ FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
+ FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
+ FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
+ FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
+};
+#endif
+
+static void fec_enet_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *regbuf)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ u32 *buf = (u32 *)regbuf;
+ u32 i, off;
+
+ memset(buf, 0, regs->len);
+
+ for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
+ off = fec_enet_register_offset[i] / 4;
+ buf[off] = readl(&theregs[off]);
+ }
+}
+
static int fec_enet_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
@@ -2515,6 +2612,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
+ .get_regs_len = fec_enet_get_regs_len,
+ .get_regs = fec_enet_get_regs,
.nway_reset = fec_enet_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = fec_enet_get_coalesce,
@@ -2765,10 +2864,14 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ ret = pm_runtime_get_sync(&fep->pdev->dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
pinctrl_pm_select_default_state(&fep->pdev->dev);
ret = fec_enet_clk_enable(ndev, true);
if (ret)
- return ret;
+ goto clk_enable;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
@@ -2778,12 +2881,14 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto err_enet_alloc;
+ /* Init MAC prior to mii bus probe */
+ fec_restart(ndev);
+
/* Probe and connect to PHY when open the interface */
ret = fec_enet_mii_probe(ndev);
if (ret)
goto err_enet_mii_probe;
- fec_restart(ndev);
napi_enable(&fep->napi);
phy_start(fep->phy_dev);
netif_tx_start_all_queues(ndev);
@@ -2797,6 +2902,9 @@ err_enet_mii_probe:
fec_enet_free_buffers(ndev);
err_enet_alloc:
fec_enet_clk_enable(ndev, false);
+clk_enable:
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
return ret;
}
@@ -2819,6 +2927,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ pm_runtime_mark_last_busy(&fep->pdev->dev);
+ pm_runtime_put_autosuspend(&fep->pdev->dev);
+
fec_enet_free_buffers(ndev);
return 0;
@@ -3031,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */
- cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
- GFP_KERNEL);
+ cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+ GFP_KERNEL);
if (!cbd_base) {
return -ENOMEM;
}
@@ -3304,6 +3415,10 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_clk;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
@@ -3316,6 +3431,12 @@ fec_probe(struct platform_device *pdev)
fep->reg_phy = NULL;
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
fec_reset_phy(pdev);
if (fep->bufdesc_ex)
@@ -3363,6 +3484,10 @@ fec_probe(struct platform_device *pdev)
fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
failed_register:
@@ -3370,9 +3495,12 @@ failed_register:
failed_mii_init:
failed_irq:
failed_init:
+ fec_ptp_stop(pdev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
@@ -3389,14 +3517,12 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- cancel_delayed_work_sync(&fep->time_keep);
cancel_work_sync(&fep->tx_timeout_work);
+ fec_ptp_stop(pdev);
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- if (fep->ptp_clock)
- ptp_clock_unregister(fep->ptp_clock);
of_node_put(fep->phy_node);
free_netdev(ndev);
@@ -3484,7 +3610,28 @@ failed_clk:
return ret;
}
-static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
+static int __maybe_unused fec_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ clk_disable_unprepare(fep->clk_ipg);
+
+ return 0;
+}
+
+static int __maybe_unused fec_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return clk_prepare_enable(fep->clk_ipg);
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+};
static struct platform_driver fec_driver = {
.driver = {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a583d89b1..f457a23d0 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -353,6 +353,7 @@ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
writel(tmp, fep->hwp + FEC_ATIME_INC);
+ corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
writel(corr_period, fep->hwp + FEC_ATIME_CORR);
/* dummy read to update the timer. */
timecounter_read(&fep->tc);
@@ -603,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev)
schedule_delayed_work(&fep->time_keep, HZ);
}
+void fec_ptp_stop(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ cancel_delayed_work_sync(&fep->time_keep);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+}
+
/**
* fec_ptp_check_pps_event
* @fep: the fec_enet_private structure handle
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 9b3639eae..cf8e54652 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -86,7 +86,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
struct net_device *dev = fep->ndev;
const struct fs_platform_info *fpi = fep->fpi;
cbd_t __iomem *bdp;
- struct sk_buff *skb, *skbn, *skbt;
+ struct sk_buff *skb, *skbn;
int received = 0;
u16 pkt_len, sc;
int curidx;
@@ -161,10 +161,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
skb_reserve(skbn, 2); /* align IP header */
skb_copy_from_linear_data(skb,
skbn->data, pkt_len);
- /* swap */
- skbt = skb;
- skb = skbn;
- skbn = skbt;
+ swap(skb, skbn);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
@@ -490,6 +487,9 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
{
struct sk_buff *new_skb;
+ if (skb_linearize(skb))
+ return NULL;
+
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
if (!new_skb)
@@ -515,12 +515,27 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
cbd_t __iomem *bdp;
int curidx;
u16 sc;
- int nr_frags = skb_shinfo(skb)->nr_frags;
+ int nr_frags;
skb_frag_t *frag;
int len;
-
#ifdef CONFIG_FS_ENET_MPC5121_FEC
- if (((unsigned long)skb->data) & 0x3) {
+ int is_aligned = 1;
+ int i;
+
+ if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
+ is_aligned = 0;
+ } else {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag = skb_shinfo(skb)->frags;
+ for (i = 0; i < nr_frags; i++, frag++) {
+ if (!IS_ALIGNED(frag->page_offset, 4)) {
+ is_aligned = 0;
+ break;
+ }
+ }
+ }
+
+ if (!is_aligned) {
skb = tx_skb_align_workaround(dev, skb);
if (!skb) {
/*
@@ -532,6 +547,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#endif
+
spin_lock(&fep->tx_lock);
/*
@@ -539,6 +555,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
bdp = fep->cur_tx;
+ nr_frags = skb_shinfo(skb)->nr_frags;
if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
netif_stop_queue(dev);
spin_unlock(&fep->tx_lock);
@@ -569,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
frag = skb_shinfo(skb)->frags;
while (nr_frags) {
CBDC_SC(bdp,
- BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+ BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2d..016743e35 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
#define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4ee080d49..10b3bbbba 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -516,6 +516,15 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
return &dev->stats;
}
+static int gfar_set_mac_addr(struct net_device *dev, void *p)
+{
+ eth_mac_addr(dev, p);
+
+ gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+ return 0;
+}
+
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
@@ -526,7 +535,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
@@ -556,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
}
}
-static void lock_tx_qs(struct gfar_private *priv)
-{
- int i;
-
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_lock(&priv->tx_queue[i]->txlock);
-}
-
-static void unlock_tx_qs(struct gfar_private *priv)
-{
- int i;
-
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_unlock(&priv->tx_queue[i]->txlock);
-}
-
static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
int i;
@@ -1367,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev)
priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
- spin_lock_init(&priv->bflock);
INIT_WORK(&priv->reset_task, gfar_reset_task);
platform_set_drvdata(ofdev, priv);
@@ -1411,6 +1403,8 @@ static int gfar_probe(struct platform_device *ofdev)
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
gfar_init_addr_hash_table(priv);
/* Insert receive time stamps into padding alignment bytes */
@@ -1459,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
- device_init_wakeup(&dev->dev,
- priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&dev->dev, priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1529,48 +1522,37 @@ static int gfar_suspend(struct device *dev)
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
u32 tempval;
-
int magic_packet = priv->wol_en &&
(priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ if (!netif_running(ndev))
+ return 0;
+
+ disable_napi(priv);
+ netif_tx_lock(ndev);
netif_device_detach(ndev);
+ netif_tx_unlock(ndev);
- if (netif_running(ndev)) {
+ gfar_halt(priv);
- local_irq_save(flags);
- lock_tx_qs(priv);
+ if (magic_packet) {
+ /* Enable interrupt on Magic Packet */
+ gfar_write(&regs->imask, IMASK_MAG);
- gfar_halt_nodisable(priv);
+ /* Enable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
- /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+ /* re-enable the Rx block */
tempval = gfar_read(&regs->maccfg1);
-
- tempval &= ~MACCFG1_TX_EN;
-
- if (!magic_packet)
- tempval &= ~MACCFG1_RX_EN;
-
+ tempval |= MACCFG1_RX_EN;
gfar_write(&regs->maccfg1, tempval);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- disable_napi(priv);
-
- if (magic_packet) {
- /* Enable interrupt on Magic Packet */
- gfar_write(&regs->imask, IMASK_MAG);
-
- /* Enable Magic Packet mode */
- tempval = gfar_read(&regs->maccfg2);
- tempval |= MACCFG2_MPEN;
- gfar_write(&regs->maccfg2, tempval);
- } else {
- phy_stop(priv->phydev);
- }
+ } else {
+ phy_stop(priv->phydev);
}
return 0;
@@ -1581,37 +1563,26 @@ static int gfar_resume(struct device *dev)
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
(priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
- if (!netif_running(ndev)) {
- netif_device_attach(ndev);
+ if (!netif_running(ndev))
return 0;
- }
- if (!magic_packet && priv->phydev)
+ if (magic_packet) {
+ /* Disable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+ } else {
phy_start(priv->phydev);
-
- /* Disable Magic Packet mode, in case something
- * else woke us up.
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- tempval = gfar_read(&regs->maccfg2);
- tempval &= ~MACCFG2_MPEN;
- gfar_write(&regs->maccfg2, tempval);
+ }
gfar_start(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
netif_device_attach(ndev);
-
enable_napi(priv);
return 0;
@@ -2034,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, ER)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2057,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
goto rx_irq_fail;
}
} else {
- err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
+ IRQF_NO_SUSPEND,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2129,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
+ /* force link state update after mac reset */
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
phy_start(priv->phydev);
enable_napi(priv);
@@ -2158,8 +2136,6 @@ static int gfar_enet_open(struct net_device *dev)
if (err)
return err;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
-
return err;
}
@@ -2254,7 +2230,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rq = 0;
int do_tstamp, do_csum, do_vlan;
u32 bufaddr;
- unsigned long flags;
unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
rq = skb->queue_mapping;
@@ -2434,19 +2409,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(txq, bytes_sent);
- /* We can work in parallel with gfar_clean_tx_ring(), except
- * when modifying num_txbdfree. Note that we didn't grab the lock
- * when we were reading the num_txbdfree and checking for available
- * space, that's because outside of this function it can only grow,
- * and once we've got needed space, it cannot suddenly disappear.
- *
- * The lock also protects us from gfar_error(), which can modify
- * regs->tstat and thus retrigger the transfers, which is why we
- * also must grab the lock before setting ready bit for the first
- * to be transmitted BD.
- */
- spin_lock_irqsave(&tx_queue->txlock, flags);
-
gfar_wmb();
txbdp_start->lstatus = cpu_to_be32(lstatus);
@@ -2463,8 +2425,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ /* We can work in parallel with gfar_clean_tx_ring(), except
+ * when modifying num_txbdfree. Note that we didn't grab the lock
+ * when we were reading num_txbdfree and checking for available
+ * space; that is because outside of this function it can only grow.
+ */
+ spin_lock_bh(&tx_queue->txlock);
/* reduce TxBD free count */
tx_queue->num_txbdfree -= (nr_txbds);
+ spin_unlock_bh(&tx_queue->txlock);
/* If the next BD still needs to be cleaned up, then the bds
* are full. We need to tell the kernel to stop sending us stuff.
@@ -2478,9 +2447,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Tell the DMA to go go go */
gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
- /* Unlock priv */
- spin_unlock_irqrestore(&tx_queue->txlock, flags);
-
return NETDEV_TX_OK;
dma_map_err:
@@ -2622,7 +2588,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
- unsigned long flags;
frags = skb_shinfo(skb)->nr_frags;
@@ -2686,9 +2651,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
TX_RING_MOD_MASK(tx_ring_size);
howmany++;
- spin_lock_irqsave(&tx_queue->txlock, flags);
+ spin_lock(&tx_queue->txlock);
tx_queue->num_txbdfree += nr_txbds;
- spin_unlock_irqrestore(&tx_queue->txlock, flags);
+ spin_unlock(&tx_queue->txlock);
}
/* If we freed a buffer, we can restart transmission, if necessary */
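The narrowed locking above pairs spin_lock_bh() in the xmit path with a plain spin_lock() in the NAPI cleanup path, which already runs in softirq context, so only the num_txbdfree update is serialized. A reduced sketch of that split, with txlock and num_txbdfree standing in for the per-queue fields:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(txlock);
static int num_txbdfree;

static void xmit_path(int used)		/* process context, e.g. ndo_start_xmit */
{
	spin_lock_bh(&txlock);		/* keep the softirq cleaner off this CPU */
	num_txbdfree -= used;
	spin_unlock_bh(&txlock);
}

static void clean_path(int freed)	/* NAPI poll, already softirq context */
{
	spin_lock(&txlock);
	num_txbdfree += freed;
	spin_unlock(&txlock);
}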
@@ -3411,21 +3376,12 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
if (events & IEVENT_CRL)
dev->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
- unsigned long flags;
-
netif_dbg(priv, tx_err, dev,
"TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
atomic64_inc(&priv->extra_stats.tx_underrun);
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- /* Reactivate the Tx Queues */
- gfar_write(&regs->tstat, gfargrp->tstat);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
+ schedule_work(&priv->reset_task);
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
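Replacing the in-IRQ queue restart with schedule_work() defers the recovery to process context, where a full reset can sleep. A stripped-down sketch of that deferral, with my_priv, my_reset_task and my_error_isr as illustrative names:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_priv {
	struct work_struct reset_task;
};

static void my_reset_task(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, reset_task);

	/* process context: safe to sleep while stopping/restarting the MAC */
	(void)priv;
}

static irqreturn_t my_error_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	schedule_work(&priv->reset_task);	/* defer, do not fix up in hard IRQ */
	return IRQ_HANDLED;
}

/* in probe(): INIT_WORK(&priv->reset_task, my_reset_task); */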
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de..5545e4103 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1145,9 +1145,6 @@ struct gfar_private {
int oldduplex;
int oldlink;
- /* Bitfield update lock */
- spinlock_t bflock;
-
uint32_t msg_enable;
struct work_struct reset_task;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32..5b90fcf96 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
wol->wolopts != 0)
@@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
- spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = !!device_may_wakeup(&dev->dev);
- spin_unlock_irqrestore(&priv->bflock, flags);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
return 0;
}
@@ -903,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
return 0;
}
-static int gfar_comp_asc(const void *a, const void *b)
-{
- return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
- return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
- u32 *_a = a;
- u32 *_b = b;
-
- swap(_a[0], _b[0]);
- swap(_a[1], _b[1]);
- swap(_a[2], _b[2]);
- swap(_a[3], _b[3]);
-}
-
/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
@@ -1273,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
return 0;
}
-/* Copy size filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
-{
- while (size > 0) {
- size--;
- dst[size].ctrl = src[size].ctrl;
- dst[size].prop = src[size].prop;
- }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
- int length;
-
- if (end > MAX_FILER_CACHE_IDX || end < begin)
- return -EINVAL;
-
- end++;
- length = end - begin;
-
- /* Copy */
- while (end < tab->index) {
- tab->fe[begin].ctrl = tab->fe[end].ctrl;
- tab->fe[begin++].prop = tab->fe[end++].prop;
-
- }
- /* Fill up with don't cares */
- while (begin < tab->index) {
- tab->fe[begin].ctrl = 0x60;
- tab->fe[begin].prop = 0xFFFFFFFF;
- begin++;
- }
-
- tab->index -= length;
- return 0;
-}
-
-/* Make space on the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
-{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
- begin > MAX_FILER_CACHE_IDX)
- return -EINVAL;
-
- gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
-
- tab->index += length;
- return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_AND | RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-/* Uses hardwares clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
- s32 i = -1, j, iend, jend;
-
- while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
- j = i;
- while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /* The cluster entries self and the previous one
- * (a mask) must be identical!
- */
- if (tab->fe[i].ctrl != tab->fe[j].ctrl)
- break;
- if (tab->fe[i].prop != tab->fe[j].prop)
- break;
- if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
- break;
- if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
- break;
- iend = gfar_get_next_cluster_end(i, tab);
- jend = gfar_get_next_cluster_end(j, tab);
- if (jend == -1 || iend == -1)
- break;
-
- /* First we make some free space, where our cluster
- * element should be. Then we copy it there and finally
- * delete in from its old location.
- */
- if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
- -EINVAL)
- break;
-
- gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
-
- if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j),
- tab) == -EINVAL)
- return;
-
- /* Mask out cluster bit */
- tab->fe[iend].ctrl &= ~(RQFCR_CLE);
- }
- }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2,
- struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
-{
- u32 temp[4];
- temp[0] = a1->ctrl & mask;
- temp[1] = a2->ctrl & mask;
- temp[2] = b1->ctrl & mask;
- temp[3] = b2->ctrl & mask;
-
- a1->ctrl &= ~mask;
- a2->ctrl &= ~mask;
- b1->ctrl &= ~mask;
- b2->ctrl &= ~mask;
-
- a1->ctrl |= temp[1];
- a2->ctrl |= temp[0];
- b1->ctrl |= temp[3];
- b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of masks values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
-{
- u32 i, and_index = 0, block_index = 1;
-
- for (i = 0; i < tab->index; i++) {
-
- /* LSByte of control = 0 sets a mask */
- if (!(tab->fe[i].ctrl & 0xF)) {
- mask_table[and_index].mask = tab->fe[i].prop;
- mask_table[and_index].start = i;
- mask_table[and_index].block = block_index;
- if (and_index >= 1)
- mask_table[and_index - 1].end = i - 1;
- and_index++;
- }
- /* cluster starts and ends will be separated because they should
- * hold their position
- */
- if (tab->fe[i].ctrl & RQFCR_CLE)
- block_index++;
- /* A not set AND indicates the end of a depended block */
- if (!(tab->fe[i].ctrl & RQFCR_AND))
- block_index++;
- }
-
- mask_table[and_index - 1].end = i - 1;
-
- return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
-{
- /* Pointer to compare function (_asc or _desc) */
- int (*gfar_comp)(const void *, const void *);
-
- u32 i, size = 0, start = 0, prev = 1;
- u32 old_first, old_last, new_first, new_last;
-
- gfar_comp = &gfar_comp_desc;
-
- for (i = 0; i < and_index; i++) {
- if (prev != mask_table[i].block) {
- old_first = mask_table[start].start + 1;
- old_last = mask_table[i - 1].end;
- sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
-
- /* Toggle order for every block. This makes the
- * thing more efficient!
- */
- if (gfar_comp == gfar_comp_desc)
- gfar_comp = &gfar_comp_asc;
- else
- gfar_comp = &gfar_comp_desc;
-
- new_first = mask_table[start].start + 1;
- new_last = mask_table[i - 1].end;
-
- gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND);
-
- start = i;
- size = 0;
- }
- size++;
- prev = mask_table[i].block;
- }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries
- * This is done by sorting the masks of a depended block. A depended block is
- * identified by gluing ANDs or CLE. The sorting order toggles after every
- * block. Of course entries in scope of a mask must change their location with
- * it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
- struct filer_table *temp_table;
- struct gfar_mask_entry *mask_table;
-
- u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
- s32 ret = 0;
-
- /* We need a copy of the filer table because
- * we want to change its order
- */
- temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
- if (temp_table == NULL)
- return -ENOMEM;
-
- mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
- if (mask_table == NULL) {
- ret = -ENOMEM;
- goto end;
- }
-
- and_index = gfar_generate_mask_table(mask_table, tab);
-
- gfar_sort_mask_table(mask_table, temp_table, and_index);
-
- /* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says
- */
- for (i = 0; i < and_index; i++) {
- size = mask_table[i].end - mask_table[i].start + 1;
- gfar_copy_filer_entries(&(tab->fe[j]),
- &(temp_table->fe[mask_table[i].start]), size);
- j += size;
- }
-
- /* And finally we just have to check for duplicated masks and drop the
- * second ones
- */
- for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- previous_mask = i++;
- break;
- }
- }
- for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
- /* Two identical ones found!
- * So drop the second one!
- */
- gfar_trim_filer_entries(i, i, tab);
- } else
- /* Not identical! */
- previous_mask = i;
- }
- }
-
- kfree(mask_table);
-end: kfree(temp_table);
- return ret;
-}
-
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
struct filer_table *tab)
@@ -1586,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
return -EBUSY;
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
- i++)
+ for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
- for (; i < MAX_FILER_IDX - 1; i++)
+ for (; i < MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
* because that's what people expect
@@ -1624,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
{
struct ethtool_flow_spec_container *j;
struct filer_table *tab;
- s32 i = 0;
s32 ret = 0;
/* So index is set to zero, too! */
@@ -1649,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
}
}
- i = tab->index;
-
- /* Optimizations to save entries */
- gfar_cluster_filer(tab);
- gfar_optimize_filer_masks(tab);
-
- pr_debug("\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
-
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
if (ret == -EBUSY) {
@@ -1725,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
}
process:
+ priv->rx_list.count++;
ret = gfar_process_filer_changes(priv);
if (ret)
goto clean_list;
- priv->rx_list.count++;
return ret;
clean_list:
+ priv->rx_list.count--;
list_del(&temp->list);
clean_mem:
kfree(temp);
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 108525738..faee34e44 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_FUJITSU
default y
depends on PCMCIA
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Fujitsu cards. If you say Y, you will be asked for
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index a54d89791..dead17b5d 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_HISILICON
default y
depends on ARM
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 3b39fddde..d49bee38c 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -798,7 +798,7 @@ static void hip04_free_ring(struct net_device *ndev, struct device *d)
for (i = 0; i < RX_DESC_NUM; i++)
if (priv->rx_buf[i])
- put_page(virt_to_head_page(priv->rx_buf[i]));
+ skb_free_frag(priv->rx_buf[i]);
for (i = 0; i < TX_DESC_NUM; i++)
if (priv->tx_skb[i])
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 0ffdcd381..a5e077eac 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -500,7 +500,6 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- dev->last_rx = jiffies;
next:
pos = dma_ring_incr(pos, RX_DESC_NUM);
}
diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/net/ethernet/hp/Kconfig
index a0b8ece1e..d4df78c2a 100644
--- a/drivers/net/ethernet/hp/Kconfig
+++ b/drivers/net/ethernet/hp/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_HP
default y
depends on ISA || EISA || PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -22,9 +20,7 @@ config HP100
tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
depends on (ISA || EISA || PCI)
---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card of this type, say Y here.
To compile this driver as a module, choose M here. The module
will be called hp100.
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index 9521e68aa..e8d61f670 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_I825XX
default y
depends on NET_VENDOR_INTEL
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question does not directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index 563a1ac71..99c1cebd0 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_IBM
default y
depends on PPC_PSERIES || PPC_DCR || (IBMEBUS && SPARSEMEM)
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b9df0cbd0..b60a34d98 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2999,7 +2999,7 @@ static struct platform_driver emac_driver = {
static void __init emac_make_bootlist(void)
{
struct device_node *np = NULL;
- int j, max, i = 0, k;
+ int j, max, i = 0;
int cell_indices[EMAC_BOOT_LIST_SIZE];
/* Collect EMACs */
@@ -3026,12 +3026,8 @@ static void __init emac_make_bootlist(void)
for (i = 0; max > 1 && (i < (max - 1)); i++)
for (j = i; j < max; j++) {
if (cell_indices[i] > cell_indices[j]) {
- np = emac_boot_list[i];
- emac_boot_list[i] = emac_boot_list[j];
- emac_boot_list[j] = np;
- k = cell_indices[i];
- cell_indices[i] = cell_indices[j];
- cell_indices[j] = k;
+ swap(emac_boot_list[i], emac_boot_list[j]);
+ swap(cell_indices[i], cell_indices[j]);
}
}
}
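The open-coded three-variable exchange is replaced with the swap() helper from <linux/kernel.h>, which performs a typeof-based temporary swap of any two lvalues of the same type. In miniature, with order_pair as a placeholder name:

#include <linux/kernel.h>

static void order_pair(int *a, int *b)
{
	/* swap() expands to: typeof(*a) tmp = *a; *a = *b; *b = tmp; */
	if (*a > *b)
		swap(*a, *b);
}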
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 18134766a..29bbb628d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.04"
+#define ibmveth_driver_version "1.05"
MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -100,6 +100,8 @@ struct ibmveth_stat ibmveth_stats[] = {
{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
+ { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
+ { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
};
/* simple methods of getting data from the current rxq entry */
@@ -852,6 +854,10 @@ static int ibmveth_set_features(struct net_device *dev,
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
int rc;
+ netdev_features_t changed = features ^ dev->features;
+
+ if (features & NETIF_F_TSO & changed)
+ netdev_info(dev, "TSO feature requires all partitions to have an updated driver");
if (rx_csum == adapter->rx_csum)
return 0;
@@ -1035,6 +1041,15 @@ retry_bounce:
descs[i+1].fields.address = dma_addr;
}
+ if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
+ /* Put -1 in the IP checksum to tell phyp it
+ * is a largesend packet and put the mss in the TCP checksum.
+ */
+ ip_hdr(skb)->check = 0xffff;
+ tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ adapter->tx_large_packets++;
+ }
+
if (ibmveth_send(adapter, descs)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
@@ -1080,6 +1095,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
struct net_device *netdev = adapter->netdev;
int frames_processed = 0;
unsigned long lpar_rc;
+ struct iphdr *iph;
restart_poll:
while (frames_processed < budget) {
@@ -1122,10 +1138,23 @@ restart_poll:
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
- if (csum_good)
+ if (csum_good) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
+ iph = (struct iphdr *)skb->data;
+
+ /* If the IP checksum is not offloaded and if the packet
+ * is large send, the checksum must be rebuilt.
+ */
+ if (iph->check == 0xffff) {
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ adapter->rx_large_packets++;
+ }
+ }
+ }
- netif_receive_skb(skb); /* send it up */
+ napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += length;
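The receive path above rebuilds the IPv4 header checksum that the hypervisor left as 0xffff on large-send frames; ip_fast_csum() recomputes it over ihl 32-bit words once the field is zeroed. The same two-step rebuild in isolation, on an skb whose IP header sits at skb->data (fix_iph_csum is an illustrative name):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

static void fix_iph_csum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;		/* checksum field must be zero while summing */
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}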
@@ -1422,8 +1451,14 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= netdev->hw_features;
+ /* TSO is disabled by default */
+ netdev->hw_features |= NETIF_F_TSO;
+
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
+ if (firmware_has_feature(FW_FEATURE_CMO))
+ memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
+
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
int error;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 1f37499d4..41dedb1fb 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -104,7 +104,8 @@ static inline long h_illan_attributes(unsigned long unit_address,
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
-static int pool_active[] = { 1, 1, 0, 0, 0};
+static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
+static int pool_active[] = { 1, 1, 0, 0, 1};
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -160,6 +161,8 @@ struct ibmveth_adapter {
u64 rx_no_buffer;
u64 tx_map_failed;
u64 tx_send_failed;
+ u64 tx_large_packets;
+ u64 rx_large_packets;
};
/*
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index ff2903652..c3b6af83f 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -1028,7 +1028,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
/* detailed rx_errors */
sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
- ipg_r16(IPG_FRAMETOOLONGERRRORS);
+ ipg_r16(IPG_FRAMETOOLONGERRORS);
sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
/* Unutilized IPG statistic registers. */
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
index a21e4f570..de606281f 100644
--- a/drivers/net/ethernet/icplus/ipg.h
+++ b/drivers/net/ethernet/icplus/ipg.h
@@ -102,7 +102,7 @@ enum ipg_regs {
#define IPG_MCSTFRAMESRCVDOK 0xB8
#define IPG_BCSTFRAMESRCVDOK 0xBE
#define IPG_MACCONTROLFRAMESRCVD 0xC6
-#define IPG_FRAMETOOLONGERRRORS 0xC8
+#define IPG_FRAMETOOLONGERRORS 0xC8
#define IPG_INRANGELENGTHERRORS 0xCA
#define IPG_FRAMECHECKSEQERRORS 0xCC
#define IPG_FRAMESLOSTRXERRORS 0xCE
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index f4ff46558..4163b1648 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -6,9 +6,7 @@ config NET_VENDOR_INTEL
bool "Intel devices"
default y
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index f9fae3e6f..dd54e8f63 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -872,7 +872,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
{
struct cb *cb;
unsigned long flags;
- int err = 0;
+ int err;
spin_lock_irqsave(&nic->cb_lock, flags);
@@ -2921,9 +2921,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- init_timer(&nic->watchdog);
- nic->watchdog.function = e100_watchdog;
- nic->watchdog.data = (unsigned long)nic;
+ setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
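setup_timer() collapses the three init_timer()/function/data assignments into a single call; the callback keeps the pre-4.15 (unsigned long data) signature used throughout this tree. A self-contained sketch with my_nic and my_watchdog as placeholder names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_nic {
	struct timer_list watchdog;
};

static void my_watchdog(unsigned long data)
{
	struct my_nic *nic = (struct my_nic *)data;

	/* poll the hardware, then re-arm one second out */
	mod_timer(&nic->watchdog, jiffies + HZ);
}

static void my_nic_setup_watchdog(struct my_nic *nic)
{
	setup_timer(&nic->watchdog, my_watchdog, (unsigned long)nic);
}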
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 983eb4e6f..74dc15055 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2079,11 +2079,6 @@ static void *e1000_alloc_frag(const struct e1000_adapter *a)
return data;
}
-static void e1000_free_frag(const void *data)
-{
- put_page(virt_to_head_page(data));
-}
-
/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
* @adapter: board private structure
@@ -2107,7 +2102,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (buffer_info->rxbuf.data) {
- e1000_free_frag(buffer_info->rxbuf.data);
+ skb_free_frag(buffer_info->rxbuf.data);
buffer_info->rxbuf.data = NULL;
}
} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
@@ -4594,28 +4589,28 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
data = e1000_alloc_frag(adapter);
/* Failed allocation, critical failure */
if (!data) {
- e1000_free_frag(olddata);
+ skb_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}
if (!e1000_check_64k_bound(adapter, data, bufsz)) {
/* give up */
- e1000_free_frag(data);
- e1000_free_frag(olddata);
+ skb_free_frag(data);
+ skb_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}
/* Use new allocation */
- e1000_free_frag(olddata);
+ skb_free_frag(olddata);
}
buffer_info->dma = dma_map_single(&pdev->dev,
data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- e1000_free_frag(data);
+ skb_free_frag(data);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
break;
@@ -4637,7 +4632,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
- e1000_free_frag(data);
+ skb_free_frag(data);
buffer_info->rxbuf.data = NULL;
buffer_info->dma = 0;
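skb_free_frag() is the generic release for buffers handed out by the page-fragment allocators (netdev_alloc_frag()/napi_alloc_frag()), replacing the driver-local put_page(virt_to_head_page(...)) wrapper removed above. The pairing, reduced to a sketch with illustrative helper names:

#include <linux/skbuff.h>

static void *rx_buf_get(unsigned int len)
{
	/* page-fragment backed buffer, rounded up for alignment */
	return netdev_alloc_frag(SKB_DATA_ALIGN(len));
}

static void rx_buf_put(void *data)
{
	skb_free_frag(data);	/* drops the page-fragment reference */
}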
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 08f22f348..2af603f3e 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 535a94309..a2162e116 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 32e77755a..5f7016442 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 2e758f796..abc6a9abf 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0570c668e..133d4074d 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0abc942c9..0b748d195 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@ struct e1000_info;
#define DEFAULT_RADV 8
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
+#define PCICFG_DESC_RING_STATUS 0xe4
+#define FLUSH_DESC_REQUIRED 0x100
/* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
@@ -384,6 +386,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define INCVALUE_SHIFT_25MHz 18
#define INCPERIOD_25MHz 1
+#define INCVALUE_24MHz 125
+#define INCVALUE_SHIFT_24MHz 14
+#define INCPERIOD_24MHz 3
+
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
* by simply reading the clock before it overflows.
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 11f486e4f..ad6daa656 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1516,8 +1516,19 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
-
+ u32 rctl, fext_nvm11, tarc0;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ tarc0 = er32(TARC(0));
+ /* clear bits 28 & 29 (control of MULR concurrent requests) */
+ tarc0 &= 0xcfffffff;
+ /* set bit 29 (value of MULR requests is now 2) */
+ tarc0 |= 0x20000000;
+ ew32(TARC(0), tarc0);
+ }
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
switch (hw->mac.type) {
@@ -1542,7 +1553,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
+ u32 rctl, fext_nvm11, tarc0;
u16 phy_reg;
rctl = er32(RCTL);
@@ -1550,6 +1561,16 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
switch (hw->mac.type) {
+ case e1000_pch_spt:
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ tarc0 = er32(TARC(0));
+ /* clear bits 28 & 29 (control of MULR concurrent requests) */
+ /* set bit 29 (value of MULR requests is now 0) */
+ tarc0 &= 0xcfffffff;
+ ew32(TARC(0), tarc0);
+ /* fall through */
case e1000_80003es2lan:
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 19e8c487d..c9da4654e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e2498dbf3..91a5a0ae9 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -237,17 +237,19 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (ret_val)
return false;
out:
- if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
- /* Unforce SMBus mode in PHY */
- e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
- phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+ if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
+ /* Only unforce SMBus if ME is not active */
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /* Unforce SMBus mode in PHY */
+ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
- /* Unforce SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+ }
}
return true;
@@ -1014,8 +1016,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
u16 speed, duplex, scale = 0;
u16 max_snoop, max_nosnoop;
u16 max_ltr_enc; /* max LTR latency encoded */
- s64 lat_ns; /* latency (ns) */
- s64 value;
+ u64 value;
u32 rxa;
if (!hw->adapter->max_frame_size) {
@@ -1040,14 +1041,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
* 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
* 1=2^5ns, 2=2^10ns,...5=2^25ns.
*/
- lat_ns = ((s64)rxa * 1024 -
- (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
- if (lat_ns < 0)
- lat_ns = 0;
- else
- do_div(lat_ns, speed);
+ rxa *= 512;
+ value = (rxa > hw->adapter->max_frame_size) ?
+ (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
+ 0;
- value = lat_ns;
while (value > PCI_LTR_VALUE_MASK) {
scale++;
value = DIV_ROUND_UP(value, (1 << 5));
@@ -1091,6 +1089,7 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
u32 mac_reg;
s32 ret_val = 0;
u16 phy_reg;
+ u16 oem_reg = 0;
if ((hw->mac.type < e1000_pch_lpt) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
@@ -1132,33 +1131,37 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
/* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
- &phy_reg);
+ &oem_reg);
if (ret_val)
goto release;
+
+ phy_reg = oem_reg;
phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
phy_reg);
+
if (ret_val)
goto release;
}
- /* Force SMBus mode in PHY */
- ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
- if (ret_val)
- goto release;
- phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
- e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Force SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
-
/* Set Inband ULP Exit, Reset to SMBus mode and
* Disable SMBus Release on PERST# in PHY
*/
@@ -1170,10 +1173,15 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (to_sx) {
if (er32(WUFC) & E1000_WUFC_LNKC)
phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+ else
+ phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
} else {
phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
+ phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
}
e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
@@ -1185,6 +1193,15 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
/* Commit ULP changes in PHY by starting auto ULP configuration */
phy_reg |= I218_ULP_CONFIG1_START;
e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
+ to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ oem_reg);
+ if (ret_val)
+ goto release;
+ }
+
release:
hw->phy.ops.release(hw);
out:
@@ -1383,16 +1400,20 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (((hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_pch_lpt) ||
(hw->mac.type == e1000_pch_spt)) && link) {
- u32 reg;
+ u16 speed, duplex;
- reg = er32(STATUS);
+ e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
tipg_reg = er32(TIPG);
tipg_reg &= ~E1000_TIPG_IPGT_MASK;
- if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+ if (duplex == HALF_DUPLEX && speed == SPEED_10) {
tipg_reg |= 0xFF;
/* Reduce Rx latency in analog PHY */
emi_val = 0;
+ } else if (hw->mac.type == e1000_pch_spt &&
+ duplex == FULL_DUPLEX && speed != SPEED_1000) {
+ tipg_reg |= 0xC;
+ emi_val = 1;
} else {
/* Roll back the default values */
@@ -1416,14 +1437,59 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ u16 data;
+ u16 ptr_gap;
+
+ if (speed == SPEED_1000) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy_locked(hw,
+ PHY_REG(776, 20),
+ &data);
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ return ret_val;
+ }
+
+ ptr_gap = (data & (0x3FF << 2)) >> 2;
+ if (ptr_gap < 0x18) {
+ data &= ~(0x3FF << 2);
+ data |= (0x18 << 2);
+ ret_val =
+ e1e_wphy_locked(hw,
+ PHY_REG(776, 20),
+ data);
+ }
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+ }
+
+ /* I217 Packet Loss issue:
+ * ensure that FEXTNVM4 Beacon Duration is set correctly
+ * on power up.
+ * Set the Beacon Duration for I217 to 8 usec
+ */
+ if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
+ u32 mac_reg;
+
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+ ew32(FEXTNVM4, mac_reg);
}
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) ||
- (hw->mac.type == e1000_pch_spt)) {
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 770a573b9..26459853c 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,8 +98,15 @@
#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
/* bit for disabling packet buffer read */
#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
-
+#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
#define K1_ENTRY_LATENCY 0
#define K1_MIN_TIME 1
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 30b74d590..e59d7c283 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 0513d90cd..8284618af 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 06edfca1a..cc9b3befc 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index a8c27f98f..0b9ea5952 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 68913d103..adfe1de78 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -48,7 +48,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -3525,22 +3525,30 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
- case e1000_pch_spt:
- /* On I217, I218 and I219, the clock frequency is 25MHz
- * or 96MHz as indicated by the System Clock Frequency
- * Indication
- */
- if (((hw->mac.type != e1000_pch_lpt) &&
- (hw->mac.type != e1000_pch_spt)) ||
- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
incperiod = INCPERIOD_96MHz;
incvalue = INCVALUE_96MHz;
shift = INCVALUE_SHIFT_96MHz;
adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ } else {
+ /* Stable 25MHz frequency */
+ incperiod = INCPERIOD_25MHz;
+ incvalue = INCVALUE_25MHz;
+ shift = INCVALUE_SHIFT_25MHz;
+ adapter->cc.shift = shift;
+ }
+ break;
+ case e1000_pch_spt:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHz;
+ incvalue = INCVALUE_24MHz;
+ shift = INCVALUE_SHIFT_24MHz;
+ adapter->cc.shift = shift;
break;
}
- /* fall-through */
+ return -EINVAL;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
@@ -3788,6 +3796,108 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
}
/**
+ * e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ *
+ * We want to clear all pending descriptors from the TX ring.
+ * Zeroing happens when the HW reads the regs. We assign the ring itself as
+ * the data of the next descriptor. We don't care about the data since we are
+ * about to reset the HW.
+ */
+static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc = NULL;
+ u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
+ u16 size = 512;
+
+ tctl = er32(TCTL);
+ ew32(TCTL, tctl | E1000_TCTL_EN);
+ tdt = er32(TDT(0));
+ BUG_ON(tdt != tx_ring->next_to_use);
+ tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
+ tx_desc->buffer_addr = tx_ring->dma;
+
+ tx_desc->lower.data = cpu_to_le32(txd_lower | size);
+ tx_desc->upper.data = 0;
+ /* flush descriptors to memory before notifying the HW */
+ wmb();
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ ew32(TDT(0), tx_ring->next_to_use);
+ mmiowb();
+ usleep_range(200, 250);
+}
+
+/**
+ * e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ *
+ * Mark all descriptors in the RX ring as consumed and disable the rx ring
+ */
+static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
+{
+ u32 rctl, rxdctl;
+ struct e1000_hw *hw = &adapter->hw;
+
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(100, 150);
+
+ rxdctl = er32(RXDCTL(0));
+ /* zero the lower 14 bits (prefetch and host thresholds) */
+ rxdctl &= 0xffffc000;
+
+ /* update thresholds: prefetch threshold to 31, host threshold to 1
+ * and make sure the granularity is "descriptors" and not "cache lines"
+ */
+ rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+
+ ew32(RXDCTL(0), rxdctl);
+ /* momentarily enable the RX ring for the changes to take effect */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(100, 150);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+}
+
+/**
+ * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ *
+ * In i219, the descriptor rings must be emptied before resetting the HW
+ * or before changing the device state to D3 during runtime (runtime PM).
+ *
+ * Failure to do this will cause the HW to enter a unit hang state which can
+ * only be released by a PCI reset on the device.
+ *
+ */
+
+static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
+{
+ u16 hang_state;
+ u32 fext_nvm11, tdlen;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* First, disable MULR fix in FEXTNVM11 */
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ /* do nothing if we're not in faulty state, or if the queue is empty */
+ tdlen = er32(TDLEN(0));
+ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+ &hang_state);
+ if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
+ return;
+ e1000_flush_tx_ring(adapter);
+ /* recheck, maybe the fault is caused by the rx ring */
+ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+ &hang_state);
+ if (hang_state & FLUSH_DESC_REQUIRED)
+ e1000_flush_rx_ring(adapter);
+}
+
+/**
* e1000e_reset - bring the hardware into a known good state
*
* This function boots the hardware and enables some settings that
@@ -3943,6 +4053,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
}
+ if (hw->mac.type == e1000_pch_spt)
+ e1000_flush_desc_rings(adapter);
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
@@ -4016,6 +4128,20 @@ void e1000e_reset(struct e1000_adapter *adapter)
phy_data &= ~IGP02E1000_PM_SPD;
e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
}
+ if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) {
+ u32 reg;
+
+ /* Fextnvm7 @ 0xe4[2] = 1 */
+ reg = er32(FEXTNVM7);
+ reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
+ ew32(FEXTNVM7, reg);
+ /* Fextnvm9 @ 0x5bb4[13:12] = 11 */
+ reg = er32(FEXTNVM9);
+ reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
+ E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
+ ew32(FEXTNVM9, reg);
+ }
+
}
int e1000e_up(struct e1000_adapter *adapter)
@@ -4115,8 +4241,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
spin_unlock(&adapter->stats64_lock);
e1000e_flush_descriptors(adapter);
- e1000_clean_tx_ring(adapter->tx_ring);
- e1000_clean_rx_ring(adapter->rx_ring);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -4127,8 +4251,14 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
e1000_lv_jumbo_workaround_ich8lan(hw, false))
e_dbg("failed to disable jumbo frame workaround mode\n");
- if (reset && !pci_channel_offline(adapter->pdev))
- e1000e_reset(adapter);
+ if (!pci_channel_offline(adapter->pdev)) {
+ if (reset)
+ e1000e_reset(adapter);
+ else if (hw->mac.type == e1000_pch_spt)
+ e1000_flush_desc_rings(adapter);
+ }
+ e1000_clean_tx_ring(adapter->tx_ring);
+ e1000_clean_rx_ring(adapter->rx_ring);
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4150,11 +4280,29 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
+ u32 systimel_1, systimel_2, systimeh;
cycle_t systim, systim_next;
-
- /* latch SYSTIMH on read of SYSTIML */
- systim = (cycle_t)er32(SYSTIML);
- systim |= (cycle_t)er32(SYSTIMH) << 32;
+ /* SYSTIMH latching upon SYSTIML read does not work well.
+ * This means that if SYSTIML overflows after we read it but before
+ * we read SYSTIMH, the value of SYSTIMH has been incremented and we
+ * will experience a huge non-linear increment in the systime value.
+ * To fix that we test for overflow and, if true, re-read systime.
+ */
+ systimel_1 = er32(SYSTIML);
+ systimeh = er32(SYSTIMH);
+ systimel_2 = er32(SYSTIML);
+ /* Check for overflow. If there was no overflow, use the values */
+ if (systimel_1 < systimel_2) {
+ systim = (cycle_t)systimel_1;
+ systim |= (cycle_t)systimeh << 32;
+ } else {
+ /* There was an overflow; read SYSTIMH again and use
+ * systimel_2
+ */
+ systimeh = er32(SYSTIMH);
+ systim = (cycle_t)systimel_2;
+ systim |= (cycle_t)systimeh << 32;
+ }
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
u64 incvalue, time_delta, rem, temp;
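The low/high/low sequence above is the usual way to read a 64-bit counter exposed as two 32-bit registers when the high half is not latched atomically with the low half. The same logic in generic form, with rd_lo()/rd_hi() standing in for the er32(SYSTIML)/er32(SYSTIMH) accessors:

#include <linux/types.h>

static u64 read_split_counter(u32 (*rd_lo)(void), u32 (*rd_hi)(void))
{
	u32 lo1 = rd_lo();
	u32 hi  = rd_hi();
	u32 lo2 = rd_lo();

	if (lo1 < lo2)				/* low half did not wrap in between */
		return ((u64)hi << 32) | lo1;

	hi = rd_hi();				/* it wrapped; high half may have moved */
	return ((u64)hi << 32) | lo2;
}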
@@ -6217,13 +6365,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
}
/**
- * e1000e_disable_aspm - Disable ASPM states
+ * __e1000e_disable_aspm - Disable ASPM states
* @pdev: pointer to PCI device struct
* @state: bit-mask of ASPM states to disable
+ * @locked: indication if this context holds pci_bus_sem locked.
*
* Some devices *must* have certain ASPM states disabled per hardware errata.
**/
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
{
struct pci_dev *parent = pdev->bus->self;
u16 aspm_dis_mask = 0;
@@ -6262,7 +6411,10 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
"L1" : "");
#ifdef CONFIG_PCIEASPM
- pci_disable_link_state_locked(pdev, state);
+ if (locked)
+ pci_disable_link_state_locked(pdev, state);
+ else
+ pci_disable_link_state(pdev, state);
/* Double-check ASPM control. If not disabled by the above, the
* BIOS is preventing that from happening (or CONFIG_PCIEASPM is
@@ -6285,6 +6437,32 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
aspm_dis_mask);
}
+/**
+ * e1000e_disable_aspm - Disable ASPM states.
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * This function acquires the pci_bus_sem!
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ __e1000e_disable_aspm(pdev, state, 0);
+}
+
+/**
+ * e1000e_disable_aspm_locked - Disable ASPM states.
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * This function must be called with pci_bus_sem acquired!
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
+{
+ __e1000e_disable_aspm(pdev, state, 1);
+}
+
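The split into e1000e_disable_aspm()/e1000e_disable_aspm_locked() is the common locked/unlocked entry-point idiom: a shared __helper takes a flag saying whether the caller already holds the relevant lock (here pci_bus_sem on the resume path) and acts accordingly; in this driver the flag only selects between pci_disable_link_state() and pci_disable_link_state_locked(). Reduced to a skeleton that conditionally takes a local mutex instead, with placeholder names:

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(my_lock);

static void __do_thing(int arg, bool locked)
{
	if (!locked)
		mutex_lock(&my_lock);

	/* ... work that must run with my_lock held ... */

	if (!locked)
		mutex_unlock(&my_lock);
}

static void do_thing(int arg)		/* acquires the lock itself */
{
	__do_thing(arg, false);
}

static void do_thing_locked(int arg)	/* caller already holds my_lock */
{
	__do_thing(arg, true);
}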
#ifdef CONFIG_PM
static int __e1000_resume(struct pci_dev *pdev)
{
@@ -6298,7 +6476,7 @@ static int __e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
aspm_disable_flag |= PCIE_LINK_STATE_L1;
if (aspm_disable_flag)
- e1000e_disable_aspm(pdev, aspm_disable_flag);
+ e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
pci_set_master(pdev);
@@ -6676,6 +6854,19 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
}
}
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
+ if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_RXFCS;
+
+ return features;
+}
+
static int e1000_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -6732,6 +6923,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_poll_controller = e1000_netpoll,
#endif
.ndo_set_features = e1000_set_features,
+ .ndo_fix_features = e1000_fix_features,
};
/**
@@ -7287,7 +7479,7 @@ static int __init e1000_init_module(void)
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
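The new .ndo_fix_features hook above lets the core mask NETIF_F_RXFCS whenever the MTU exceeds ETH_DATA_LEN. A minimal, hedged sketch of how such a hook is typically exercised; example_change_mtu() is illustrative, not the driver's actual MTU path — re-running feature negotiation makes the core call ndo_fix_features and then, if the mask changed, ndo_set_features.

/* illustrative only: after a change that affects feature validity (here
 * the MTU), re-run feature negotiation so the fix_features hook can veto
 * incompatible offloads such as NETIF_F_RXFCS for jumbo frames
 */
static int example_change_mtu(struct net_device *netdev, int new_mtu)
{
	netdev->mtu = new_mtu;
	netdev_update_features(netdev);
	return 0;
}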
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index fa6b1036a..49f205c02 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 342bf69ef..5d46967e0 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index aa1923f7e..6d8c39abe 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b2005e13f..de13aeaca 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 537d2780b..55bfe4735 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 8d7b21dc7..25a0ad510 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 85eefc483..b24e5fee1 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,8 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 4b9d9f88a..c6dc96834 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -124,7 +124,7 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
char *p = (char *)data;
- int i;
+ unsigned int i;
switch (stringset) {
case ETH_SS_TEST:
@@ -143,12 +143,13 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
- if (interface->hw.mac.type != fm10k_mac_vf)
+ if (interface->hw.mac.type != fm10k_mac_vf) {
for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ }
for (i = 0; i < interface->hw.mac.max_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5b08e6284..94571e6e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -400,11 +400,31 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return num_vfs;
}
+static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
+ struct fm10k_vf_info *vf_info)
+{
+ struct fm10k_hw *hw = &interface->hw;
+
+ /* assigning the MAC address will send a mailbox message */
+ fm10k_mbx_lock(interface);
+
+ /* disable LPORT for this VF which clears switch rules */
+ hw->iov.ops.reset_lport(hw, vf_info);
+
+ /* assign new MAC+VLAN for this VF */
+ hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
+
+ /* re-enable the LPORT for this VF */
+ hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
+ FM10K_VF_FLAG_MULTI_CAPABLE);
+
+ fm10k_mbx_unlock(interface);
+}
+
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
- struct fm10k_hw *hw = &interface->hw;
struct fm10k_vf_info *vf_info;
/* verify SR-IOV is active and that vf idx is valid */
@@ -419,13 +439,7 @@ int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
vf_info = &iov_data->vf_info[vf_idx];
ether_addr_copy(vf_info->mac, mac);
- /* assigning the MAC will send a mailbox message so lock is needed */
- fm10k_mbx_lock(interface);
-
- /* assign MAC address to VF */
- hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
-
- fm10k_mbx_unlock(interface);
+ fm10k_reset_vf_info(interface, vf_info);
return 0;
}
@@ -455,16 +469,10 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
/* record default VLAN ID for VF */
vf_info->pf_vid = vid;
- /* assigning the VLAN will send a mailbox message so lock is needed */
- fm10k_mbx_lock(interface);
-
/* Clear the VLAN table for the VF */
hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
- /* Update VF assignment and trigger reset */
- hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
-
- fm10k_mbx_unlock(interface);
+ fm10k_reset_vf_info(interface, vf_info);
return 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index c754b2027..b5b292510 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
static inline bool fm10k_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
@@ -269,16 +269,19 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = FM10K_RX_BUFSZ;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
+ unsigned int pull_len;
- if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
+ if (likely(size <= FM10K_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as-is */
@@ -290,8 +293,21 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}
@@ -518,44 +534,6 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
}
/**
- * fm10k_pull_tail - fm10k specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
- *
- * This function is an fm10k specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void fm10k_pull_tail(struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* fm10k_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -580,10 +558,6 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
return true;
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- fm10k_pull_tail(skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
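The fm10k_main.c hunks above fold the old fm10k_pull_tail() into fm10k_add_rx_frag(): small frames are copied whole into the skb's linear area, larger ones have only their headers pulled in (sized by eth_get_headlen()) while the payload stays attached as a page fragment. A minimal, hedged sketch of that copy-break pattern; RX_HDR_LEN_EXAMPLE and example_add_rx_data() are illustrative names, not driver API.

#define RX_HDR_LEN_EXAMPLE	256	/* illustrative header budget */

static void example_add_rx_data(struct sk_buff *skb, struct page *page,
				unsigned int offset, unsigned int size,
				unsigned int truesize)
{
	unsigned char *va = page_address(page) + offset;
	unsigned int pull_len = eth_get_headlen(va, RX_HDR_LEN_EXAMPLE);

	/* copy the link/IP/L4 headers into the skb's linear area */
	memcpy(__skb_put(skb, pull_len), va, pull_len);

	/* attach the remaining payload as a page fragment */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			offset + pull_len, size - pull_len, truesize);
}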
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 1b2738380..1a4b52637 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1259,16 +1259,11 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw,
struct fm10k_mbx_info *mbx)
{
const u32 *hdr = &mbx->mbx_hdr;
- s32 err_no;
u16 head;
/* we will need to pull all of the fields for verification */
head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
- /* we only have lower 10 bits of error number so add upper bits */
- err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO);
- err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO);
-
switch (mbx->state) {
case FM10K_STATE_OPEN:
case FM10K_STATE_DISCONNECT:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2f4f41b7e..99228bf46 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -923,18 +923,12 @@ static int __fm10k_mc_sync(struct net_device *dev,
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
- s32 err;
-
- if (!is_multicast_ether_addr(addr))
- return -EADDRNOTAVAIL;
/* update table with current entries */
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
- err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
- if (err)
- return err;
+ hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
}
return 0;
@@ -1339,8 +1333,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
dglort.glort = interface->glort;
- if (l2_accel)
- dglort.shared_l = fls(l2_accel->size);
+ dglort.shared_l = fls(l2_accel->size);
hw->mac.ops.configure_dglort_map(hw, &dglort);
/* If table is empty remove it */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index df9fda38b..ce53ff25f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1559,6 +1559,7 @@ void fm10k_down(struct fm10k_intfc *interface)
/* free any buffers still on the rings */
fm10k_clean_all_tx_rings(interface);
+ fm10k_clean_all_rx_rings(interface);
}
/**
@@ -1740,30 +1741,18 @@ static int fm10k_probe(struct pci_dev *pdev,
struct fm10k_intfc *interface;
struct fm10k_hw *hw;
int err;
- u64 dma_mask;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- /* By default fm10k only supports a 48 bit DMA mask */
- dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev);
-
- if ((dma_mask <= DMA_BIT_MASK(32)) ||
- dma_set_mask_and_coherent(&pdev->dev, dma_mask)) {
- dma_mask &= DMA_BIT_MASK(32);
-
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: %d\n", err);
+ goto err_dma;
}
err = pci_request_selected_regions(pdev,
@@ -1772,7 +1761,7 @@ static int fm10k_probe(struct pci_dev *pdev,
fm10k_driver_name);
if (err) {
dev_err(&pdev->dev,
- "pci_request_selected_regions failed 0x%x\n", err);
+ "pci_request_selected_regions failed: %d\n", err);
goto err_pci_reg;
}
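The probe hunk above collapses the hand-rolled mask negotiation into dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call. A hedged sketch of the resulting fallback, with example_set_dma_mask() as an illustrative name:

static int example_set_dma_mask(struct device *dev)
{
	int err;

	/* prefer the device's native 48-bit addressing */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err)
		/* otherwise fall back to 32-bit DMA */
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	return err;
}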
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 891e21874..3ca0233b3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1046,6 +1046,12 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
}
+ /* repeat the first ring for all the remaining VF rings */
+ for (i = queues_per_pool; i < qmap_stride; i++) {
+ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
+ fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
+ }
+
return 0;
}
@@ -1345,6 +1351,14 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
err = fm10k_update_lport_state_pf(hw, vf_info->glort,
1, false);
+ /* we need to clear VF_FLAG_ENABLED flags in order to ensure
+ * that we actually re-enable the LPORT state below. Note that
+ * this has no impact if the VF is already disabled, as the
+ * flags are already cleared.
+ */
+ if (!err)
+ vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
+
/* when enabling the port we should reset the rate limiters */
hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
@@ -1786,8 +1800,8 @@ static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
return FM10K_ERR_PARAM;
- if (ppb < 0)
- systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE;
+ if (ppb > 0)
+ systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 7ab1db4ff..40a0dbc62 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -81,26 +81,26 @@ struct fm10k_mac_update {
__le16 glort;
u8 flags;
u8 action;
-};
+} __packed;
struct fm10k_global_table_data {
__le32 used;
__le32 avail;
-};
+} __packed;
struct fm10k_swapi_error {
__le32 status;
struct fm10k_global_table_data mac;
struct fm10k_global_table_data nexthop;
struct fm10k_global_table_data ffu;
-};
+} __packed;
struct fm10k_swapi_1588_timestamp {
__le64 egress;
__le64 ingress;
__le16 dglort;
__le16 sglort;
-};
+} __packed;
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
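These structures travel over the PF/switch-manager mailbox, so marking them __packed pins the on-the-wire layout regardless of compiler padding. A hedged illustration (sizes assume the common ABI where __le32 requires 4-byte alignment):

struct example_unpacked {
	__le32 status;
	__le16 glort;
};			/* typically sizeof == 8: two bytes of tail padding */

struct example_packed {
	__le32 status;
	__le16 glort;
} __packed;		/* sizeof == 6: exactly the bytes on the wire */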
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index 9043633c3..b4945e8ab 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -70,16 +70,16 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
* if none are present then insert skb in tail of list
*/
skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
- if (!skb)
+ if (!skb) {
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
__skb_queue_tail(list, clone);
+ }
spin_unlock_irqrestore(&list->lock, flags);
/* if list is already has one then we just free the clone */
if (skb)
- kfree_skb(skb);
- else
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ dev_kfree_skb(clone);
}
void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
@@ -103,9 +103,10 @@ void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
if (!skb)
return;
- /* timestamp the sk_buff and return it to the socket */
+ /* timestamp the sk_buff and free our copy */
fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
- skb_complete_tx_timestamp(skb, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
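After the change above the driver keeps ownership of its Tx clone: SKBTX_IN_PROGRESS is set before queueing, the timestamp is delivered with skb_tstamp_tx(), and the clone is freed explicitly rather than handed off via skb_complete_tx_timestamp(). A hedged sketch of that completion path; example_complete_tx_tstamp() is an illustrative name.

static void example_complete_tx_tstamp(struct sk_buff *clone, u64 ns)
{
	struct skb_shared_hwtstamps shhwtstamps = {
		.hwtstamp = ns_to_ktime(ns),
	};

	skb_tstamp_tx(clone, &shhwtstamps);	/* report timestamp to the socket */
	dev_kfree_skb_any(clone);		/* the driver still owns the clone */
}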
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 4af96686c..2a17d82fa 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -369,7 +369,7 @@ struct fm10k_hw;
/* Registers contained in BAR 4 for Switch management */
#define FM10K_SW_SYSTIME_ADJUST 0x0224D
#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE 0x80000000
+#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
enum fm10k_int_source {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 5d4730712..ec76c3fa3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
I40E_FD_STAT_SB,
+ I40E_FD_STAT_ATR_TUNNEL,
I40E_FD_STAT_PF_COUNT
};
#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
#define I40E_FD_SB_STAT_IDX(pf_id) \
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
struct i40e_fdir_filter {
struct hlist_node fdir_node;
@@ -263,8 +266,6 @@ struct i40e_pf {
struct hlist_head fdir_filter_list;
u16 fdir_pf_active_filters;
- u16 fd_sb_cnt_idx;
- u16 fd_atr_cnt_idx;
unsigned long fd_flush_timestamp;
u32 fd_flush_cnt;
u32 fd_add_err;
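With I40E_FD_STAT_ATR_TUNNEL added, each PF owns three consecutive GLQF_PCNT counters and the indices are derived from pf_id instead of being cached in struct i40e_pf. A worked example for pf_id == 2, following the macros above:

/*
 * I40E_FD_STAT_PF_IDX(2)         == 2 * I40E_FD_STAT_PF_COUNT   == 6
 * I40E_FD_ATR_STAT_IDX(2)        == 6 + I40E_FD_STAT_ATR        == 6
 * I40E_FD_SB_STAT_IDX(2)         == 6 + I40E_FD_STAT_SB         == 7
 * I40E_FD_ATR_TUNNEL_STAT_IDX(2) == 6 + I40E_FD_STAT_ATR_TUNNEL == 8
 */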
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4cbaaeb90..9a68c65b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
/* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
return *data;
}
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+ struct i40e_vf *vfs = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+ return true;
+ return false;
+}
+
static void i40e_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
netif_info(pf, drv, netdev, "offline testing starting\n");
set_bit(__I40E_TESTING, &pf->state);
+
+ if (i40e_active_vfs(pf)) {
+ dev_warn(&pf->pdev->dev,
+ "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LOOPBACK] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ goto skip_ol_tests;
+ }
+
/* If the device is online then take it offline */
if (if_running)
/* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_LOOPBACK] = 0;
}
+skip_ol_tests:
+
netif_info(pf, drv, netdev, "testing finished\n");
}
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->pctype = 0;
input->dest_vsi = vsi->id;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- input->cnt_index = pf->fd_sb_cnt_idx;
+ input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
input->flow_type = fsp->flow_type;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 1803afeef..c8b621e0e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
*
* The FC EOF is converted to the value understood by HW for descriptor
* programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first; it already checks all supported valid EOF values.
**/
static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
{
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
case FC_EOF_A:
return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
default:
- /* FIXME: still returns 0 */
- pr_err("Unrecognized EOF %x\n", eof);
- return 0;
+ /* A supported, valid EOF has already been checked by
+ * i40e_fcoe_eof_is_supported(), so this default case
+ * should never be hit.
+ */
+ WARN_ON(1);
+ return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5b5bea159..48a52b35b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
dcb_cfg = &hw->local_dcbx_config;
- /* See if DCB enabled with PFC TC */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
- !(dcb_cfg->pfc.pfcenable)) {
+ /* Collect Link XOFF stats when PFC is disabled */
+ if (!dcb_cfg->pfc.pfcenable) {
i40e_update_link_xoff_rx(pf);
return;
}
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_jabber, &nsd->rx_jabber);
/* FDIR stats */
- i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
pf->stat_offsets_loaded,
&osd->fd_atr_match, &nsd->fd_atr_match);
- i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
pf->stat_offsets_loaded,
&osd->fd_sb_match, &nsd->fd_sb_match);
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+ pf->stat_offsets_loaded,
+ &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
val = rd32(hw, I40E_PRTPM_EEE_STAT);
nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
}
i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
- dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
/* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
}
}
}
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!(time_after(jiffies, min_flush_time)) &&
(fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
- dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
disable_atr = true;
}
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!disable_atr)
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
- dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
}
}
@@ -7680,12 +7690,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- /* Setup a counter for fd_atr per PF */
- pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- /* Setup a counter for fd_sb per PF */
- pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7775,7 +7781,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
@@ -8062,7 +8069,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
#ifdef HAVE_BRIDGE_FILTER
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
- u32 __always_unused filter_mask, int nlflags)
+ u32 filter_mask, int nlflags)
#else
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, int nlflags)
@@ -8088,7 +8095,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
- nlflags);
+ nlflags, 0, 0, filter_mask, NULL);
}
#endif /* HAVE_BRIDGE_ATTRIBS */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9d95042d5..9a4f2bc70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
- /* set the timestamp */
- tx_buf->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
*/
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
} else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
}
}
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->auto_disable_flags &
I40E_FLAG_FD_SB_ENABLED)) {
- dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_SB_ENABLED;
}
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->vsi->seid,
tx_ring->queue_index,
tx_ring->next_to_use, i);
- dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->tx_bi[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1688,7 +1681,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1821,7 +1813,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
#endif
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1925,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* i40e_atr - Add a Flow Director ATR filter
* @tx_ring: ring to add programming descriptor to
* @skb: send buffer
- * @flags: send flags
+ * @tx_flags: send tx flags
* @protocol: wire protocol
**/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 flags, __be16 protocol)
+ u32 tx_flags, __be16 protocol)
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1954,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!tx_ring->atr_sample_rate)
return;
- /* snag network header to get L4 type and address */
- hdr.network = skb_network_header(skb);
+ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+ return;
- /* Currently only IPv4/IPv6 with TCP is supported */
- if (protocol == htons(ETH_P_IP)) {
- if (hdr.ipv4->protocol != IPPROTO_TCP)
- return;
+ if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
- } else if (protocol == htons(ETH_P_IPV6)) {
- if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ /* Currently only IPv4/IPv6 with TCP is supported
+ * access ihl as u8 to avoid unaligned access on ia64
+ */
+ if (tx_flags & I40E_TX_FLAGS_IPV4)
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ else if (protocol == htons(ETH_P_IPV6))
+ hlen = sizeof(struct ipv6hdr);
+ else
return;
-
- hlen = sizeof(struct ipv6hdr);
} else {
- return;
+ hdr.network = skb_inner_network_header(skb);
+ hlen = skb_inner_network_header_len(skb);
}
+ /* Currently only IPv4/IPv6 with TCP is supported
+ * Note: tx_flags gets modified to reflect inner protocols in
+ * tx_enable_csum function if encap is enabled.
+ */
+ if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+ (hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+ else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+ (hdr.ipv6->nexthdr != IPPROTO_TCP))
+ return;
+
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
@@ -2022,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dtype_cmd |=
- ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ else
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->rsvd = cpu_to_le32(0);
@@ -2045,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
* otherwise returns 0 to indicate the flags has been set properly.
**/
#ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
#endif
{
__be16 protocol = skb->protocol;
@@ -2119,16 +2130,14 @@ out:
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
* @hdr_len: ptr to the size of the packet header
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len,
- u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+ u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+ u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
struct ipv6hdr *ipv6h;
@@ -2220,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
/**
* i40e_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring,
u32 *cd_tunneling)
@@ -2241,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
return;
@@ -2250,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
this_ipv6_hdr = inner_ipv6_hdr(skb);
this_tcp_hdrlen = inner_tcp_hdrlen(skb);
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
- if (tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
}
@@ -2273,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
- tx_flags &= ~I40E_TX_FLAGS_IPV4;
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -2284,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
}
/* Enable IP checksum offloads */
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
l4_hdr = this_ip_hdr->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
this_ip_hdr->check = 0;
} else {
@@ -2298,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the td_offset for IP header length */
*td_offset = (network_hdr_len >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
l4_hdr = this_ipv6_hdr->nexthdr;
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
/* Now set the td_offset for IP header length */
@@ -2396,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* Returns 0 if stop is not needed
**/
#ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2473,13 +2482,13 @@ linearize_chk_done:
* @td_offset: offset for checksum or crc
**/
#ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
unsigned int data_len = skb->data_len;
@@ -2585,9 +2594,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->queue_index),
first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2640,11 +2646,11 @@ dma_error:
* one descriptor.
**/
#ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
#endif
{
unsigned int f;
@@ -2706,7 +2712,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ tso = i40e_tso(tx_ring, skb, &hdr_len,
&cd_type_cmd_tso_mss, &cd_tunneling);
if (tso < 0)
@@ -2732,7 +2738,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
}
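The i40e_txrx.c hunks above make i40e_tx_enable_csum() update tx_flags through a pointer (setting I40E_TX_FLAGS_VXLAN_TUNNEL for UDP-encapsulated frames), and i40e_atr() then picks inner or outer headers and the matching counter index from those flags. A hedged sketch of the header selection, distilled from the hunk; example_atr_tcp_header() is an illustrative name.

static struct tcphdr *example_atr_tcp_header(struct sk_buff *skb, u32 tx_flags)
{
	unsigned char *network;
	unsigned int hlen;

	if (tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL) {
		/* encapsulated frame: filter on the inner headers */
		network = skb_inner_network_header(skb);
		hlen = skb_inner_network_header_len(skb);
	} else {
		/* plain frame: filter on the outer headers */
		network = skb_network_header(skb);
		hlen = (tx_flags & I40E_TX_FLAGS_IPV4) ?
		       (unsigned int)((network[0] & 0x0F) << 2) :
		       sizeof(struct ipv6hdr);
	}

	return (struct tcphdr *)(network + hlen);
}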
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4b0b8102c..0dc48dc9c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
- unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 568e855da..9a5a75b1e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4e9376da0..23f95cdbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
+ if (pf->state & __I40E_TESTING) {
+ dev_warn(&pdev->dev,
+ "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+ err = -EPERM;
+ goto err_out;
+ }
+
dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
i40e_free_vfs(pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 458fbb421..395f32f22 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->vsi->seid,
tx_ring->queue_index,
tx_ring->next_to_use, i);
- dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->tx_bi[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -488,6 +484,8 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!dev)
return -ENOMEM;
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(tx_ring->tx_bi);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!tx_ring->tx_bi)
@@ -648,6 +646,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
struct device *dev = rx_ring->dev;
int bi_size;
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(rx_ring->rx_bi);
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!rx_ring->rx_bi)
@@ -1128,9 +1128,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1156,7 +1153,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1271,7 +1267,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
: 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1352,7 +1347,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
}
/**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
* @tx_ring: ring to send buffer on
* @flags: the tx flags to be set
@@ -1363,9 +1358,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* Returns error code indicate the frame should be dropped upon error and the
* otherwise returns 0 to indicate the flags has been set properly.
**/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
@@ -1408,16 +1403,14 @@ out:
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
* @hdr_len: ptr to the size of the packet header
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len,
- u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+ u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+ u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
struct ipv6hdr *ipv6h;
@@ -1468,12 +1461,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
/**
* i40e_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring,
u32 *cd_tunneling)
@@ -1489,6 +1482,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
return;
@@ -1498,18 +1492,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
this_ipv6_hdr = inner_ipv6_hdr(skb);
this_tcp_hdrlen = inner_tcp_hdrlen(skb);
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
- if (tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
}
@@ -1521,8 +1514,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
- tx_flags &= ~I40E_TX_FLAGS_IPV4;
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
@@ -1534,12 +1527,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
}
/* Enable IP checksum offloads */
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
l4_hdr = this_ip_hdr->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
this_ip_hdr->check = 0;
} else {
@@ -1548,7 +1541,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the td_offset for IP header length */
*td_offset = (network_hdr_len >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
l4_hdr = this_ipv6_hdr->nexthdr;
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
/* Now set the td_offset for IP header length */
@@ -1672,7 +1665,44 @@ linearize_chk_done:
}
/**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
* @first: first buffer info buffer to use
@@ -1681,9 +1711,9 @@ linearize_chk_done:
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
@@ -1789,9 +1819,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->queue_index),
first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -1808,8 +1835,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
+ i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index)))
+ writel(i, tx_ring->tail);
return;
@@ -1831,44 +1862,7 @@ dma_error:
}
/**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
- /* Memory barrier before checking head and tail */
- smp_mb();
-
- /* Check again in a case another CPU has just made room available. */
- if (likely(I40E_DESC_UNUSED(tx_ring) < size))
- return -EBUSY;
-
- /* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++tx_ring->tx_stats.restart_queue;
- return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
@@ -1876,8 +1870,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* there is not enough descriptors available in this ring since we need at least
* one descriptor.
**/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
{
unsigned int f;
int count = 0;
@@ -1892,7 +1886,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
@@ -1918,11 +1912,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso;
- if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
/* prepare the xmit flags */
- if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
/* obtain protocol of skb */
@@ -1937,7 +1931,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ tso = i40e_tso(tx_ring, skb, &hdr_len,
&cd_type_cmd_tso_mss, &cd_tunneling);
if (tso < 0)
@@ -1958,17 +1952,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
}
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
- i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
-
- i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
return NETDEV_TX_OK;
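i40evf_tx_map() above now defers the tail (doorbell) write when the stack signals more frames are coming, the same xmit_more batching used by the PF driver. A hedged sketch of the guarding condition; example_kick_doorbell() is an illustrative name.

static void example_kick_doorbell(struct i40e_ring *tx_ring,
				  struct sk_buff *skb, u32 next_to_use)
{
	/* only ring the doorbell for the last skb of a batch, or when the
	 * queue has been stopped and no further frames will follow
	 */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						   tx_ring->queue_index)))
		writel(next_to_use, tx_ring->tail);
}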
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1fb..e7a34f899 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
- unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a93..c463ec415 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 1b98c25b3..fea3b75a9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -264,7 +264,6 @@ extern const char i40evf_driver_version[];
int i40evf_up(struct i40evf_adapter *adapter);
void i40evf_down(struct i40evf_adapter *adapter);
-void i40evf_reinit_locked(struct i40evf_adapter *adapter);
void i40evf_reset(struct i40evf_adapter *adapter);
void i40evf_set_ethtool_ops(struct net_device *netdev);
void i40evf_update_stats(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index f4e77665b..2b53c870e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -267,8 +267,10 @@ static int i40evf_set_ringparam(struct net_device *netdev,
adapter->tx_desc_count = new_tx_count;
adapter->rx_desc_count = new_rx_count;
- if (netif_running(netdev))
- i40evf_reinit_locked(adapter);
+ if (netif_running(netdev)) {
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7c53aca4b..4ab4ebba0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -170,7 +170,8 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
- if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING |
+ I40EVF_FLAG_RESET_NEEDED))) {
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
}
@@ -1460,7 +1461,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
lut = 0;
for (j = 0; j < 4; j++) {
- if (cqueue == adapter->vsi_res->num_queue_pairs)
+ if (cqueue == adapter->num_active_queues)
cqueue = 0;
lut |= ((cqueue) << (8 * j));
cqueue++;
@@ -1470,8 +1471,8 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
i40e_flush(hw);
}
-#define I40EVF_RESET_WAIT_MS 100
-#define I40EVF_RESET_WAIT_COUNT 200
+#define I40EVF_RESET_WAIT_MS 10
+#define I40EVF_RESET_WAIT_COUNT 500
/**
* i40evf_reset_task - Call-back task to handle hardware reset
* @work: pointer to work_struct
@@ -1495,10 +1496,17 @@ static void i40evf_reset_task(struct work_struct *work)
&adapter->crit_section))
usleep_range(500, 1000);
+ i40evf_misc_irq_disable(adapter);
if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
- dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+ adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
+ /* Restart the AQ here. If we have been reset but didn't
+ * detect it, or if the PF had to reinit, our AQ will be hosed.
+ */
+ i40evf_shutdown_adminq(hw);
+ i40evf_init_adminq(hw);
i40evf_request_reset(adapter);
}
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
/* poll until we see the reset actually happen */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
@@ -1507,10 +1515,10 @@ static void i40evf_reset_task(struct work_struct *work)
if ((rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED))
break;
- msleep(I40EVF_RESET_WAIT_MS);
+ usleep_range(500, 1000);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
- adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Never saw reset\n");
goto continue_reset; /* act like the reset happened */
}
@@ -1518,11 +1526,12 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((rstat_val == I40E_VFR_VFACTIVE) ||
- (rstat_val == I40E_VFR_COMPLETED))
+ if (rstat_val == I40E_VFR_VFACTIVE)
break;
msleep(I40EVF_RESET_WAIT_MS);
}
+ /* extra wait to make sure minimum wait is met */
+ msleep(I40EVF_RESET_WAIT_MS);
if (i == I40EVF_RESET_WAIT_COUNT) {
struct i40evf_mac_filter *f, *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp;
@@ -1534,11 +1543,10 @@ static void i40evf_reset_task(struct work_struct *work)
if (netif_running(adapter->netdev)) {
set_bit(__I40E_DOWN, &adapter->vsi.state);
- i40evf_irq_disable(adapter);
- i40evf_napi_disable_all(adapter);
- netif_tx_disable(netdev);
- netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ i40evf_napi_disable_all(adapter);
+ i40evf_irq_disable(adapter);
i40evf_free_traffic_irqs(adapter);
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
@@ -1550,6 +1558,7 @@ static void i40evf_reset_task(struct work_struct *work)
list_del(&f->list);
kfree(f);
}
+
list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
list) {
list_del(&fv->list);
@@ -1564,22 +1573,27 @@ static void i40evf_reset_task(struct work_struct *work)
i40evf_shutdown_adminq(hw);
adapter->netdev->flags &= ~IFF_UP;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
return; /* Do not attempt to reinit. It's dead, Jim. */
}
continue_reset:
- adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
-
- i40evf_irq_disable(adapter);
-
if (netif_running(adapter->netdev)) {
- i40evf_napi_disable_all(adapter);
- netif_tx_disable(netdev);
- netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ i40evf_napi_disable_all(adapter);
}
+ i40evf_irq_disable(adapter);
adapter->state = __I40EVF_RESETTING;
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
+ /* free the Tx/Rx rings and descriptors, might be better to just
+ * re-use them sometime in the future
+ */
+ i40evf_free_all_rx_resources(adapter);
+ i40evf_free_all_tx_resources(adapter);
/* kill and reinit the admin queue */
if (i40evf_shutdown_adminq(hw))
@@ -1603,6 +1617,7 @@ continue_reset:
adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1624,7 +1639,10 @@ continue_reset:
goto reset_err;
i40evf_irq_enable(adapter, true);
+ } else {
+ adapter->state = __I40EVF_DOWN;
}
+
return;
reset_err:
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
@@ -1667,6 +1685,11 @@ static void i40evf_adminq_task(struct work_struct *work)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
} while (pending);
+ if ((adapter->flags &
+ (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
+ adapter->state == __I40EVF_RESETTING)
+ goto freedom;
+
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
oldval = val;
@@ -1702,6 +1725,7 @@ static void i40evf_adminq_task(struct work_struct *work)
if (oldval != val)
wr32(hw, hw->aq.asq.len, val);
+freedom:
kfree(event.msg_buf);
out:
/* re-enable Admin queue interrupt cause */
@@ -1897,47 +1921,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
}
/**
- * i40evf_reinit_locked - Software reinit
- * @adapter: board private structure
- *
- * Reinitializes the ring structures in response to a software configuration
- * change. Roughly the same as close followed by open, but skips releasing
- * and reallocating the interrupts.
- **/
-void i40evf_reinit_locked(struct i40evf_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int err;
-
- WARN_ON(in_interrupt());
-
- i40evf_down(adapter);
-
- /* allocate transmit descriptors */
- err = i40evf_setup_all_tx_resources(adapter);
- if (err)
- goto err_reinit;
-
- /* allocate receive descriptors */
- err = i40evf_setup_all_rx_resources(adapter);
- if (err)
- goto err_reinit;
-
- i40evf_configure(adapter);
-
- err = i40evf_up_complete(adapter);
- if (err)
- goto err_reinit;
-
- i40evf_irq_enable(adapter, true);
- return;
-
-err_reinit:
- dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- i40evf_close(netdev);
-}
-
-/**
* i40evf_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -1952,9 +1935,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
return -EINVAL;
- /* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
- i40evf_reinit_locked(adapter);
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+
return 0;
}
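
With i40evf_reinit_locked() removed, both the ethtool ring-size path and i40evf_change_mtu() now request a full reset by setting I40EVF_FLAG_RESET_NEEDED and kicking the reset work item, so teardown and reinit happen in one place (the reset task) instead of in the caller's context. A minimal kernel-style sketch of that shared pattern follows; the helper name is hypothetical, while the flag and work item are the ones visible in this patch.

static void i40evf_request_sw_reset(struct i40evf_adapter *adapter)
{
	/* Ask the reset task to tear the VF down and bring it back up
	 * with the adapter's current configuration (ring sizes, MTU).
	 */
	adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
	schedule_work(&adapter->reset_task);
}
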
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 0f69ef817..b0182dd31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,5 +1,5 @@
/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1900,8 +1900,8 @@ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
* igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
* @hw: pointer to the HW structure
*
- * After rx enable if managability is enabled then there is likely some
- * bad data at the start of the fifo and possibly in the DMA fifo. This
+ * After rx enable if manageability is enabled then there is likely some
+ * bad data at the start of the fifo and possibly in the DMA fifo. This
* function clears the fifos and flushes any packets that came in as rx was
* being enabled.
**/
@@ -1910,6 +1910,11 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
int i, ms_wait;
+ /* disable IPv6 options as per hardware errata */
+ rfctl = rd32(E1000_RFCTL);
+ rfctl |= E1000_RFCTL_IPV6_EX_DIS;
+ wr32(E1000_RFCTL, rfctl);
+
if (hw->mac.type != e1000_82575 ||
!(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
return;
@@ -1937,7 +1942,6 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
* incoming packets are rejected. Set enable and wait 2ms so that
* any packet that was coming in as RCTL.EN was set is flushed
*/
- rfctl = rd32(E1000_RFCTL);
wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
rlpml = rd32(E1000_RLPML);
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 217f81388..f8684aa28 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -344,7 +344,8 @@
#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Header split receive */
-#define E1000_RFCTL_LEF 0x00040000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_LEF 0x00040000
/* Collision related configuration parameters */
#define E1000_COLLISION_THRESHOLD 15
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a0a9b1fcb..830466c49 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -58,7 +58,7 @@
#define MAJ 5
#define MIN 2
-#define BUILD 15
+#define BUILD 18
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -1836,31 +1836,19 @@ void igb_reinit_locked(struct igb_adapter *adapter)
*
* @adapter: adapter struct
**/
-static s32 igb_enable_mas(struct igb_adapter *adapter)
+static void igb_enable_mas(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 connsw;
- s32 ret_val = 0;
-
- connsw = rd32(E1000_CONNSW);
- if (!(hw->phy.media_type == e1000_media_type_copper))
- return ret_val;
+ u32 connsw = rd32(E1000_CONNSW);
/* configure for SerDes media detect */
- if (!(connsw & E1000_CONNSW_SERDESD)) {
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_SERDESD))) {
connsw |= E1000_CONNSW_ENRGSRC;
connsw |= E1000_CONNSW_AUTOSENSE_EN;
wr32(E1000_CONNSW, connsw);
wrfl();
- } else if (connsw & E1000_CONNSW_SERDESD) {
- /* already SerDes, no need to enable anything */
- return ret_val;
- } else {
- netdev_info(adapter->netdev,
- "MAS: Unable to configure feature, disabling..\n");
- adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
}
- return ret_val;
}
void igb_reset(struct igb_adapter *adapter)
@@ -1980,10 +1968,9 @@ void igb_reset(struct igb_adapter *adapter)
adapter->ei.get_invariants(hw);
adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
}
- if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
- if (igb_enable_mas(adapter))
- dev_err(&pdev->dev,
- "Error enabling Media Auto Sense\n");
+ if ((mac->type == e1000_82575) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_enable_mas(adapter);
}
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
@@ -4989,6 +4976,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+ unsigned short f;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
@@ -4999,14 +4987,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
- unsigned short f;
-
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- } else {
- count += skb_shinfo(skb)->nr_frags;
- }
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
@@ -6584,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
static inline bool igb_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 636f9e350..ac3ac2a20 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -643,6 +643,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
/* Tx fast path data */
int num_tx_queues;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 824a7ab79..65db69b86 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1225,7 +1225,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
- .check_overtemp = &ixgbe_tn_check_overtemp,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
};
struct ixgbe_info ixgbe_82598_info = {
@@ -1234,4 +1234,5 @@ struct ixgbe_info ixgbe_82598_info = {
.mac_ops = &mac_ops_82598,
.eeprom_ops = &eeprom_ops_82598,
.phy_ops = &phy_ops_82598,
+ .mvals = ixgbe_mvals_8259X,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e0c363948..6b87d9634 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -71,7 +71,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
u32 fwsm, manc, factps;
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
return false;
@@ -79,7 +79,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
if (!(manc & IXGBE_MANC_RCV_TCO_EN))
return false;
- factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (factps & IXGBE_FACTPS_MNGCG)
return false;
@@ -510,7 +510,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
/* Check to see if MNG FW could be enabled */
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
!hw->wol_enabled &&
@@ -2378,4 +2378,5 @@ struct ixgbe_info ixgbe_82599_info = {
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_8259X,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 06d8f3cfa..4c1c26732 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -57,6 +57,11 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+/* Base table for registers values that change by MAC */
+const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(8259X)
+};
+
/**
* ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
* control
@@ -91,6 +96,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = true;
break;
default:
@@ -463,7 +470,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
@@ -681,7 +688,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
bus->lan_id = bus->func;
/* check for a port swap */
- reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
}
@@ -799,7 +806,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* Check for EEPROM present first.
* If not present leave as none
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (eec & IXGBE_EEC_PRES) {
eeprom->type = ixgbe_eeprom_spi;
@@ -1283,14 +1290,14 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
return IXGBE_ERR_SWFW_SYNC;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/* Request EEPROM Access */
eec |= IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (eec & IXGBE_EEC_GNT)
break;
udelay(5);
@@ -1299,7 +1306,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
/* Release if grant not acquired */
if (!(eec & IXGBE_EEC_GNT)) {
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
hw_dbg(hw, "Could not acquire EEPROM grant\n");
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
@@ -1309,7 +1316,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
/* Setup EEPROM for Read/Write */
/* Clear CS and SK */
eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
return 0;
@@ -1333,7 +1340,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (!(swsm & IXGBE_SWSM_SMBI))
break;
usleep_range(50, 100);
@@ -1353,7 +1360,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SMBI) {
hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
return IXGBE_ERR_EEPROM;
@@ -1362,16 +1369,16 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the SWESMBI bit */
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
/* Set the SW EEPROM semaphore bit to request access */
swsm |= IXGBE_SWSM_SWESMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
/* If we set the bit successfully then we got the
* semaphore.
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SWESMBI)
break;
@@ -1400,11 +1407,11 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
u32 swsm;
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
IXGBE_WRITE_FLUSH(hw);
}
@@ -1454,15 +1461,15 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/* Toggle CS to flush commands */
eec |= IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
eec &= ~IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1480,7 +1487,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u32 mask;
u32 i;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/*
* Mask is used to shift "count" bits of "data" out to the EEPROM
@@ -1501,7 +1508,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
else
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
@@ -1518,7 +1525,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
}
@@ -1539,7 +1546,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
* the value of the "DO" bit. During this "shifting in" process the
* "DI" bit should always be clear.
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
@@ -1547,7 +1554,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
data = data << 1;
ixgbe_raise_eeprom_clk(hw, &eec);
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec &= ~(IXGBE_EEC_DI);
if (eec & IXGBE_EEC_DO)
@@ -1571,7 +1578,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
* (setting the SK bit), then delay
*/
*eec = *eec | IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1588,7 +1595,7 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
* delay
*/
*eec = *eec & ~IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1601,19 +1608,19 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec |= IXGBE_EEC_CS; /* Pull CS high */
eec &= ~IXGBE_EEC_SK; /* Lower SCK */
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
/* Stop requesting EEPROM access */
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f21f8a165..ec015fed8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -118,6 +118,8 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
+extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
+
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
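
The ixgbe_common.c/ixgbe_common.h hunks above introduce the per-MAC register table that the rest of this series relies on: each ixgbe_info now carries an mvals[] array (ixgbe_mvals_8259X for the legacy MACs), probe copies it into hw->mvals, and accessors such as IXGBE_EEC(hw) or IXGBE_FWSM(hw) expand through IXGBE_BY_MAC() to an index into that table instead of a hard-coded offset or a *_BY_MAC() ternary. A small userspace sketch of the lookup is below; the index names are illustrative stand-ins for the driver's IXGBE_MVALS_INIT-generated enumeration, while the offsets shown are the ones defined later in this patch.

#include <stdio.h>

/* Illustrative index names, not the driver's real IXGBE_MVALS_IDX_* values. */
enum mvals_idx { MV_EEC_IDX, MV_FWSM_IDX, MV_IDX_LIMIT };

struct hw { const unsigned int *mvals; };

static const unsigned int mvals_8259X[MV_IDX_LIMIT] = {
	[MV_EEC_IDX]  = 0x10010,	/* legacy offsets, per this patch */
	[MV_FWSM_IDX] = 0x10148,
};

static const unsigned int mvals_X550EM_a[MV_IDX_LIMIT] = {
	[MV_EEC_IDX]  = 0x15FF8,	/* relocated on the newer MAC */
	[MV_FWSM_IDX] = 0x15F74,
};

/* Mirrors IXGBE_BY_MAC()/IXGBE_EEC(): one macro, per-MAC offset. */
#define REG_EEC(hw) ((hw)->mvals[MV_EEC_IDX])

int main(void)
{
	struct hw legacy = { mvals_8259X }, x550em_a = { mvals_X550EM_a };

	printf("EEC offset: 0x%05X (8259X), 0x%05X (X550EM_a)\n",
	       REG_EEC(&legacy), REG_EEC(&x550em_a));
	return 0;
}
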
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index eafa9ec80..ec7b2324b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -207,6 +207,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
case ixgbe_phy_aq:
+ case ixgbe_phy_x550em_ext_t:
case ixgbe_phy_cu_unknown:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
@@ -470,16 +471,16 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
/* NVM Register */
- regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
- regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
- regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
+ regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead
@@ -2594,18 +2595,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fdir_filter *input;
union ixgbe_atr_input mask;
+ u8 queue;
int err;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
return -EOPNOTSUPP;
- /*
- * Don't allow programming if the action is a queue greater than
- * the number of online Rx queues.
+ /* ring_cookie is masked into a set of queues and ixgbe pools, or
+ * we use the drop index.
*/
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= adapter->num_rx_queues))
- return -EINVAL;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (!vf && (ring >= adapter->num_rx_queues))
+ return -EINVAL;
+ else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool))
+ return -EINVAL;
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
/* Don't allow indexes to exist outside of available space */
if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2701,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
/* program filters to filter memory */
err = ixgbe_fdir_write_perfect_filter_82599(hw,
- &input->filter, input->sw_idx,
- (input->action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[input->action]->reg_idx);
+ &input->filter, input->sw_idx, queue);
if (err)
goto err_out_w_lock;
@@ -3053,7 +3068,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 status;
+ s32 status;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -3061,14 +3076,14 @@ static int ixgbe_get_module_info(struct net_device *dev,
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
&sff8472_rev);
- if (status != 0)
+ if (status)
return -EIO;
/* addressing mode is not supported */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_SWAP,
&addr_mode);
- if (status != 0)
+ if (status)
return -EIO;
if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
@@ -3095,7 +3110,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u8 databyte = 0xFF;
int i = 0;
@@ -3112,7 +3127,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
else
status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
- if (status != 0)
+ if (status)
return -EIO;
data[i - ee->offset] = databyte;
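
The ixgbe_add_ethtool_fdir_entry() hunk above decodes ring_cookie into a VF index and a ring within that VF's pool (ethtool_get_flow_spec_ring_vf / ethtool_get_flow_spec_ring), validates both, and maps them onto an absolute hardware queue: the PF case uses rx_ring[ring]->reg_idx, the VF case uses (vf - 1) * num_rx_queues_per_pool + ring. A worked example follows; the queues-per-pool value is hypothetical and the PF mapping is approximated by the ring number itself rather than a real reg_idx.

#include <stdio.h>

#define RXQ_PER_POOL 4U	/* hypothetical queues per VF pool */

static unsigned int fdir_abs_queue(unsigned int vf, unsigned int ring)
{
	/* vf == 0 means "direct to the PF"; the patch then uses
	 * rx_ring[ring]->reg_idx, approximated here by ring.
	 */
	if (!vf)
		return ring;
	return (vf - 1) * RXQ_PER_POOL + ring;
}

int main(void)
{
	/* ring 2 of VF 3 lands on absolute queue (3 - 1) * 4 + 2 = 10 */
	printf("PF  ring 2 -> queue %u\n", fdir_abs_queue(0, 2));
	printf("VF3 ring 2 -> queue %u\n", fdir_abs_queue(3, 2));
	return 0;
}
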
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5be12a00e..ae21e0b06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -81,6 +81,8 @@ const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2014 Intel Corporation.";
+static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
+
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
@@ -131,6 +133,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
/* required last entry */
{0, }
};
@@ -1829,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
static inline bool ixgbe_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
@@ -2366,7 +2369,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
* - We may have missed the interrupt so always have to
* check if we got a LSC
*/
- if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
+ if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
!(eicr & IXGBE_EICR_LSC))
return;
@@ -2386,14 +2389,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
break;
default:
- if (!(eicr & IXGBE_EICR_GPI_SDP0))
+ if (adapter->hw.mac.type >= ixgbe_mac_X540)
+ return;
+ if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
return;
break;
}
- e_crit(drv,
- "Network adapter has been stopped because it has over heated. "
- "Restart the computer. If the problem persists, "
- "power off the system and replace the adapter\n");
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
adapter->interrupt_event = 0;
}
@@ -2403,15 +2405,17 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
struct ixgbe_hw *hw = &adapter->hw;
if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
- (eicr & IXGBE_EICR_GPI_SDP1)) {
+ (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
e_crit(probe, "Fan has stopped, replace the adapter\n");
/* write to clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
}
}
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+
if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
return;
@@ -2421,7 +2425,8 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
* Need to check link state so complete overtemp check
* on service task
*/
- if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
+ if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
+ (eicr & IXGBE_EICR_LSC)) &&
(!test_bit(__IXGBE_DOWN, &adapter->state))) {
adapter->interrupt_event = eicr;
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
@@ -2437,28 +2442,46 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
return;
}
- e_crit(drv,
- "Network adapter has been stopped because it has over heated. "
- "Restart the computer. If the problem persists, "
- "power off the system and replace the adapter\n");
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+}
+
+static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ if (hw->phy.type == ixgbe_phy_nl)
+ return true;
+ return false;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
}
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
- if (eicr & IXGBE_EICR_GPI_SDP2) {
+ if (eicr & IXGBE_EICR_GPI_SDP2(hw)) {
/* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
ixgbe_service_event_schedule(adapter);
}
}
- if (eicr & IXGBE_EICR_GPI_SDP1) {
+ if (eicr & IXGBE_EICR_GPI_SDP1(hw)) {
/* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
ixgbe_service_event_schedule(adapter);
@@ -2543,6 +2566,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
bool flush)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
/* don't reenable LSC while waiting for link */
@@ -2552,7 +2576,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- mask |= IXGBE_EIMS_GPI_SDP0;
+ mask |= IXGBE_EIMS_GPI_SDP0(hw);
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
@@ -2563,15 +2587,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
break;
}
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
- mask |= IXGBE_EIMS_GPI_SDP1;
+ mask |= IXGBE_EIMS_GPI_SDP1(hw);
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- mask |= IXGBE_EIMS_GPI_SDP1;
- mask |= IXGBE_EIMS_GPI_SDP2;
+ mask |= IXGBE_EIMS_GPI_SDP1(hw);
+ mask |= IXGBE_EIMS_GPI_SDP2(hw);
/* fall through */
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
+ mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_MAILBOX;
break;
@@ -2626,6 +2652,13 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
+ (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
+ adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_GPI_SDP0_X540);
+ }
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -4703,32 +4736,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_dfwd(adapter);
}
-static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
-{
- switch (hw->phy.type) {
- case ixgbe_phy_sfp_avago:
- case ixgbe_phy_sfp_ftl:
- case ixgbe_phy_sfp_intel:
- case ixgbe_phy_sfp_unknown:
- case ixgbe_phy_sfp_passive_tyco:
- case ixgbe_phy_sfp_passive_unknown:
- case ixgbe_phy_sfp_active_unknown:
- case ixgbe_phy_sfp_ftl_active:
- case ixgbe_phy_qsfp_passive_unknown:
- case ixgbe_phy_qsfp_active_unknown:
- case ixgbe_phy_qsfp_intel:
- case ixgbe_phy_qsfp_unknown:
- /* ixgbe_phy_none is set when no SFP module is present */
- case ixgbe_phy_none:
- return true;
- case ixgbe_phy_nl:
- if (hw->mac.type == ixgbe_mac_82598EB)
- return true;
- default:
- return false;
- }
-}
-
/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
@@ -4757,7 +4764,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
u32 speed;
bool autoneg, link_up = false;
- u32 ret = IXGBE_ERR_LINK_SETUP;
+ int ret = IXGBE_ERR_LINK_SETUP;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -4833,7 +4840,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- gpie |= IXGBE_SDP0_GPIEN;
+ gpie |= IXGBE_SDP0_GPIEN_8259X;
break;
case ixgbe_mac_X540:
gpie |= IXGBE_EIMS_TS;
@@ -4845,11 +4852,11 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
/* Enable fan failure interrupt */
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
- gpie |= IXGBE_SDP1_GPIEN;
+ gpie |= IXGBE_SDP1_GPIEN(hw);
if (hw->mac.type == ixgbe_mac_82599EB) {
- gpie |= IXGBE_SDP1_GPIEN;
- gpie |= IXGBE_SDP2_GPIEN;
+ gpie |= IXGBE_SDP1_GPIEN_8259X;
+ gpie |= IXGBE_SDP2_GPIEN_8259X;
}
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -4873,6 +4880,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
+ if (hw->phy.ops.set_phy_power)
+ hw->phy.ops.set_phy_power(hw, true);
+
smp_mb__before_atomic();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -4992,6 +5002,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_reset(adapter);
+
+ if (hw->phy.ops.set_phy_power) {
+ if (!netif_running(adapter->netdev) && !adapter->wol)
+ hw->phy.ops.set_phy_power(hw, false);
+ else
+ hw->phy.ops.set_phy_power(hw, true);
+ }
}
/**
@@ -5260,7 +5277,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X540:
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
@@ -5672,6 +5689,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
static int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
/* disallow open during test */
@@ -5729,6 +5747,8 @@ err_set_queues:
ixgbe_free_irq(adapter);
err_req_irq:
ixgbe_free_all_rx_resources(adapter);
+ if (hw->phy.ops.set_phy_power && !adapter->wol)
+ hw->phy.ops.set_phy_power(&adapter->hw, false);
err_setup_rx:
ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
@@ -5889,6 +5909,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
*enable_wake = !!wufc;
+ if (hw->phy.ops.set_phy_power && !*enable_wake)
+ hw->phy.ops.set_phy_power(hw, false);
ixgbe_release_hw_control(adapter);
@@ -6718,6 +6740,26 @@ static void ixgbe_service_timer(unsigned long data)
ixgbe_service_event_schedule(adapter);
}
+static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
+
+ if (!hw->phy.ops.handle_lasi)
+ return;
+
+ status = hw->phy.ops.handle_lasi(&adapter->hw);
+ if (status != IXGBE_ERR_OVERTEMP)
+ return;
+
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+}
+
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
@@ -6759,6 +6801,7 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
ixgbe_reset_subtask(adapter);
+ ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
ixgbe_check_overtemp_subtask(adapter);
@@ -8022,7 +8065,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
- u32 status;
+ int status;
__u16 mode;
if (nla_type(attr) != IFLA_BRIDGE_MODE)
@@ -8052,7 +8095,8 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
- adapter->bridge_mode, 0, 0, nlflags);
+ adapter->bridge_mode, 0, 0, nlflags,
+ filter_mask, NULL);
}
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
@@ -8291,6 +8335,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
/* check eeprom to see if enabled wol */
if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
@@ -8431,10 +8479,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup hw api */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
+ hw->mvals = ii->mvals;
/* EEPROM */
memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (ixgbe_removed(hw->hw_addr)) {
err = -EIO;
goto err_ioremap;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 8a2be4441..526a20bf7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -317,14 +317,14 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
**/
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
- u32 status;
+ s32 status;
u16 phy_id_high = 0;
u16 phy_id_low = 0;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
- if (status == 0) {
+ if (!status) {
hw->phy.id = (u32)(phy_id_high << 16);
status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
&phy_id_low);
@@ -347,6 +347,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
+ case X550_PHY_ID:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -356,6 +357,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case ATH_PHY_ID:
phy_type = ixgbe_phy_nl;
break;
+ case X557_PHY_ID:
+ phy_type = ixgbe_phy_x550em_ext_t;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -1348,6 +1352,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
return IXGBE_ERR_SFP_NOT_PRESENT;
}
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -1361,9 +1368,6 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
hw->phy.id = identifier;
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
&comp_codes_10g);
@@ -1793,7 +1797,7 @@ fail:
**/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -1822,7 +1826,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
**/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
/* Stop condition must begin with data low and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1880,9 +1884,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
}
/* Release SDA line (set high) */
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
- i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
return status;
@@ -1898,7 +1902,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
s32 status = 0;
u32 i = 0;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 timeout = 10;
bool ack = true;
@@ -1911,7 +1915,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
/* Poll for ACK. Note that ACK in I2C spec is
* transition from 1 to 0 */
for (i = 0; i < timeout; i++) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
ack = ixgbe_get_i2c_data(hw, &i2cctl);
udelay(1);
@@ -1941,14 +1945,14 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
**/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
*data = ixgbe_get_i2c_data(hw, &i2cctl);
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -1969,7 +1973,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
s32 status;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -2005,14 +2009,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
u32 i2cctl_r = 0;
for (i = 0; i < timeout; i++) {
- *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ *i2cctl |= IXGBE_I2C_CLK_OUT(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
- i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
- if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
break;
}
}
@@ -2027,9 +2031,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
- *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+ *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
@@ -2047,18 +2051,18 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ *i2cctl |= IXGBE_I2C_DATA_OUT(hw);
else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
/* Verify data was set correctly */
- *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
return IXGBE_ERR_I2C;
@@ -2076,7 +2080,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
**/
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
- if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
+ if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
return true;
return false;
}
@@ -2090,7 +2094,7 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
**/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 i;
ixgbe_i2c_start(hw);
@@ -2137,3 +2141,36 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
return IXGBE_ERR_OVERTEMP;
}
+
+/** ixgbe_set_copper_phy_power - Control power for copper phy
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ **/
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ u32 status;
+ u16 reg;
+
+ /* Bail if we don't have copper phy */
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return 0;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ if (on) {
+ reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ } else {
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+ reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ }
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 434643881..e45988c4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,6 +145,7 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index dd6ba5916..b6f424f3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -91,14 +91,24 @@
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_CAT(r, m) IXGBE_##r##_##m
+
+#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, IDX)])
+
/* General Registers */
#define IXGBE_CTRL 0x00000
#define IXGBE_STATUS 0x00008
#define IXGBE_CTRL_EXT 0x00018
#define IXGBE_ESDP 0x00020
#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- 0x15F5C : 0x00028))
+
+#define IXGBE_I2CCTL_8259X 0x00028
+#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_8259X
+#define IXGBE_I2CCTL_X550 0x15F5C
+#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL(_hw) IXGBE_BY_MAC((_hw), I2CCTL)
+
#define IXGBE_LEDCTL 0x00200
#define IXGBE_FRTIMER 0x00048
#define IXGBE_TCPTIMER 0x0004C
@@ -106,17 +116,39 @@
#define IXGBE_EXVET 0x05078
/* NVM Registers */
-#define IXGBE_EEC 0x10010
+#define IXGBE_EEC_8259X 0x10010
+#define IXGBE_EEC_X540 IXGBE_EEC_8259X
+#define IXGBE_EEC_X550 IXGBE_EEC_8259X
+#define IXGBE_EEC_X550EM_x IXGBE_EEC_8259X
+#define IXGBE_EEC_X550EM_a 0x15FF8
+#define IXGBE_EEC(_hw) IXGBE_BY_MAC((_hw), EEC)
#define IXGBE_EERD 0x10014
#define IXGBE_EEWR 0x10018
-#define IXGBE_FLA 0x1001C
+#define IXGBE_FLA_8259X 0x1001C
+#define IXGBE_FLA_X540 IXGBE_FLA_8259X
+#define IXGBE_FLA_X550 IXGBE_FLA_8259X
+#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X
+#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
#define IXGBE_FLMNGCTL 0x10118
#define IXGBE_FLMNGDATA 0x1011C
#define IXGBE_FLMNGCNT 0x10120
#define IXGBE_FLOP 0x1013C
-#define IXGBE_GRC 0x10200
+#define IXGBE_GRC_8259X 0x10200
+#define IXGBE_GRC_X540 IXGBE_GRC_8259X
+#define IXGBE_GRC_X550 IXGBE_GRC_8259X
+#define IXGBE_GRC_X550EM_x IXGBE_GRC_8259X
+#define IXGBE_GRC_X550EM_a 0x15F64
+#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC)
+
+#define IXGBE_SRAMREL_8259X 0x10210
+#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550EM_a 0x15F6C
+#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
@@ -126,14 +158,55 @@
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00004000 : 0x00000001)
-#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00000200 : 0x00000002)
-#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00001000 : 0x00000004)
-#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00000400 : 0x00000008)
+#define IXGBE_I2C_CLK_IN_8259X 0x00000001
+#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X
+#define IXGBE_I2C_CLK_IN_X550 0x00004000
+#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN)
+
+#define IXGBE_I2C_CLK_OUT_8259X 0x00000002
+#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X
+#define IXGBE_I2C_CLK_OUT_X550 0x00000200
+#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
+
+#define IXGBE_I2C_DATA_IN_8259X 0x00000004
+#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN_8259X
+#define IXGBE_I2C_DATA_IN_X550 0x00001000
+#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN)
+
+#define IXGBE_I2C_DATA_OUT_8259X 0x00000008
+#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT_8259X
+#define IXGBE_I2C_DATA_OUT_X550 0x00000400
+#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
+
+#define IXGBE_I2C_DATA_OE_N_EN_8259X 0
+#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN_8259X
+#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
+
+#define IXGBE_I2C_BB_EN_8259X 0
+#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN_8259X
+#define IXGBE_I2C_BB_EN_X550 0x00000100
+#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
+
+#define IXGBE_I2C_CLK_OE_N_EN_8259X 0
+#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN_8259X
+#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
+
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
@@ -835,15 +908,36 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_GSCN_1 0x11024
#define IXGBE_GSCN_2 0x11028
#define IXGBE_GSCN_3 0x1102C
-#define IXGBE_FACTPS 0x10150
+#define IXGBE_FACTPS_8259X 0x10150
+#define IXGBE_FACTPS_X540 IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550 IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550EM_a 0x15FEC
+#define IXGBE_FACTPS(_hw) IXGBE_BY_MAC((_hw), FACTPS)
+
#define IXGBE_PCIEANACTL 0x11040
-#define IXGBE_SWSM 0x10140
-#define IXGBE_FWSM 0x10148
+#define IXGBE_SWSM_8259X 0x10140
+#define IXGBE_SWSM_X540 IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550 IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550EM_x IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550EM_a 0x15F70
+#define IXGBE_SWSM(_hw) IXGBE_BY_MAC((_hw), SWSM)
+#define IXGBE_FWSM_8259X 0x10148
+#define IXGBE_FWSM_X540 IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550 IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550EM_x IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550EM_a 0x15F74
+#define IXGBE_FWSM(_hw) IXGBE_BY_MAC((_hw), FWSM)
#define IXGBE_GSSR 0x10160
#define IXGBE_MREVID 0x11064
#define IXGBE_DCA_ID 0x11070
#define IXGBE_DCA_CTRL 0x11074
-#define IXGBE_SWFW_SYNC IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_8259X IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78
+#define IXGBE_SWFW_SYNC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC)
/* PCIe registers 82599-specific */
#define IXGBE_GCR_EXT 0x11050
@@ -855,14 +949,21 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_PHYDAT_82599 0x11044
#define IXGBE_PHYCTL_82599 0x11048
#define IXGBE_PBACLR_82599 0x11068
-#define IXGBE_CIAA_82599 0x11088
-#define IXGBE_CIAD_82599 0x1108C
-#define IXGBE_CIAA_X550 0x11508
-#define IXGBE_CIAD_X550 0x11510
-#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
-#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
+
+#define IXGBE_CIAA_8259X 0x11088
+#define IXGBE_CIAA_X540 IXGBE_CIAA_8259X
+#define IXGBE_CIAA_X550 0x11508
+#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550
+#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550
+#define IXGBE_CIAA(_hw) IXGBE_BY_MAC((_hw), CIAA)
+
+#define IXGBE_CIAD_8259X 0x1108C
+#define IXGBE_CIAD_X540 IXGBE_CIAD_8259X
+#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550
+#define IXGBE_CIAD(_hw) IXGBE_BY_MAC((_hw), CIAD)
+
#define IXGBE_PICAUSE 0x110B0
#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -1204,18 +1305,37 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
+#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */
+#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide flag */
+#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
+#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
+/* autoneg vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000
+#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */
+#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */
#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */
#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */
@@ -1233,6 +1353,8 @@ struct ixgbe_thermal_sensor_data {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID 0x01540220
+#define X557_PHY_ID 0x01540240
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
@@ -1253,9 +1375,25 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_CONTROL_SOL_NL 0x0000
/* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_8259X 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN_8259X 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN_8259X 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN)
+
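/* Illustrative sketch: call sites are presumably updated to the
 * hw-parameterized form so the correct bit is picked for the running MAC,
 * e.g.
 *
 *	gpie |= IXGBE_SDP1_GPIEN(hw) | IXGBE_SDP2_GPIEN(hw);
 *
 * rather than the previous fixed IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN masks.
 */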
#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
@@ -1417,9 +1555,25 @@ enum {
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_8259X 0x01000000 /* Gen Purpose INT on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_8259X 0x02000000 /* Gen Purpose INT on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_8259X 0x04000000 /* Gen Purpose INT on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_X540 0x02000000
+#define IXGBE_EICR_GPI_SDP1_X540 0x04000000
+#define IXGBE_EICR_GPI_SDP2_X540 0x08000000
+#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2)
+
#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
@@ -1435,9 +1589,9 @@ enum {
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EICS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EICS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
@@ -1454,9 +1608,9 @@ enum {
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EIMS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EIMS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
@@ -1472,9 +1626,9 @@ enum {
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EIMC_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EIMC_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
@@ -2741,6 +2895,37 @@ union ixgbe_atr_hash_dword {
__be32 dword;
};
+#define IXGBE_MVALS_INIT(m) \
+ IXGBE_CAT(EEC, m), \
+ IXGBE_CAT(FLA, m), \
+ IXGBE_CAT(GRC, m), \
+ IXGBE_CAT(SRAMREL, m), \
+ IXGBE_CAT(FACTPS, m), \
+ IXGBE_CAT(SWSM, m), \
+ IXGBE_CAT(SWFW_SYNC, m), \
+ IXGBE_CAT(FWSM, m), \
+ IXGBE_CAT(SDP0_GPIEN, m), \
+ IXGBE_CAT(SDP1_GPIEN, m), \
+ IXGBE_CAT(SDP2_GPIEN, m), \
+ IXGBE_CAT(EICR_GPI_SDP0, m), \
+ IXGBE_CAT(EICR_GPI_SDP1, m), \
+ IXGBE_CAT(EICR_GPI_SDP2, m), \
+ IXGBE_CAT(CIAA, m), \
+ IXGBE_CAT(CIAD, m), \
+ IXGBE_CAT(I2C_CLK_IN, m), \
+ IXGBE_CAT(I2C_CLK_OUT, m), \
+ IXGBE_CAT(I2C_DATA_IN, m), \
+ IXGBE_CAT(I2C_DATA_OUT, m), \
+ IXGBE_CAT(I2C_DATA_OE_N_EN, m), \
+ IXGBE_CAT(I2C_BB_EN, m), \
+ IXGBE_CAT(I2C_CLK_OE_N_EN, m), \
+ IXGBE_CAT(I2CCTL, m)
+
+enum ixgbe_mvals {
+ IXGBE_MVALS_INIT(IDX),
+ IXGBE_MVALS_IDX_LIMIT
+};
+
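/* Sketch of how the per-MAC lookup presumably ties together (IXGBE_CAT and
 * IXGBE_BY_MAC are assumed to be defined earlier in this header, roughly):
 *
 *	#define IXGBE_CAT(r, m)		IXGBE_##r##_##m
 *	#define IXGBE_BY_MAC(_hw, r)	((_hw)->mvals[IXGBE_CAT(r, IDX)])
 *
 * IXGBE_MVALS_INIT(IDX) therefore generates the IXGBE_EEC_IDX, IXGBE_FLA_IDX,
 * ... enumerators above, IXGBE_MVALS_INIT(X540) and friends generate matching
 * per-MAC value tables, and IXGBE_EEC(hw) resolves to hw->mvals[IXGBE_EEC_IDX].
 */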
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -3112,6 +3297,8 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
+ s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*handle_lasi)(struct ixgbe_hw *hw);
};
struct ixgbe_eeprom_info {
@@ -3173,6 +3360,7 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
+ u32 nw_mng_if_sel;
};
#include "ixgbe_mbx.h"
@@ -3216,6 +3404,7 @@ struct ixgbe_hw {
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
struct ixgbe_mbx_info mbx;
+ const u32 *mvals;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -3234,6 +3423,7 @@ struct ixgbe_info {
struct ixgbe_eeprom_operations *eeprom_ops;
struct ixgbe_phy_operations *phy_ops;
struct ixgbe_mbx_operations *mbx_ops;
+ const u32 *mvals;
};
@@ -3339,4 +3529,6 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
+#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f5f948d08..032a5870a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -202,7 +202,7 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
@@ -504,8 +504,8 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
return status;
}
- flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == 0)
@@ -514,11 +514,11 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
hw_dbg(hw, "Flash update time out\n");
if (hw->revision_id == 0) {
- flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (flup & IXGBE_EEC_SEC1VAL) {
flup |= IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
}
status = ixgbe_poll_flash_update_done_X540(hw);
@@ -544,7 +544,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
u32 reg;
for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (reg & IXGBE_EEC_FLUDONE)
return 0;
udelay(5);
@@ -580,10 +580,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
break;
} else {
@@ -605,13 +605,13 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* corresponding FW/HW bits in the SW_FW_SYNC register.
*/
if (i >= timeout) {
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (swfw_sync & (fwmask | hwmask)) {
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
}
}
@@ -635,9 +635,9 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
ixgbe_get_swfw_sync_semaphore(hw);
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
swfw_sync &= ~swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
usleep_range(5000, 10000);
@@ -660,7 +660,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (!(swsm & IXGBE_SWSM_SMBI))
break;
usleep_range(50, 100);
@@ -674,7 +674,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the REGSMP bit */
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swsm & IXGBE_SWFW_REGSMP))
return 0;
@@ -696,13 +696,13 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- swsm &= ~IXGBE_SWSM_SMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
swsm &= ~IXGBE_SWFW_REGSMP;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
IXGBE_WRITE_FLUSH(hw);
}
@@ -850,9 +850,14 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
+ .set_phy_power = &ixgbe_set_copper_phy_power,
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
};
+static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X540)
+};
+
struct ixgbe_info ixgbe_X540_info = {
.mac = ixgbe_mac_X540,
.get_invariants = &ixgbe_get_invariants_X540,
@@ -860,4 +865,5 @@ struct ixgbe_info ixgbe_X540_info = {
.eeprom_ops = &eeprom_ops_X540,
.phy_ops = &phy_ops_X540,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X540,
};
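/* A sketch of how these tables are presumably wired up by the core driver
 * (the assignment itself lies outside the hunks shown here): during probe or
 * sw-init the selected struct ixgbe_info table is handed to the hw struct,
 * e.g.
 *
 *	hw->mvals = ii->mvals;
 *
 * so that IXGBE_EEC(hw), IXGBE_SWSM(hw), IXGBE_SWFW_SYNC(hw), etc. index the
 * table matching the detected MAC type.
 */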
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cf5cf819a..7581da13e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,6 +26,22 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
/** ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -33,18 +49,11 @@
*/
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
- u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
- if (hw->bus.lan_id) {
- esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
- esdp |= IXGBE_ESDP_SDP1_DIR;
- }
- esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ ixgbe_setup_mux_ctl(hw);
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
@@ -90,7 +99,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
@@ -103,6 +112,39 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_iosf_wait - Wait for IOSF command completion
+ * @hw: pointer to hardware structure
+ * @ctrl: pointer to location to receive final IOSF control value
+ *
+ * Return: failing status on timeout, 0 otherwise
+ *
+ * Note: ctrl can be NULL if the IOSF control register value is not needed
+ */
+static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+{
+ u32 i, command;
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete.
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
+ break;
+ usleep_range(10, 20);
+ }
+ if (ctrl)
+ *ctrl = command;
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ hw_dbg(hw, "IOSF wait timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
+}
+
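/* Usage pattern for the helper above, as followed by the IOSF read/write
 * paths below: wait with a NULL ctrl pointer for the bus to go idle before
 * programming IXGBE_SB_IOSF_INDIRECT_CTRL, then wait again with &command to
 * capture the final control word for completion-error checking, e.g.
 *
 *	ret = ixgbe_iosf_wait(hw, NULL);      (discard value, just wait)
 *	... write CTRL / DATA registers ...
 *	ret = ixgbe_iosf_wait(hw, &command);  (check RESP_STAT afterwards)
 */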
/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from a specified register of the
 * IOSF device
* @hw: pointer to hardware structure
@@ -113,7 +155,17 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
{
- u32 i, command, error;
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+ if (ret)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret)
+ goto out;
command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
(device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
@@ -121,17 +173,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
/* Write IOSF control register */
IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
- /* Check every 10 usec to see if the address cycle completed.
- * The SB IOSF BUSY bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usleep_range(10, 20);
-
- command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
- if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
- break;
- }
+ ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
@@ -140,14 +182,12 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
return IXGBE_ERR_PHY;
}
- if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- hw_dbg(hw, "Read timed out\n");
- return IXGBE_ERR_PHY;
- }
-
- *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+ if (!ret)
+ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
- return 0;
+out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return ret;
}
/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
@@ -673,6 +713,249 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
return status;
}
+/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
+ * IOSF device
+ *
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
+{
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+ if (ret)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret)
+ goto out;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Write IOSF data register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+
+ ret = ixgbe_iosf_wait(hw, &command);
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ hw_dbg(hw, "Failed to write, error %x\n", error);
+ return IXGBE_ERR_PHY;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return ret;
+}
+
+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ * internal and external PHY at a specific speed, without autonegotiation.
+ **/
+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Disable training protocol FSM. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Disable Flex from training TXFFE. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Enable override for coefficients. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Setup internal/external PHY link speed based on link speed, then set
+ * external PHY auto advertised link speed.
+ *
+ * Returns error status for any failure
+ **/
+static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ s32 status;
+ ixgbe_link_speed force_speed;
+
+ /* Setup internal/external PHY link speed to iXFI (10G), unless
+ * only 1G is auto advertised then setup KX link.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If internal link mode is XFI, then setup XFI internal link. */
+ if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
+
+ if (status)
+ return status;
+ }
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/** ixgbe_check_link_t_X550em - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Check that both the MAC and X557 external PHY have link.
+ **/
+static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool link_up_wait_to_complete)
+{
+ u32 status;
+ u16 autoneg_status;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
+
+ status = ixgbe_check_mac_link_generic(hw, speed, link_up,
+ link_up_wait_to_complete);
+
+ /* If check link fails or MAC link is not up, then return */
+ if (status || !(*link_up))
+ return status;
+
+ /* MAC link is up, so check external PHY link.
+ * Read this twice back to back to indicate current status.
+ */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (status)
+ return status;
+
+ /* If external PHY link is not up, then indicate link not up */
+ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+ *link_up = false;
+
+ return 0;
+}
+
/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -680,13 +963,21 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
- /* CS4227 does not support autoneg, so disable the laser control
- * functions for SFP+ fiber
- */
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+ switch (mac->ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ /* CS4227 does not support autoneg, so disable the laser control
+ * functions for SFP+ fiber
+ */
mac->ops.disable_tx_laser = NULL;
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
+ break;
+ case ixgbe_media_type_copper:
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.check_link = ixgbe_check_link_t_X550em;
+ break;
+ default:
+ break;
}
}
@@ -778,169 +1069,221 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
return 0;
}
-/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
- * IOSF device
+/**
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ * PHY interrupt is lsc
*
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 3 bit device type
- * @data: Data to write to the register
+ * Determine if external Base T PHY interrupt cause is high temperature
+ * failure alarm or link status change.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
**/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data)
+static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
{
- u32 i, command, error;
+ u32 status;
+ u16 reg;
- command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
- (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+ *lsc = false;
- /* Write IOSF control register */
- IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+ /* Vendor alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
- /* Write IOSF data register */
- IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+ if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+ return status;
- /* Check every 10 usec to see if the address cycle completed.
- * The SB IOSF BUSY bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usleep_range(10, 20);
+ /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
- command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
- if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
- break;
- }
+ if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+ return status;
- if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
- hw_dbg(hw, "Failed to write, error %x\n", error);
- return IXGBE_ERR_PHY;
- }
+ /* High temperature failure alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
- if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- hw_dbg(hw, "Write timed out\n");
- return IXGBE_ERR_PHY;
+ if (status)
+ return status;
+
+ /* If high temperature failure, then return over temp error and exit */
+ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+ /* power down the PHY in case the PHY FW didn't already */
+ ixgbe_set_copper_phy_power(hw, false);
+ return IXGBE_ERR_OVERTEMP;
}
+ /* Vendor alarm 2 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+ return status;
+
+ /* link connect/disconnect event occurred */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status)
+ return status;
+
+ /* Indicate LSC */
+ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+ *lsc = true;
+
return 0;
}
-/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
- * @hw: pointer to hardware structure
- * @speed: the link speed to force
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY to use iXFI mode. Used to connect an
- * internal and external PHY at a specific speed, without autonegotiation.
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY
+ *
+ * Returns PHY access status
**/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
- s32 status;
- u32 reg_val;
+ u32 status;
+ u16 reg;
+ bool lsc;
- /* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ /* Clear interrupt flags */
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+ /* Enable link status change alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
if (status)
return status;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
- /* Select forced link speed for internal PHY. */
- switch (*speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
- break;
- default:
- /* Other link speeds are not supported by internal KR PHY. */
- return IXGBE_ERR_LINK_SETUP;
- }
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
if (status)
return status;
- /* Disable training protocol FSM. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ /* Enable high temperature failure alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
if (status)
return status;
- reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status)
- return status;
+ reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN;
- /* Disable Flex from training TXFFE. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
if (status)
return status;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
if (status)
return status;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status)
- return status;
+ reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT);
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
if (status)
return status;
- /* Enable override for coefficients. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ /* Enable chip-wide vendor alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
if (status)
return status;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle external Base T PHY interrupt. If high temperature
+ * failure alarm then return error, else if link status change
+ * then setup internal/external PHY link
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ **/
+static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+ bool lsc;
+ u32 status;
+
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
if (status)
return status;
- /* Toggle port SW reset by AN reset. */
+ if (lsc)
+ return phy->ops.setup_internal_link(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u32 reg_val;
+
status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
return status;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
}
@@ -990,85 +1333,82 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
- s32 status;
- u32 reg_val;
+ return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
+}
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status)
- return status;
+/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
+ * @hw: address of hardware structure
+ * @link_up: address of boolean to indicate link status
+ *
+ * Returns error code if unable to get link status.
+ **/
+static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+{
+ u32 ret;
+ u16 autoneg_status;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
- reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+ *link_up = false;
- /* Advertise 10G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+ /* read this twice back to back to indicate current status */
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret)
+ return ret;
- /* Advertise 1G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret)
+ return ret;
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
- return status;
+ return 0;
}
-/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
+/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
 * @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY to talk to the external PHY. The base
- * driver will call this function when it gets notification via interrupt from
- * the external PHY. This function forces the internal PHY into iXFI mode at
- * the correct speed.
+ * Configures the link between the integrated KR PHY and the external X557 PHY.
+ * The driver will call this function when it gets a link status change
+ * interrupt from the X557 PHY. This function configures the link speed
+ * between the PHYs to match the link speed of the BASE-T link.
*
- * A return of a non-zero value indicates an error, and the base driver should
- * not report link up.
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
**/
-static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
{
- u32 status;
- u16 lasi, autoneg_status, speed;
ixgbe_link_speed force_speed;
+ bool link_up;
+ u32 status;
+ u16 speed;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
- /* Verify that the external link status has changed */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi);
+ /* If link is not up, then there is no setup necessary so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
- /* If there was no change in link status, we can just exit */
- if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
+ if (!link_up)
return 0;
- /* we read this twice back to back to indicate current status */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ &speed);
if (status)
return status;
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ /* If link is not still up, then no setup is necessary so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
- /* If link is not up return an error indicating treat link as down */
- if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
-
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &speed);
+ if (!link_up)
+ return 0;
/* clear everything but the speed and duplex bits */
speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
@@ -1088,6 +1428,22 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
return ixgbe_setup_ixfi_x550em(hw, &force_speed);
}
+/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_reset_phy_generic(hw);
+
+ if (status)
+ return status;
+
+ /* Configure Link Status Alarm and Temperature Threshold interrupts */
+ return ixgbe_enable_lasi_ext_t_x550em(hw);
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1098,25 +1454,32 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
+ ixgbe_link_speed speed;
s32 ret_val;
- u32 esdp;
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ hw->mac.ops.set_lan_id(hw);
- if (hw->bus.lan_id) {
- esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
- esdp |= IXGBE_ESDP_SDP1_DIR;
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ ixgbe_setup_mux_ctl(hw);
+
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If internal PHY mode is KR, then initialize KR link */
+ if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
}
- esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
}
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- /* Setup function pointers based on detected SFP module and speeds */
+ /* Setup function pointers based on detected hardware */
ixgbe_init_mac_link_ops_X550em(hw);
if (phy->sfp_type != ixgbe_sfp_type_unknown)
phy->ops.reset = NULL;
@@ -1134,11 +1497,30 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
case ixgbe_phy_x550em_ext_t:
- phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode
+ */
+ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If internal link mode is XFI, then setup iXFI internal link,
+ * else setup KR now.
+ */
+ if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ phy->ops.setup_internal_link =
+ ixgbe_setup_internal_phy_t_x550em;
+ } else {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
+ }
+
+ phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
+ phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
default:
break;
}
+
return ret_val;
}
@@ -1177,67 +1559,37 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
**/
static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
- u32 status;
+ s32 status;
u16 reg;
- u32 retries = 2;
-
- do {
- /* decrement retries counter and exit if we hit 0 */
- if (retries < 1) {
- hw_dbg(hw, "External PHY not yet finished resetting.");
- return IXGBE_ERR_PHY;
- }
- retries--;
-
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_TX_VENDOR_ALARMS_3,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &reg);
- if (status)
- return status;
- /* Verify PHY FW reset has completed */
- } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
-
- /* Set port to low power mode */
status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &reg);
- if (status)
- return status;
-
- /* Enable the transmitter */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&reg);
if (status)
return status;
- reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+ /* If PHY FW reset completed bit is set then this is the first
+ * SW instance after a power on so the PHY FW must be un-stalled.
+ */
+ if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
- status = hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- reg);
- if (status)
- return status;
+ reg &= ~IXGBE_MDIO_POWER_UP_STALL;
- /* Un-stall the PHY FW */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_GLOBAL_RES_PR_10,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &reg);
- if (status)
- return status;
-
- reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ if (status)
+ return status;
+ }
- status = hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_GLOBAL_RES_PR_10,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- reg);
return status;
}
@@ -1254,6 +1606,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
s32 status;
u32 ctrl = 0;
u32 i;
+ u32 hlreg0;
bool link_up = false;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -1338,6 +1691,15 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ }
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ ixgbe_setup_mux_ctl(hw);
+
return status;
}
@@ -1490,6 +1852,10 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
+ .read_reg = &ixgbe_read_phy_reg_generic, \
+ .write_reg = &ixgbe_write_phy_reg_generic, \
+ .setup_link = &ixgbe_setup_phy_link_generic, \
+ .set_phy_power = &ixgbe_set_copper_phy_power, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
@@ -1497,9 +1863,6 @@ static struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
- .read_reg = &ixgbe_read_phy_reg_generic,
- .write_reg = &ixgbe_write_phy_reg_generic,
- .setup_link = &ixgbe_setup_phy_link_generic,
.read_i2c_combined = &ixgbe_read_i2c_combined_generic,
.write_i2c_combined = &ixgbe_write_i2c_combined_generic,
};
@@ -1508,9 +1871,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
- .read_reg = NULL, /* defined later */
- .write_reg = NULL, /* defined later */
- .setup_link = NULL, /* defined later */
+};
+
+static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550)
+};
+
+static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550EM_x)
};
struct ixgbe_info ixgbe_X550_info = {
@@ -1520,6 +1888,7 @@ struct ixgbe_info ixgbe_X550_info = {
.eeprom_ops = &eeprom_ops_X550,
.phy_ops = &phy_ops_X550,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X550,
};
struct ixgbe_info ixgbe_X550EM_x_info = {
@@ -1529,4 +1898,5 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
.eeprom_ops = &eeprom_ops_X550EM_x,
.phy_ops = &phy_ops_X550EM_x,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X550EM_x,
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9c..1d7b00b03 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index d323a695d..80af9ffce 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_MARVELL
default y
depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1c75829eb..d52639bc4 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -3125,9 +3125,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mib_counters_clear(mp);
- init_timer(&mp->mib_counters_timer);
- mp->mib_counters_timer.data = (unsigned long)mp;
- mp->mib_counters_timer.function = mib_counters_timer_wrapper;
+ setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
+ (unsigned long)mp);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
@@ -3136,9 +3135,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
- init_timer(&mp->rx_oom);
- mp->rx_oom.data = (unsigned long)mp;
- mp->rx_oom.function = oom_timer_wrapper;
+ setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 74d0389bf..62e48bc0c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1366,7 +1366,7 @@ static void *mvneta_frag_alloc(const struct mvneta_port *pp)
static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
if (likely(pp->frag_size <= PAGE_SIZE))
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
struct net_device *dev = pp->dev;
- int rx_done, rx_filled;
+ int rx_done;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
rx_todo = rx_done;
rx_done = 0;
- rx_filled = 0;
/* Fairness NAPI loop */
while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
int rx_bytes, err;
rx_done++;
- rx_filled++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
continue;
}
+ /* Refill processing */
+ err = mvneta_rx_refill(pp, rx_desc);
+ if (err) {
+ netdev_err(dev, "Linux processing - Can't refill\n");
+ rxq->missed++;
+ goto err_drop_frame;
+ }
+
skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
if (!skb)
goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
mvneta_rx_csum(pp, rx_status, skb);
napi_gro_receive(&pp->napi, skb);
-
- /* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- rx_filled--;
- }
}
if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
}
/* Update rxq management counters */
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
return rx_done;
}
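The mvneta hunks above move the RX refill ahead of build_skb(): if a replacement buffer cannot be allocated, the frame is dropped while the driver still owns its old buffer, so every descriptor counted in rx_done has also been refilled and rx_filled becomes redundant (hence rx_done is passed twice to mvneta_rxq_desc_num_update()). A simplified sketch of the resulting loop shape, with descriptor parsing and the error label omitted:

while (rx_done < rx_todo) {
	/* ... fetch rx_desc, validate status, compute data/rx_bytes ... */
	rx_done++;

	/* refill first: on failure drop the frame, the old buffer is kept */
	if (mvneta_rx_refill(pp, rx_desc)) {
		rxq->missed++;
		goto err_drop_frame;
	}

	/* only now is the old buffer handed to the network stack */
	skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
	if (!skb)
		goto err_drop_frame;

	napi_gro_receive(&pp->napi, skb);
}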
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1..d9884fd15 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -299,6 +301,7 @@
/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
u64 tx_bytes;
};
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ bool timer_scheduled;
+ /* Tasklet for egress finalization */
+ struct tasklet_struct tx_done_tasklet;
+};
+
struct mvpp2_port {
u8 id;
@@ -679,6 +690,9 @@ struct mvpp2_port {
u32 pending_cause_rx;
struct napi_struct napi;
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
/* Flags */
unsigned long flags;
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
/* Array of transmitted skb */
struct sk_buff **tx_skb;
+ /* Array of transmitted buffers' physical addresses */
+ dma_addr_t *tx_buffs;
+
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
/* Occupied buffers indicator */
atomic_t in_use;
int in_use_thresh;
-
- spinlock_t lock;
};
struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
}
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct mvpp2_tx_desc *tx_desc)
{
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+ if (skb)
+ txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+ tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
atomic_set(&bm_pool->in_use, 0);
- spin_lock_init(&bm_pool->lock);
return 0;
}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
int pkt_size)
{
- unsigned long flags = 0;
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
return NULL;
}
- spin_lock_irqsave(&new_pool->lock, flags);
-
if (new_pool->type == MVPP2_BM_FREE)
new_pool->type = type;
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, pkts_num);
- /* We need to undo the bufs_add() allocations */
- spin_unlock_irqrestore(&new_pool->lock, flags);
return NULL;
}
}
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
- spin_unlock_irqrestore(&new_pool->lock, flags);
-
return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
- unsigned long flags = 0;
int rxq;
if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_long)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_long->lock, flags);
port->pool_long->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_long->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_short)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_short->lock, flags);
port->pool_short->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_short->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
(MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
rxq->time_coal = usec;
}
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
- struct mvpp2_port *port = arg;
- int queue;
- u32 val;
-
- for (queue = 0; queue < txq_number; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
- val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
- MVPP2_TRANSMITTED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
- }
-}
-
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
int i;
for (i = 0; i < num; i++) {
- struct mvpp2_tx_desc *tx_desc = txq->descs +
- txq_pcpu->txq_get_index;
+ dma_addr_t buf_phys_addr =
+ txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
if (!skb)
continue;
- dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
+ dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
u32 cause)
{
- int queue = fls(cause >> 16) - 1;
+ int queue = fls(cause) - 1;
return port->txqs[queue];
}
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
netif_tx_wake_queue(nq);
}
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int tx_todo = 0;
+
+ while (cause) {
+ txq = mvpp2_get_tx_queue(port, cause);
+ if (!txq)
+ break;
+
+ txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+ if (txq_pcpu->count) {
+ mvpp2_txq_done(port, txq, txq_pcpu);
+ tx_todo += txq_pcpu->count;
+ }
+
+ cause &= ~(1 << txq->log_id);
+ }
+ return tx_todo;
+}
+
/* Rx/Tx queue initialization/cleanup methods */
/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
sizeof(*txq_pcpu->tx_skb),
GFP_KERNEL);
- if (!txq_pcpu->tx_skb) {
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
- return -ENOMEM;
- }
+ if (!txq_pcpu->tx_skb)
+ goto error;
+
+ txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!txq_pcpu->tx_buffs)
+ goto error;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
}
return 0;
+
+error:
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
+ }
+
+ dma_free_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ return -ENOMEM;
}
/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
}
if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
goto err_cleanup;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
return 0;
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
}
}
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+ ktime_t interval;
+
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ unsigned int tx_todo, cause;
+
+ if (!netif_running(dev))
+ return;
+ port_pcpu->timer_scheduled = false;
+
+ /* Process all the Tx queues */
+ cause = (1 << txq_number) - 1;
+ tx_todo = mvpp2_tx_done(port, cause);
+
+ /* Set the timer in case not all the packets were processed */
+ if (tx_todo)
+ mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+ return HRTIMER_NORESTART;
+}
+
/* Main RX/TX processing routines */
/* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
}
}
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
dev_kfree_skb_any(skb);
}
+ /* Finalize TX processing */
+ if (txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+
+ /* Set the timer in case not all frags were processed */
+ if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+ mvpp2_timer_set(port_pcpu);
+ }
+
return NETDEV_TX_OK;
}
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
netdev_err(dev, "tx fifo underrun error\n");
}
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
{
- struct mvpp2_port *port = arg;
- u32 cause_rx_tx, cause_tx, cause_misc;
+ u32 cause_rx_tx, cause_rx, cause_misc;
+ int rx_done = 0;
+ struct mvpp2_port *port = netdev_priv(napi->dev);
/* Rx/Tx cause register
*
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
*/
cause_rx_tx = mvpp2_read(port->priv,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- /* Release TX descriptors */
- if (cause_tx) {
- struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
- if (txq_pcpu->count)
- mvpp2_txq_done(port, txq, txq_pcpu);
- }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
- u32 cause_rx_tx, cause_rx;
- int rx_done = 0;
- struct mvpp2_port *port = netdev_priv(napi->dev);
-
- on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
/* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
mvpp2_stop_dev(port);
mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
on_each_cpu(mvpp2_interrupts_mask, port, 1);
free_irq(port->irq, port);
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
txq->done_pkts_coal = c->tx_max_coalesced_frames;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
return 0;
}
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
{
struct device_node *phy_node;
struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
struct net_device *dev;
struct resource *res;
const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int features;
int phy_mode;
int priv_common_regs_num = 2;
- int err, i;
+ int err, i, cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
mvpp2_port_power_up(port);
+ port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+ if (!port->pcpu) {
+ err = -ENOMEM;
+ goto err_free_txq_pcpu;
+ }
+
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+
+ tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+ (unsigned long)dev);
+ }
+
netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
- goto err_free_txq_pcpu;
+ goto err_free_port_pcpu;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
priv->port_list[id] = port;
return 0;
+err_free_port_pcpu:
+ free_percpu(port->pcpu);
err_free_txq_pcpu:
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
+ free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
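The mvpp2 changes above drop the TX-occupied interrupt cause and finalize transmission from a deferred, per-CPU path instead: mvpp2_tx() reaps descriptors once the coalescing threshold is reached, and otherwise arms a pinned hrtimer whose callback schedules a tasklet that runs mvpp2_tx_done() and re-arms the timer while work remains. A condensed sketch of that pattern, with struct names shortened and locking omitted:

static enum hrtimer_restart tx_done_timer_cb(struct hrtimer *timer)
{
	struct port_pcpu *pcpu = container_of(timer, struct port_pcpu,
					      tx_done_timer);

	/* keep hardirq context short: defer the real work to a tasklet */
	tasklet_schedule(&pcpu->tx_done_tasklet);
	return HRTIMER_NORESTART;
}

static void tx_done_tasklet_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct my_port *port = netdev_priv(dev);
	struct port_pcpu *pcpu = this_cpu_ptr(port->pcpu);

	pcpu->timer_scheduled = false;
	/* reap completed descriptors on every TX queue, then re-arm the
	 * one-shot timer if descriptors are still outstanding
	 */
}

/* per-CPU setup at probe time */
hrtimer_init(&pcpu->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
pcpu->tx_done_timer.function = tx_done_timer_cb;
tasklet_init(&pcpu->tx_done_tasklet, tx_done_tasklet_cb, (unsigned long)dev);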
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 8cf7563a8..52a6665b7 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_MELLANOX
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 529ef0594..0a3202047 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -49,6 +49,7 @@
#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
+#include "mlx4_stats.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK 0xffffffffffffff00ULL
@@ -685,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
+ long ret_wait;
int err = 0;
down(&cmd->event_sem);
@@ -710,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
if (err)
goto out_reset;
- if (!wait_for_completion_timeout(&context->done,
- msecs_to_jiffies(timeout))) {
+ if (op == MLX4_CMD_SENSE_PORT) {
+ ret_wait =
+ wait_for_completion_interruptible_timeout(&context->done,
+ msecs_to_jiffies(timeout));
+ if (ret_wait < 0) {
+ context->fw_status = 0;
+ context->out_param = 0;
+ context->result = 0;
+ }
+ } else {
+ ret_wait = (long)wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout));
+ }
+ if (!ret_wait) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
if (op == MLX4_CMD_NOP) {
@@ -882,7 +896,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
{
struct ib_smp *smp = inbox->buf;
u32 index;
- u8 port;
+ u8 port, slave_port;
u8 opcode_modifier;
u16 *table;
int err;
@@ -894,7 +908,8 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
__be32 slave_cap_mask;
__be64 slave_node_guid;
- port = vhcr->in_modifier;
+ slave_port = vhcr->in_modifier;
+ port = mlx4_slave_convert_port(dev, slave, slave_port);
/* network-view bit is for driver use only, and should not be passed to FW */
opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
@@ -930,8 +945,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
/*get the slave specific caps:*/
/*do the command */
+ smp->attr_mod = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
+ port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
/* modify the response for slaves */
if (!err && slave != mlx4_master_func_num(dev)) {
@@ -975,7 +991,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
}
if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
+ port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (!err) {
slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
@@ -2915,7 +2931,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mac;
- mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+ mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
}
@@ -3164,6 +3180,92 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
+int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
+ struct mlx4_counter *counter_stats, int reset)
+{
+ struct mlx4_cmd_mailbox *mailbox = NULL;
+ struct mlx4_counter *tmp_counter;
+ int err;
+ u32 if_stat_in_mod;
+
+ if (!counter_stats)
+ return -EINVAL;
+
+ if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
+ if_stat_in_mod = counter_index;
+ if (reset)
+ if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
+ err = mlx4_cmd_box(dev, 0, mailbox->dma,
+ if_stat_in_mod, 0,
+ MLX4_CMD_QUERY_IF_STAT,
+ MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
+ __func__, counter_index);
+ goto if_stat_out;
+ }
+ tmp_counter = (struct mlx4_counter *)mailbox->buf;
+ counter_stats->counter_mode = tmp_counter->counter_mode;
+ if (counter_stats->counter_mode == 0) {
+ counter_stats->rx_frames =
+ cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
+ be64_to_cpu(tmp_counter->rx_frames));
+ counter_stats->tx_frames =
+ cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
+ be64_to_cpu(tmp_counter->tx_frames));
+ counter_stats->rx_bytes =
+ cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
+ be64_to_cpu(tmp_counter->rx_bytes));
+ counter_stats->tx_bytes =
+ cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
+ be64_to_cpu(tmp_counter->tx_bytes));
+ }
+
+if_stat_out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
+
+int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct mlx4_counter tmp_vf_stats;
+ int slave;
+ int err = 0;
+
+ if (!vf_stats)
+ return -EINVAL;
+
+ if (!mlx4_is_master(dev))
+ return -EPROTONOSUPPORT;
+
+ slave = mlx4_get_slave_indx(dev, vf_idx);
+ if (slave < 0)
+ return -EINVAL;
+
+ port = mlx4_slaves_closest_port(dev, slave, port);
+ err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
+ if (!err && tmp_vf_stats.counter_mode == 0) {
+ vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
+ vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
+ vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
+ vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
+
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3197,6 +3299,12 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enabled)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+ &priv->dev, slave);
+ int min_port = find_first_bit(actv_ports.ports,
+ priv->dev.caps.num_ports) + 1;
+ int max_port = min_port - 1 +
+ bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
if (slave == mlx4_master_func_num(dev))
return 0;
@@ -3206,6 +3314,11 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
enabled < 0 || enabled > 1)
return -EINVAL;
+ if (min_port == max_port && dev->caps.num_ports > 1) {
+ mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
+ return -EPROTONOSUPPORT;
+ }
+
priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
return 0;
}
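The new mlx4_get_counter_stats() above keeps the caller's accumulator in big-endian form to match the mailbox layout, so each update is a convert-add-convert round trip. The same idiom extracted into an illustrative helper (the helper itself is not part of the patch):

static inline void be64_acc(__be64 *acc, __be64 delta)
{
	*acc = cpu_to_be64(be64_to_cpu(*acc) + be64_to_cpu(delta));
}

/* equivalent to the four assignments in mlx4_get_counter_stats() */
be64_acc(&counter_stats->rx_frames, tmp_counter->rx_frames);
be64_acc(&counter_stats->tx_frames, tmp_counter->tx_frames);
be64_acc(&counter_stats->rx_bytes, tmp_counter->rx_bytes);
be64_acc(&counter_stats->tx_bytes, tmp_counter->tx_bytes);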
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index e71f31387..3348e646d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
u64 mtt_addr;
int err;
- if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+ if (vector >= dev->caps.num_comp_vectors)
return -EINVAL;
cq->vector = vector;
@@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->flags |= cpu_to_be32(1 << 19);
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
- cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
+ cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
- &priv->eq_table.eq[cq->vector].tasklet_ctx;
+ &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
- cq->irq = priv->eq_table.eq[cq->vector].irq;
+ cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
return 0;
err_radix:
@@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
- synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+ synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+ if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+ synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
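The cq.c hunks above stop indexing the EQ table directly with the CQ's completion vector and go through MLX4_CQ_TO_EQ_VECTOR(), since entry 0 of the table is now reserved for the async EQ. The real macros are defined in mlx4.h of this series; a simplified reading of the mapping for non-negative vectors would be:

#define MLX4_EQ_ASYNC			0
/* sketch: completion vector N seen by users maps to EQ table entry N + 1 */
#define MLX4_CQ_TO_EQ_VECTOR(vec)	((vec) + 1)
#define MLX4_EQ_TO_CQ_VECTOR(vec)	((vec) - 1)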
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 22da4d0d0..63769df87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
+ cq->vector = mdev->dev->caps.num_comp_vectors;
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
@@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int err = 0;
char name[25];
int timestamp_en = 0;
- struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
- priv->dev->rx_cpu_rmap;
-#else
- NULL;
-#endif
+ bool assigned_eq = false;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
memset(cq->buf, 0, cq->buf_size);
if (cq->is_tx == RX) {
- if (mdev->dev->caps.comp_pool) {
- if (!cq->vector) {
- sprintf(name, "%s-%d", priv->dev->name,
- cq->ring);
- /* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(mdev->dev, name, rmap,
- &cq->vector)) {
- cq->vector = (cq->ring + 1 + priv->port)
- % mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
- name);
- }
-
+ if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+ cq->vector)) {
+ cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+
+ err = mlx4_assign_eq(mdev->dev, priv->port,
+ &cq->vector);
+ if (err) {
+ mlx4_err(mdev, "Failed assigning an EQ to %s\n",
+ name);
+ goto free_eq;
}
- } else {
- cq->vector = (cq->ring + 1 + priv->port) %
- mdev->dev->caps.num_comp_vectors;
+
+ assigned_eq = true;
}
cq->irq_desc =
@@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq->vector, 0, timestamp_en);
if (err)
- return err;
+ goto free_eq;
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
@@ -168,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
} else {
- struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
- err = irq_set_affinity_hint(cq->mcq.irq,
- ring->affinity_mask);
- if (err)
- mlx4_warn(mdev, "Failed setting affinity hint\n");
-
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
}
@@ -182,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
napi_enable(&cq->napi);
return 0;
+
+free_eq:
+ if (assigned_eq)
+ mlx4_release_eq(mdev->dev, cq->vector);
+ cq->vector = mdev->dev->caps.num_comp_vectors;
+ return err;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +182,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+ if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+ cq->is_tx == RX)
mlx4_release_eq(priv->mdev->dev, cq->vector);
- }
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
@@ -207,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
- irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);
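With the comp_pool interface gone, mlx4_en_activate_cq() above obtains its vector through the reworked mlx4_assign_eq(dev, port, &vector), seeding it with the first CPU of the ring's affinity mask as a placement hint. A condensed usage sketch of the new calls, with error paths trimmed:

/* cq->vector defaults to num_comp_vectors, i.e. "no EQ assigned yet" */
if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector)) {
	cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
	err = mlx4_assign_eq(mdev->dev, priv->port, &cq->vector);
	if (err)
		goto free_eq;
	assigned_eq = true;
}

/* ... mlx4_cq_alloc(..., cq->vector, ...) ...; on failure or teardown: */
if (assigned_eq)
	mlx4_release_eq(mdev->dev, cq->vector);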
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a2ddf3d75..99ba1c50e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -119,6 +119,12 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
+ /* pf statistics */
+ "pf_rx_packets",
+ "pf_rx_bytes",
+ "pf_tx_packets",
+ "pf_tx_bytes",
+
/* priority flow control statistics rx */
"rx_pause_prio_0", "rx_pause_duration_prio_0",
"rx_pause_transition_prio_0",
@@ -368,6 +374,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->port_stats)[i];
+ for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] =
+ ((unsigned long *)&priv->pf_stats)[i];
+
for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
@@ -448,6 +459,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
+ for (i = 0; i < NUM_PF_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a5a0b8420..e0de2fd1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1597,6 +1597,9 @@ int mlx4_en_start_port(struct net_device *dev)
}
mdev->mac_removed[priv->port] = 0;
+ priv->counter_index =
+ mlx4_get_default_counter_index(mdev->dev, priv->port);
+
err = mlx4_en_config_rss_steer(priv);
if (err) {
en_err(priv, "Failed configuring rss steering\n");
@@ -1755,6 +1758,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Set port as not active */
priv->port_up = false;
+ priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
/* Promiscuous mode */
if (mdev->dev->caps.steering_mode ==
@@ -1891,6 +1895,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
sizeof(priv->rx_priority_flowstats));
memset(&priv->tx_priority_flowstats, 0,
sizeof(priv->tx_priority_flowstats));
+ memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i]->bytes = 0;
@@ -1954,7 +1959,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
int i;
#ifdef CONFIG_RFS_ACCEL
- free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
priv->dev->rx_cpu_rmap = NULL;
#endif
@@ -2008,11 +2012,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
}
#ifdef CONFIG_RFS_ACCEL
- if (priv->mdev->dev->caps.comp_pool) {
- priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
- if (!priv->dev->rx_cpu_rmap)
- goto err;
- }
+ priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif
return 0;
@@ -2288,6 +2288,15 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
+static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
+}
+
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
@@ -2485,6 +2494,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_set_vf_rate = mlx4_en_set_vf_rate,
.ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
+ .ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
@@ -2682,7 +2692,7 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause)
{
- int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
+ int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
if (!mlx4_is_slave(dev) &&
(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
@@ -2744,6 +2754,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
last_i += NUM_PORT_STATS;
+ if (mlx4_is_master(dev))
+ bitmap_set(stats_bitmap->bitmap, last_i,
+ NUM_PF_STATS);
+ last_i += NUM_PF_STATS;
+
mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
rx_ppp, rx_pause,
tx_ppp, tx_pause);
@@ -2779,6 +2794,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv));
+ priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0a56f010c..ee99e6718 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -149,6 +149,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
+ struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
@@ -156,7 +157,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
- int i;
+ int i, counter_index;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -202,6 +203,20 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.tso_packets += ring->tso_packets;
priv->port_stats.xmit_more += ring->xmit_more;
}
+ if (mlx4_is_master(mdev->dev)) {
+ stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
+ &mlx4_en_stats->RTOT_prio_1,
+ NUM_PRIORITIES);
+ stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
+ &mlx4_en_stats->TTOT_prio_1,
+ NUM_PRIORITIES);
+ stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
+ &mlx4_en_stats->ROCT_prio_1,
+ NUM_PRIORITIES);
+ stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
+ &mlx4_en_stats->TOCT_prio_1,
+ NUM_PRIORITIES);
+ }
/* net device stats */
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
@@ -296,6 +311,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_unlock_bh(&priv->stats_lock);
+ memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+ counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+ err = mlx4_get_counter_stats(mdev->dev, counter_index,
+ &tmp_counter_stats, reset);
+
/* 0xffs indicates invalid value */
memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
@@ -314,6 +334,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
+ if (tmp_counter_stats.counter_mode == 0) {
+ priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
+ priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
+ priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
+ priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
+ }
+
for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
priv->rx_priority_flowstats[i].rx_pause =
be64_to_cpu(flowstats[i].rx_pause);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 34f2fdf4f..e482fa1bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -66,7 +66,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
- context->pri_path.counter_index = 0xff;
+ context->pri_path.counter_index = priv->counter_index;
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index eab4e080e..9c145dddd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
- BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
return ring->prod == ring->cons;
}
@@ -337,15 +336,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
struct mlx4_dev *dev = mdev->dev;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (!dev->caps.comp_pool)
- num_of_eqs = max_t(int, MIN_RX_RINGS,
- min_t(int,
- dev->caps.num_comp_vectors,
- DEF_RX_RINGS));
- else
- num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
- dev->caps.comp_pool/
- dev->caps.num_ports) - 1;
+ num_of_eqs = max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ mlx4_get_eqs_per_port(mdev->dev, i),
+ DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
min_t(int, num_of_eqs,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2619c9fbf..8e81e53c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -145,7 +145,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
struct mlx4_eqe *eqe;
u8 slave;
- int i;
+ int i, phys_port, slave_port;
for (eqe = next_slave_event_eqe(slave_eq); eqe;
eqe = next_slave_event_eqe(slave_eq)) {
@@ -154,9 +154,20 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
for (i = 0; i <= dev->persist->num_vfs; i++) {
+ phys_port = 0;
+ if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
+ eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
+ phys_port = eqe->event.port_mgmt_change.port;
+ slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
+ if (slave_port < 0) /* VF doesn't have this port */
+ continue;
+ eqe->event.port_mgmt_change.port = slave_port;
+ }
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to generate event for slave %d\n",
i);
+ if (phys_port)
+ eqe->event.port_mgmt_change.port = phys_port;
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -210,6 +221,22 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
slave_event(dev, slave, eqe);
}
+#if defined(CONFIG_SMP)
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+ int hint_err;
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+ if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ return;
+
+ hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ if (hint_err)
+ mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+#endif
+
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_eqe eqe;
@@ -224,7 +251,7 @@ int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
- eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
@@ -241,7 +268,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
- eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
@@ -251,6 +278,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
u8 port_subtype_change)
{
struct mlx4_eqe eqe;
+ u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);
/* don't send if we don't have that slave */
if (dev->persist->num_vfs < slave)
@@ -259,7 +287,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
eqe.subtype = port_subtype_change;
- eqe.event.port_change.port = cpu_to_be32(port << 28);
+ eqe.event.port_change.port = cpu_to_be32(slave_port << 28);
mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
port_subtype_change, slave, port);
@@ -573,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
continue;
mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
__func__, i, port);
- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
eqe->event.port_change.port =
cpu_to_be32(
@@ -589,6 +617,10 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
if (i == mlx4_master_func_num(dev))
continue;
+ eqe->event.port_change.port =
+ cpu_to_be32(
+ (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+ | (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
}
}
@@ -608,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
continue;
if (i == mlx4_master_func_num(dev))
continue;
- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
eqe->event.port_change.port =
cpu_to_be32(
@@ -879,8 +911,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
* we need to map, take the difference of highest index and
* the lowest index we'll use and add 1.
*/
- return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
- dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+ dev->caps.reserved_eqs / 4 + 1;
}
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -1069,32 +1101,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int i, vec;
+ int i;
if (eq_table->have_irq)
free_irq(dev->persist->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
+ free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+ irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
- for (i = 0; i < dev->caps.comp_pool; i++) {
- /*
- * Freeing the assigned irq's
- * all bits should be 0, but we need to validate
- */
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- /* NO need protecting*/
- vec = dev->caps.num_comp_vectors + 1 + i;
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- }
- }
-
-
kfree(eq_table->irq_names);
}
@@ -1175,76 +1196,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
}
priv->eq_table.irq_names =
- kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
- dev->caps.comp_pool),
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
- goto err_out_bitmap;
+ goto err_out_clr_int;
}
- for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
- }
- }
-
- err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
- &priv->eq_table.eq[dev->caps.num_comp_vectors]);
- if (err)
- goto err_out_comp;
-
- /*if additional completion vectors poolsize is 0 this loop will not run*/
- for (i = dev->caps.num_comp_vectors + 1;
- i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+ if (i == MLX4_EQ_ASYNC) {
+ err = mlx4_create_eq(dev,
+ MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+ 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+ } else {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+#ifdef CONFIG_RFS_ACCEL
+ int port = find_first_bit(eq->actv_ports.ports,
+ dev->caps.num_ports) + 1;
+
+ if (port <= dev->caps.num_ports) {
+ struct mlx4_port_info *info =
+ &mlx4_priv(dev)->port[port];
+
+ if (!info->rmap) {
+ info->rmap = alloc_irq_cpu_rmap(
+ mlx4_get_eqs_per_port(dev, port));
+ if (!info->rmap) {
+ mlx4_warn(dev, "Failed to allocate cpu rmap\n");
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ }
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
+ err = irq_cpu_rmap_add(
+ info->rmap, eq->irq);
+ if (err)
+ mlx4_warn(dev, "Failed adding irq rmap\n");
+ }
+#endif
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ?
+ i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
+ eq);
}
+ if (err)
+ goto err_out_unmap;
}
-
if (dev->flags & MLX4_FLAG_MSI_X) {
const char *eq_name;
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
- if (i < dev->caps.num_comp_vectors) {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-comp-%d@pci:%s", i,
- pci_name(dev->persist->pdev));
- } else {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-async@pci:%s",
- pci_name(dev->persist->pdev));
- }
+ snprintf(priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-async@pci:%s",
+ pci_name(dev->persist->pdev));
+ eq_name = priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
- eq_name = priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE;
- err = request_irq(priv->eq_table.eq[i].irq,
- mlx4_msi_x_interrupt, 0, eq_name,
- priv->eq_table.eq + i);
- if (err)
- goto err_out_async;
+ err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
+ mlx4_msi_x_interrupt, 0, eq_name,
+ priv->eq_table.eq + MLX4_EQ_ASYNC);
+ if (err)
+ goto err_out_unmap;
- priv->eq_table.eq[i].have_irq = 1;
- }
+ priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
} else {
snprintf(priv->eq_table.irq_names,
MLX4_IRQNAME_SIZE,
@@ -1253,36 +1271,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
- goto err_out_async;
+ goto err_out_unmap;
priv->eq_table.have_irq = 1;
}
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
- eq_set_ci(&priv->eq_table.eq[i], 1);
+ /* arm ASYNC eq */
+ eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
return 0;
-err_out_async:
- mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
- i = dev->caps.num_comp_vectors - 1;
-
err_out_unmap:
- while (i >= 0) {
- mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- --i;
+ while (i >= 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
}
+#endif
+ mlx4_free_irqs(dev);
+
+err_out_clr_int:
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
- mlx4_free_irqs(dev);
err_out_bitmap:
mlx4_unmap_uar(dev);
@@ -1300,11 +1320,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
int i;
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
+ }
+#endif
mlx4_free_irqs(dev);
- for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
if (!mlx4_is_slave(dev))
@@ -1355,87 +1383,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
/* Return to default */
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector)
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
+ (vector == MLX4_EQ_ASYNC))
+ return false;
+
+ return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
+}
+EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
+{
struct mlx4_priv *priv = mlx4_priv(dev);
- int vec = 0, err = 0, i;
+ unsigned int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
+ sum += !!test_bit(port - 1,
+ priv->eq_table.eq[i].actv_ports.ports);
+
+ return sum;
+}
+EXPORT_SYMBOL(mlx4_get_eqs_per_port);
+
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
+ return -EINVAL;
+
+ return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
+ dev->caps.num_ports) > 1);
+}
+EXPORT_SYMBOL(mlx4_is_eq_shared);
+
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
+{
+ return mlx4_priv(dev)->port[port].rmap;
+}
+EXPORT_SYMBOL(mlx4_get_cpu_rmap);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = 0, i = 0;
+ u32 min_ref_count_val = (u32)-1;
+ int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
+ int *prequested_vector = NULL;
+
mutex_lock(&priv->msix_ctl.pool_lock);
- for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
- if (~priv->msix_ctl.pool_bm & 1ULL << i) {
- priv->msix_ctl.pool_bm |= 1ULL << i;
- vec = dev->caps.num_comp_vectors + 1 + i;
- snprintf(priv->eq_table.irq_names +
- vec * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE, "%s", name);
-#ifdef CONFIG_RFS_ACCEL
- if (rmap) {
- err = irq_cpu_rmap_add(rmap,
- priv->eq_table.eq[vec].irq);
- if (err)
- mlx4_warn(dev, "Failed adding irq rmap\n");
+ if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
+ (requested_vector >= 0) &&
+ (requested_vector != MLX4_EQ_ASYNC)) {
+ if (test_bit(port - 1,
+ priv->eq_table.eq[requested_vector].actv_ports.ports)) {
+ prequested_vector = &requested_vector;
+ } else {
+ struct mlx4_eq *eq;
+
+ for (i = 1; i < port;
+ requested_vector += mlx4_get_eqs_per_port(dev, i++))
+ ;
+
+ eq = &priv->eq_table.eq[requested_vector];
+ if (requested_vector < dev->caps.num_comp_vectors + 1 &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ prequested_vector = &requested_vector;
}
-#endif
- err = request_irq(priv->eq_table.eq[vec].irq,
- mlx4_msi_x_interrupt, 0,
- &priv->eq_table.irq_names[vec<<5],
- priv->eq_table.eq + vec);
- if (err) {
- /*zero out bit by fliping it*/
- priv->msix_ctl.pool_bm ^= 1 << i;
- vec = 0;
- continue;
- /*we dont want to break here*/
+ }
+ }
+
+ if (!prequested_vector) {
+ requested_vector = -1;
+ for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
+ i++) {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+
+ if (min_ref_count_val > eq->ref_count &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ min_ref_count_val = eq->ref_count;
+ requested_vector = i;
}
+ }
+
+ if (requested_vector < 0) {
+ err = -ENOSPC;
+ goto err_unlock;
+ }
+
+ prequested_vector = &requested_vector;
+ }
+
+ if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
+ dev->flags & MLX4_FLAG_MSI_X) {
+ set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ snprintf(priv->eq_table.irq_names +
+ *prequested_vector * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
+ *prequested_vector, dev_name(&dev->persist->pdev->dev));
- eq_set_ci(&priv->eq_table.eq[vec], 1);
+ err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
+ mlx4_msi_x_interrupt, 0,
+ &priv->eq_table.irq_names[*prequested_vector << 5],
+ priv->eq_table.eq + *prequested_vector);
+
+ if (err) {
+ clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ *prequested_vector = -1;
+ } else {
+#if defined(CONFIG_SMP)
+ mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
+ eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
+ priv->eq_table.eq[*prequested_vector].have_irq = 1;
}
}
+
+ if (!err && *prequested_vector >= 0)
+ priv->eq_table.eq[*prequested_vector].ref_count++;
+
+err_unlock:
mutex_unlock(&priv->msix_ctl.pool_lock);
- if (vec) {
- *vector = vec;
- } else {
+ if (!err && *prequested_vector >= 0)
+ *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
+ else
*vector = 0;
- err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
- }
+
return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
-int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return priv->eq_table.eq[vec].irq;
+ return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- /*bm index*/
- int i = vec - dev->caps.num_comp_vectors - 1;
-
- if (likely(i >= 0)) {
- /*sanity check , making sure were not trying to free irq's
- Belonging to a legacy EQ*/
- mutex_lock(&priv->msix_ctl.pool_lock);
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- priv->msix_ctl.pool_bm &= ~(1ULL << i);
- }
- mutex_unlock(&priv->msix_ctl.pool_lock);
- }
+ int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
+ mutex_lock(&priv->msix_ctl.pool_lock);
+ priv->eq_table.eq[eq_vec].ref_count--;
+
+ /* once an EQ has been allocated, we don't release it because it might
+ * be bound to cpu_rmap.
+ */
+ mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ced5ecab5..29c2a017a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -479,7 +479,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+ if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
+ (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
+ (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
+ mlx4_warn(dev,
+ "Granular QoS per VF not supported with IB/Eth configuration\n");
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
+ }
+
+ dev->caps.max_counters = dev_cap->max_counters;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
@@ -1674,6 +1682,25 @@ static int map_internal_clock(struct mlx4_dev *dev)
return 0;
}
+int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
+ struct mlx4_clock_params *params)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (mlx4_is_slave(dev))
+ return -ENOTSUPP;
+
+ if (!params)
+ return -EINVAL;
+
+ params->bar = priv->fw.clock_bar;
+ params->offset = priv->fw.clock_offset;
+ params->size = MLX4_CLOCK_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
+
static void unmap_internal_clock(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2193,20 +2220,78 @@ err_free_icm:
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int nent;
+ int nent_pow2;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return -ENOENT;
- nent = dev->caps.max_counters;
- return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+ if (!dev->caps.max_counters)
+ return -ENOSPC;
+
+ nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
+ /* reserve last counter index for sink counter */
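+ /* e.g. (sketch): with max_counters = 100, nent_pow2 = 128 and the top
+ * 29 entries are reserved, so indices 0..98 remain allocatable and the
+ * last real counter index (99) is kept back as the sink counter
+ */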
+ return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
+ nent_pow2 - 1, 0,
+ nent_pow2 - dev->caps.max_counters + 1);
}
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return;
+
+ if (!dev->caps.max_counters)
+ return;
+
mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
+static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port;
+
+ for (port = 0; port < dev->caps.num_ports; port++)
+ if (priv->def_counter[port] != -1)
+ mlx4_counter_free(dev, priv->def_counter[port]);
+}
+
+static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port, err = 0;
+ u32 idx;
+
+ for (port = 0; port < dev->caps.num_ports; port++)
+ priv->def_counter[port] = -1;
+
+ for (port = 0; port < dev->caps.num_ports; port++) {
+ err = mlx4_counter_alloc(dev, &idx);
+
+ if (!err || err == -ENOSPC) {
+ priv->def_counter[port] = idx;
+ } else if (err == -ENOENT) {
+ err = 0;
+ continue;
+ } else if (mlx4_is_slave(dev) && err == -EINVAL) {
+ priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
+ mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
+ MLX4_SINK_COUNTER_INDEX(dev));
+ err = 0;
+ } else {
+ mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
+ __func__, port + 1, err);
+ mlx4_cleanup_default_counters(dev);
+ return err;
+ }
+
+ mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
+ __func__, priv->def_counter[port], port + 1);
+ }
+
+ return err;
+}
+
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2215,8 +2300,10 @@ int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
return -ENOENT;
*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
- if (*idx == -1)
- return -ENOMEM;
+ if (*idx == -1) {
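+ /* out of counters: fall back to the sink counter index so the
+ * caller still gets a usable index, but report -ENOSPC
+ */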
+ *idx = MLX4_SINK_COUNTER_INDEX(dev);
+ return -ENOSPC;
+ }
return 0;
}
@@ -2239,8 +2326,35 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
+static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
+ u8 counter_index)
+{
+ struct mlx4_cmd_mailbox *if_stat_mailbox;
+ int err;
+ u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
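+ /* MLX4_QUERY_IF_STAT_RESET (bit 31 of the input modifier) presumably
+ * tells QUERY_IF_STAT to clear the counter while reading it, hence
+ * this helper's name
+ */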
+
+ if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(if_stat_mailbox))
+ return PTR_ERR(if_stat_mailbox);
+
+ err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
+ return err;
+}
+
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return;
+
+ if (idx == MLX4_SINK_COUNTER_INDEX(dev))
+ return;
+
+ __mlx4_clear_if_stat(dev, idx);
+
mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
@@ -2260,6 +2374,14 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
+int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return priv->def_counter[port - 1];
+}
+EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
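+
+/* A minimal usage sketch (hypothetical caller, not part of this patch):
+ *
+ * int cidx = mlx4_get_default_counter_index(dev, port);
+ *
+ * returns the per-port default counter set up by
+ * mlx4_allocate_default_counters(), which may be the sink counter index
+ * when no dedicated counter could be reserved.
+ */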
+
void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2364,11 +2486,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -2395,10 +2517,18 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- err = mlx4_init_counters_table(dev);
- if (err && err != -ENOENT) {
- mlx4_err(dev, "Failed to initialize counters table, aborting\n");
- goto err_qp_table_free;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_counters_table(dev);
+ if (err && err != -ENOENT) {
+ mlx4_err(dev, "Failed to initialize counters table, aborting\n");
+ goto err_qp_table_free;
+ }
+ }
+
+ err = mlx4_allocate_default_counters(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to allocate default counters, aborting\n");
+ goto err_counters_table_free;
}
if (!mlx4_is_slave(dev)) {
@@ -2432,15 +2562,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
port);
- goto err_counters_table_free;
+ goto err_default_countes_free;
}
}
}
return 0;
+err_default_countes_free:
+ mlx4_cleanup_default_counters(dev);
+
err_counters_table_free:
- mlx4_cleanup_counters_table(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -2481,14 +2615,45 @@ err_uar_table_free:
return err;
}
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+ int requested_cpu = 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_eq *eq;
+ int off = 0;
+ int i;
+
+ if (eqn > dev->caps.num_comp_vectors)
+ return -EINVAL;
+
+ for (i = 1; i < port; i++)
+ off += mlx4_get_eqs_per_port(dev, i);
+
+ requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
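+ /* e.g. (sketch): for port 1, off = 0 and eqn = 1 give requested_cpu = 0;
+ * for port 2 with, say, 4 EQs per port, off = 4 and eqn = 5 give
+ * requested_cpu = 0 as well, i.e. each port starts again from CPU 0
+ */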
+
+ /* Meaning EQs are shared, and this call comes from the second port */
+ if (requested_cpu < 0)
+ return 0;
+
+ eq = &priv->eq_table.eq[eqn];
+
+ if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+ return 0;
+}
+
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int i;
+ int port = 0;
if (msi_x) {
- int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+ int nreq = dev->caps.num_ports * num_online_cpus() + 1;
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
@@ -2503,20 +2668,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
nreq);
- if (nreq < 0) {
+ if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
kfree(entries);
goto no_msi;
- } else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
- /*Working in legacy mode , all EQ's shared*/
- dev->caps.comp_pool = 0;
- dev->caps.num_comp_vectors = nreq - 1;
- } else {
- dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
- dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
}
- for (i = 0; i < nreq; ++i)
- priv->eq_table.eq[i].irq = entries[i].vector;
+ /* 1 is reserved for events (asynchronous EQ) */
+ dev->caps.num_comp_vectors = nreq - 1;
+
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+ bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+ dev->caps.num_ports);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+ if (i == MLX4_EQ_ASYNC)
+ continue;
+
+ priv->eq_table.eq[i].irq =
+ entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+ if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ /* We don't set affinity hint when there
+ * aren't enough EQs
+ */
+ } else {
+ set_bit(port,
+ priv->eq_table.eq[i].actv_ports.ports);
+ if (mlx4_init_affinity_hint(dev, port + 1, i))
+ mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+ i);
+ }
+ /* We divide the EQs evenly between the ports.
+ * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+ * is the number of EQs per port
+ * (i.e. eqs_per_port). Ideally we would
+ * write something like (i + 1) % eqs_per_port == 0.
+ * However, since there's an asynchronous EQ, we have
+ * to skip over it by comparing this condition to
+ * !!((i + 1) > MLX4_EQ_ASYNC).
+ */
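+ /* Sketch: with 8 completion vectors and 2 ports,
+ * eqs_per_port = 4, so EQs 1-4 end up on port 1 and
+ * EQs 5-8 on port 2 (EQ 0 stays the async EQ).
+ */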
+ if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+ ((i + 1) %
+ (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+ !!((i + 1) > MLX4_EQ_ASYNC))
+ /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+ * everything is shared anyway.
+ */
+ port++;
+ }
dev->flags |= MLX4_FLAG_MSI_X;
@@ -2526,10 +2726,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
no_msi:
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
- for (i = 0; i < 2; ++i)
+ BUG_ON(MLX4_EQ_ASYNC >= 2);
+ for (i = 0; i < 2; ++i) {
priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+ if (i != MLX4_EQ_ASYNC) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ }
+ }
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2594,6 +2799,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(info->rmap);
+ info->rmap = NULL;
+#endif
}
static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2749,6 +2958,7 @@ disable_sriov:
free_mem:
dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
+ dev->dev_vfs = NULL;
return dev_flags & ~MLX4_FLAG_MASTER;
}
@@ -2900,6 +3110,7 @@ slave_start:
existing_vfs,
reset_flow);
+ mlx4_close_fw(dev);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
dev->flags = dev_flags;
if (!SRIOV_VALID_STATE(dev->flags)) {
@@ -2988,18 +3199,6 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- int ib_ports = 0;
-
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
- ib_ports++;
-
- if (ib_ports &&
- (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
- mlx4_err(dev,
- "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
- err = -EINVAL;
- goto err_close;
- }
if (dev->caps.num_ports < 2 &&
num_vfs_argc > 1) {
err = -EINVAL;
@@ -3036,7 +3235,7 @@ slave_start:
if (err)
goto err_master_mfunc;
- priv->msix_ctl.pool_bm = 0;
+ bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
mutex_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
@@ -3058,7 +3257,6 @@ slave_start:
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
@@ -3109,7 +3307,9 @@ err_port:
for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
- mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_default_counters(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
@@ -3407,7 +3607,9 @@ static void mlx4_unload_one(struct pci_dev *pdev)
mlx4_free_resource_tracker(dev,
RES_TR_FREE_SLAVES_ONLY);
- mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_default_counters(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 502d3dd2c..a092c5c34 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -65,6 +65,8 @@
#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
+#define MLX4_QUERY_IF_STAT_RESET BIT(31)
+
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
@@ -287,6 +289,12 @@ struct mlx4_icm_table {
#define MLX4_CQE_SIZE_MASK_STRIDE 0x3
#define MLX4_EQE_SIZE_MASK_STRIDE 0x30
+#define MLX4_EQ_ASYNC 0
+#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
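+
+/* A sketch of the resulting mapping (MLX4_EQ_ASYNC is 0, so EQ 0 serves
+ * asynchronous events): completion (CQ) vector N is handled by
+ * eq_table.eq[N + 1], and EQ vector M (M >= 1) maps back to CQ vector M - 1.
+ */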
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@@ -391,6 +399,9 @@ struct mlx4_eq {
struct mlx4_buf_list *page_list;
struct mlx4_mtt mtt;
struct mlx4_eq_tasklet tasklet_ctx;
+ struct mlx4_active_ports actv_ports;
+ u32 ref_count;
+ cpumask_var_t affinity_mask;
};
struct mlx4_slave_eqe {
@@ -808,6 +819,7 @@ struct mlx4_port_info {
struct mlx4_vlan_table vlan_table;
struct mlx4_roce_gid_table gid_table;
int base_qpn;
+ struct cpu_rmap *rmap;
};
struct mlx4_sense {
@@ -818,7 +830,7 @@ struct mlx4_sense {
};
struct mlx4_msix_ctl {
- u64 pool_bm;
+ DECLARE_BITMAP(pool_bm, MAX_MSIX);
struct mutex pool_lock;
};
@@ -864,6 +876,7 @@ struct mlx4_priv {
struct mlx4_qp_table qp_table;
struct mlx4_mcg_table mcg_table;
struct mlx4_bitmap counters_bitmap;
+ int def_counter[MLX4_MAX_PORTS];
struct mlx4_catas_err catas_err;
@@ -997,6 +1010,8 @@ int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_counter *data);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 909fcf803..666d1669e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -339,7 +339,7 @@ struct mlx4_en_cq {
struct napi_struct napi;
int size;
int buf_size;
- unsigned vector;
+ int vector;
enum cq_type is_tx;
u16 moder_time;
u16 moder_cnt;
@@ -567,6 +567,7 @@ struct mlx4_en_priv {
#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
+ struct mlx4_en_counter_stats pf_stats;
struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
struct mlx4_en_flow_stats_rx rx_flowstats;
@@ -582,6 +583,7 @@ struct mlx4_en_priv {
struct device *ddev;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
struct hwtstamp_config hwtstamp_config;
+ u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 00555832a..7fd466c0b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -23,6 +23,14 @@ struct mlx4_en_pkt_stats {
#define NUM_PKT_STATS 43
};
+struct mlx4_en_counter_stats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+#define NUM_PF_STATS 4
+};
+
struct mlx4_en_port_stats {
unsigned long tso_packets;
unsigned long xmit_more;
@@ -99,7 +107,7 @@ enum {
};
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
- NUM_FLOW_STATS + NUM_PERF_STATS)
+ NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
sizeof(((struct net_device_stats *)0)->n))
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 2bf437aaf..bae8b22ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -82,7 +82,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
u64 total_size = 0;
struct mlx4_resource *profile;
- struct mlx4_resource tmp;
struct sysinfo si;
int i, j;
@@ -149,11 +148,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
*/
for (i = MLX4_RES_NUM; i > 0; --i)
for (j = 1; j < i; ++j) {
- if (profile[j].size > profile[j - 1].size) {
- tmp = profile[j];
- profile[j] = profile[j - 1];
- profile[j - 1] = tmp;
- }
+ if (profile[j].size > profile[j - 1].size)
+ swap(profile[j], profile[j - 1]);
}
for (i = 0; i < MLX4_RES_NUM; ++i) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index b75214a80..20268634a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -749,7 +749,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
{
int sort[MLX4_NUM_QP_REGION];
- int i, j, tmp;
+ int i, j;
int last_base = dev->caps.num_qps;
for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
@@ -758,11 +758,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
if (dev->caps.reserved_qps_cnt[sort[j]] >
- dev->caps.reserved_qps_cnt[sort[j - 1]]) {
- tmp = sort[j];
- sort[j] = sort[j - 1];
- sort[j - 1] = tmp;
- }
+ dev->caps.reserved_qps_cnt[sort[j - 1]])
+ swap(sort[j], sort[j - 1]);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index bafe2180c..731423ca5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -46,8 +46,11 @@
#include "mlx4.h"
#include "fw.h"
+#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_PF_COUNTERS_PER_PORT 2
+#define MLX4_VF_COUNTERS_PER_PORT 1
struct mac_res {
struct list_head list;
@@ -459,11 +462,21 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
dev->quotas.mpt =
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
+
+static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+{
+ /* subtract 1 for the sink counter */
+ return (dev->caps.max_counters - 1 -
+ (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
+ / MLX4_MAX_PORTS;
+}
+
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
+ int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kzalloc(dev->num_slaves * sizeof(struct slave_list),
@@ -499,6 +512,9 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
res_alloc->allocated = kzalloc((dev->persist->
num_vfs + 1) *
sizeof(int), GFP_KERNEL);
+ /* Subtract 1 for the sink counter */
+ if (i == RES_COUNTER)
+ res_alloc->res_free = dev->caps.max_counters - 1;
if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)
@@ -577,9 +593,17 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
- res_alloc->guaranteed[t] = 0;
if (t == mlx4_master_func_num(dev))
- res_alloc->res_free = res_alloc->quota[t];
+ res_alloc->guaranteed[t] =
+ MLX4_PF_COUNTERS_PER_PORT *
+ MLX4_MAX_PORTS;
+ else if (t <= max_vfs_guarantee_counter)
+ res_alloc->guaranteed[t] =
+ MLX4_VF_COUNTERS_PER_PORT *
+ MLX4_MAX_PORTS;
+ else
+ res_alloc->guaranteed[t] = 0;
+ res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
break;
@@ -700,6 +724,9 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
}
}
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+ u8 slave, int port);
+
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox,
u8 slave, u32 qpn)
@@ -715,6 +742,10 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ err = handle_counter(dev, qpc, slave, port);
+ if (err)
+ goto out;
+
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
@@ -859,6 +890,83 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
spin_unlock_irq(mlx4_tlock(dev));
}
+static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param, int port);
+
+static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
+ int counter_index)
+{
+ struct res_common *r;
+ struct res_counter *counter;
+ int ret = 0;
+
+ if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
+ return ret;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, counter_index, RES_COUNTER);
+ if (!r || r->owner != slave)
+ ret = -EINVAL;
+ counter = container_of(r, struct res_counter, com);
+ if (!counter->port)
+ counter->port = port;
+
+ spin_unlock_irq(mlx4_tlock(dev));
+ return ret;
+}
+
+static int handle_unexisting_counter(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qpc, u8 slave,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *tmp;
+ struct res_counter *counter;
+ u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(tmp,
+ &tracker->slave_list[slave].res_list[RES_COUNTER],
+ list) {
+ counter = container_of(tmp, struct res_counter, com);
+ if (port == counter->port) {
+ qpc->pri_path.counter_index = counter->com.res_id;
+ spin_unlock_irq(mlx4_tlock(dev));
+ return 0;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ /* No existing counter, need to allocate a new counter */
+ err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
+ port);
+ if (err == -ENOENT) {
+ err = 0;
+ } else if (err && err != -ENOSPC) {
+ mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
+ __func__, slave, err);
+ } else {
+ qpc->pri_path.counter_index = counter_idx;
+ mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
+ __func__, slave, qpc->pri_path.counter_index);
+ err = 0;
+ }
+
+ return err;
+}
+
+static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
+ u8 slave, int port)
+{
+ if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
+ return handle_existing_counter(dev, slave, port,
+ qpc->pri_path.counter_index);
+
+ return handle_unexisting_counter(dev, qpc, slave, port);
+}
+
static struct res_common *alloc_qp_tr(int id)
{
struct res_qp *ret;
@@ -952,7 +1060,7 @@ static struct res_common *alloc_srq_tr(int id)
return &ret->com;
}
-static struct res_common *alloc_counter_tr(int id)
+static struct res_common *alloc_counter_tr(int id, int port)
{
struct res_counter *ret;
@@ -962,6 +1070,7 @@ static struct res_common *alloc_counter_tr(int id)
ret->com.res_id = id;
ret->com.state = RES_COUNTER_ALLOCATED;
+ ret->port = port;
return &ret->com;
}
@@ -1022,7 +1131,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
- ret = alloc_counter_tr(id);
+ ret = alloc_counter_tr(id, extra);
break;
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
@@ -1039,6 +1148,53 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
return ret;
}
+int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_counter *data)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *tmp;
+ struct res_counter *counter;
+ int *counters_arr;
+ int i = 0, err = 0;
+
+ memset(data, 0, sizeof(*data));
+
+ counters_arr = kmalloc_array(dev->caps.max_counters,
+ sizeof(*counters_arr), GFP_KERNEL);
+ if (!counters_arr)
+ return -ENOMEM;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(tmp,
+ &tracker->slave_list[slave].res_list[RES_COUNTER],
+ list) {
+ counter = container_of(tmp, struct res_counter, com);
+ if (counter->port == port) {
+ counters_arr[i] = (int)tmp->res_id;
+ i++;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ counters_arr[i] = -1;
+
+ i = 0;
+
+ while (counters_arr[i] != -1) {
+ err = mlx4_get_counter_stats(dev, counters_arr[i], data,
+ 0);
+ if (err) {
+ memset(data, 0, sizeof(*data));
+ goto table_changed;
+ }
+ i++;
+ }
+
+table_changed:
+ kfree(counters_arr);
+ return 0;
+}
+
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
@@ -2001,7 +2157,7 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
- u64 in_param, u64 *out_param)
+ u64 in_param, u64 *out_param, int port)
{
u32 index;
int err;
@@ -2019,7 +2175,7 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
- err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
+ err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
if (err) {
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
@@ -2101,7 +2257,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_COUNTER:
err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
- vhcr->in_param, &vhcr->out_param);
+ vhcr->in_param, &vhcr->out_param, 0);
break;
case RES_XRCD:
@@ -2335,6 +2491,9 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return -EINVAL;
index = get_param_l(&in_param);
+ if (index == MLX4_SINK_COUNTER_INDEX(dev))
+ return 0;
+
err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
if (err)
return err;
@@ -2703,6 +2862,10 @@ static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
context->qkey = cpu_to_be32(qkey);
}
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox);
+
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -2725,6 +2888,10 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct res_srq *srq;
int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+ if (err)
+ return err;
+
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
if (err)
return err;
@@ -3526,8 +3693,8 @@ static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
((port & 1) << 6);
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
- mlx4_is_eth(dev, port + 1)) {
+ if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
+ qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
qpc->pri_path.sched_queue = pri_sched_queue;
}
@@ -3965,6 +4132,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
return 0;
}
+static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header)
+{
+ if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+ is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+ struct mlx4_net_trans_rule_hw_eth *eth =
+ (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+ struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+ bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+ next_rule->rsvd == 0;
+
+ if (last_rule)
+ ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+ }
+}
+
/*
* In case of missing eth header, append eth header with a MAC address
* assigned to the VF.
@@ -4117,6 +4300,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
+ if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+ handle_eth_header_mcast_prio(ctrl, rule_header);
+
+ if (slave == dev->caps.function)
+ goto execute;
+
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
@@ -4143,6 +4332,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
goto err_put;
}
+execute:
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 8ff57e8e3..158c88c69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -3,6 +3,18 @@
#
config MLX5_CORE
- tristate
+ tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
depends on PCI
default n
+ ---help---
+ Core driver for low level functionality of the ConnectX-4 and
+ Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+ bool "Mellanox Technologies ConnectX-4 Ethernet support"
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ default n
+ ---help---
+ Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+ Ethernet and Infiniband support in ConnectX-4 are currently mutually
+ exclusive.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 105780bb9..26a68b8af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,4 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o
+ mad.o transobj.o vport.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
+ en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+ en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index ac0f7bf4b..0715b4975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -42,95 +42,36 @@
#include "mlx5_core.h"
/* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+ * register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
dma_addr_t t;
buf->size = size;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
- } else {
- int i;
-
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
- if (!buf->page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; i++) {
- buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
- if (!buf->page_list[i].buf)
- goto err_free;
-
- buf->page_list[i].map = t;
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; i++)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
- }
- }
+ buf->npages = 1;
+ buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
+ buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- return 0;
+ buf->direct.map = t;
-err_free:
- mlx5_buf_free(dev, buf);
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
- return -ENOMEM;
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
-
- for (i = 0; i < buf->nbufs; i++)
- if (buf->page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- buf->page_list[i].buf,
- buf->page_list[i].map);
- kfree(buf->page_list);
- }
+ dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+ buf->direct.map);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
@@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
int i;
for (i = 0; i < buf->npages; i++) {
- if (buf->nbufs == 1)
- addr = buf->direct.map + (i << buf->page_shift);
- else
- addr = buf->page_list[i].map;
+ addr = buf->direct.map + (i << buf->page_shift);
pas[i] = cpu_to_be64(addr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e3273faf4..75ff58dc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -75,25 +75,6 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
-enum {
- MLX5_CMD_STAT_OK = 0x0,
- MLX5_CMD_STAT_INT_ERR = 0x1,
- MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
- MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
- MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
- MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
- MLX5_CMD_STAT_RES_BUSY = 0x6,
- MLX5_CMD_STAT_LIM_ERR = 0x8,
- MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
- MLX5_CMD_STAT_IX_ERR = 0xa,
- MLX5_CMD_STAT_NO_RES_ERR = 0xf,
- MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
- MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
- MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
- MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
- MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
-};
-
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out,
@@ -390,8 +371,17 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ARM_RQ:
return "ARM_RQ";
- case MLX5_CMD_OP_RESIZE_SRQ:
- return "RESIZE_SRQ";
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return "CREATE_XRC_SRQ";
+
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ return "DESTROY_XRC_SRQ";
+
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ return "QUERY_XRC_SRQ";
+
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ return "ARM_XRC_SRQ";
case MLX5_CMD_OP_ALLOC_PD:
return "ALLOC_PD";
@@ -408,8 +398,8 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ATTACH_TO_MCG:
return "ATTACH_TO_MCG";
- case MLX5_CMD_OP_DETACH_FROM_MCG:
- return "DETACH_FROM_MCG";
+ case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ return "DETTACH_FROM_MCG";
case MLX5_CMD_OP_ALLOC_XRCD:
return "ALLOC_XRCD";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index eb0cf81f5..04ab7e445 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq,
+ u16 cq_period,
+ u16 cq_max_count)
+{
+ struct mlx5_modify_cq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.cqn = cpu_to_be32(cq->cqn);
+ in.ctx.cq_period = cpu_to_be16(cq_period);
+ in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+ in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+ MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
new file mode 100644
index 000000000..3d23bd657
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/vport.h>
+#include "wq.h"
+#include "transobj.h"
+#include "mlx5_core.h"
+
+#define MLX5E_MAX_NUM_TC 8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+
+#define MLX5E_TX_CQ_POLL_BUDGET 128
+#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+
+static const char vport_strings[][ETH_GSTRING_LEN] = {
+ /* vport statistics */
+ "rx_packets",
+ "rx_bytes",
+ "tx_packets",
+ "tx_bytes",
+ "rx_error_packets",
+ "rx_error_bytes",
+ "tx_error_packets",
+ "tx_error_bytes",
+ "rx_unicast_packets",
+ "rx_unicast_bytes",
+ "tx_unicast_packets",
+ "tx_unicast_bytes",
+ "rx_multicast_packets",
+ "rx_multicast_bytes",
+ "tx_multicast_packets",
+ "tx_multicast_bytes",
+ "rx_broadcast_packets",
+ "rx_broadcast_bytes",
+ "tx_broadcast_packets",
+ "tx_broadcast_bytes",
+
+ /* SW counters */
+ "tso_packets",
+ "tso_bytes",
+ "lro_packets",
+ "lro_bytes",
+ "rx_csum_good",
+ "rx_csum_none",
+ "tx_csum_offload",
+ "tx_queue_stopped",
+ "tx_queue_wake",
+ "tx_queue_dropped",
+ "rx_wqe_err",
+};
+
+struct mlx5e_vport_stats {
+ /* HW counters */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_error_packets;
+ u64 rx_error_bytes;
+ u64 tx_error_packets;
+ u64 tx_error_bytes;
+ u64 rx_unicast_packets;
+ u64 rx_unicast_bytes;
+ u64 tx_unicast_packets;
+ u64 tx_unicast_bytes;
+ u64 rx_multicast_packets;
+ u64 rx_multicast_bytes;
+ u64 tx_multicast_packets;
+ u64 tx_multicast_bytes;
+ u64 rx_broadcast_packets;
+ u64 rx_broadcast_bytes;
+ u64 tx_broadcast_packets;
+ u64 tx_broadcast_bytes;
+
+ /* SW counters */
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 rx_csum_good;
+ u64 rx_csum_none;
+ u64 tx_csum_offload;
+ u64 tx_queue_stopped;
+ u64 tx_queue_wake;
+ u64 tx_queue_dropped;
+ u64 rx_wqe_err;
+
+#define NUM_VPORT_COUNTERS 31
+};
+
+static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "csum_none",
+ "lro_packets",
+ "lro_bytes",
+ "wqe_err"
+};
+
+struct mlx5e_rq_stats {
+ u64 packets;
+ u64 csum_none;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 wqe_err;
+#define NUM_RQ_STATS 5
+};
+
+static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "tso_packets",
+ "tso_bytes",
+ "csum_offload_none",
+ "stopped",
+ "wake",
+ "dropped",
+ "nop"
+};
+
+struct mlx5e_sq_stats {
+ u64 packets;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 csum_offload_none;
+ u64 stopped;
+ u64 wake;
+ u64 dropped;
+ u64 nop;
+#define NUM_SQ_STATS 8
+};
+
+struct mlx5e_stats {
+ struct mlx5e_vport_stats vport;
+};
+
+struct mlx5e_params {
+ u8 log_sq_size;
+ u8 log_rq_size;
+ u16 num_channels;
+ u8 default_vlan_prio;
+ u8 num_tc;
+ u16 rx_cq_moderation_usec;
+ u16 rx_cq_moderation_pkts;
+ u16 tx_cq_moderation_usec;
+ u16 tx_cq_moderation_pkts;
+ u16 min_rx_wqes;
+ u16 rx_hash_log_tbl_sz;
+ bool lro_en;
+ u32 lro_wqe_sz;
+};
+
+enum {
+ MLX5E_RQ_STATE_POST_WQES_ENABLE,
+};
+
+enum cq_flags {
+ MLX5E_CQ_HAS_CQES = 1,
+};
+
+struct mlx5e_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+ unsigned long flags;
+
+ /* data path - accessed per napi poll */
+ struct napi_struct *napi;
+ struct mlx5_core_cq mcq;
+ struct mlx5e_channel *channel;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq {
+ /* data path */
+ struct mlx5_wq_ll wq;
+ u32 wqe_sz;
+ struct sk_buff **skb;
+
+ struct device *pdev;
+ struct net_device *netdev;
+ struct mlx5e_rq_stats stats;
+ struct mlx5e_cq cq;
+
+ unsigned long state;
+ int ix;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+ u32 rqn;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_tx_skb_cb {
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+};
+
+#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
+
+struct mlx5e_sq_dma {
+ dma_addr_t addr;
+ u32 size;
+};
+
+enum {
+ MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+};
+
+struct mlx5e_sq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+ u32 dma_fifo_cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ u32 bf_offset;
+ struct mlx5e_sq_stats stats;
+
+ struct mlx5e_cq cq;
+
+ /* pointers to per packet info: write@xmit, read@completion */
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 dma_fifo_mask;
+ void __iomem *uar_map;
+ struct netdev_queue *txq;
+ u32 sqn;
+ u32 bf_buf_size;
+ u16 max_inline;
+ u16 edge;
+ struct device *pdev;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_uar uar;
+ struct mlx5e_channel *channel;
+ int tc;
+} ____cacheline_aligned_in_smp;
+
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+ return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+ (sq->cc == sq->pc));
+}
+
+enum channel_flags {
+ MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+ u8 num_tc;
+ unsigned long flags;
+ int tc_to_txq_map[MLX5E_MAX_NUM_TC];
+
+ /* control */
+ struct mlx5e_priv *priv;
+ int ix;
+ int cpu;
+};
+
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP = 0,
+ MLX5E_TT_IPV6_TCP = 1,
+ MLX5E_TT_IPV4_UDP = 2,
+ MLX5E_TT_IPV6_UDP = 3,
+ MLX5E_TT_IPV4 = 4,
+ MLX5E_TT_IPV6 = 5,
+ MLX5E_TT_ANY = 6,
+ MLX5E_NUM_TT = 7,
+};
+
+enum {
+ MLX5E_RQT_SPREADING = 0,
+ MLX5E_RQT_DEFAULT_RQ = 1,
+ MLX5E_NUM_RQT = 2,
+};
+
+struct mlx5e_eth_addr_info {
+ u8 addr[ETH_ALEN + 2];
+ u32 tt_vec;
+ u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_db {
+ struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct mlx5e_eth_addr_info broadcast;
+ struct mlx5e_eth_addr_info allmulti;
+ struct mlx5e_eth_addr_info promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 active_vlans_ft_ix[VLAN_N_VID];
+ u32 untagged_rule_ft_ix;
+ u32 any_vlan_rule_ft_ix;
+ bool filter_disabled;
+};
+
+struct mlx5e_flow_table {
+ void *vlan;
+ void *main;
+};
+
+struct mlx5e_priv {
+ /* priv data path fields - start */
+ int num_tc;
+ int default_vlan_prio;
+ struct mlx5e_sq **txq_to_sq_map;
+ /* priv data path fields - end */
+
+ unsigned long state;
+ struct mutex state_lock; /* Protects Interface state */
+ struct mlx5_uar cq_uar;
+ u32 pdn;
+ u32 tdn;
+ struct mlx5_core_mr mr;
+
+ struct mlx5e_channel **channel;
+ u32 tisn[MLX5E_MAX_NUM_TC];
+ u32 rqtn;
+ u32 tirn[MLX5E_NUM_TT];
+
+ struct mlx5e_flow_table ft;
+ struct mlx5e_eth_addr_db eth_addr;
+ struct mlx5e_vlan_db vlan;
+
+ struct mlx5e_params params;
+ spinlock_t async_events_spinlock; /* sync hw events */
+ struct work_struct update_carrier_work;
+ struct work_struct set_rx_mode_work;
+ struct delayed_work update_stats_work;
+
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_stats stats;
+};
+
+#define MLX5E_NET_IP_ALIGN 2
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_ER = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+ MLX5E_100BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+ struct mlx5e_tx_wqe *wqe)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)&wqe->ctrl,
+ sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
+ NULL);
+
+ sq->bf_offset ^= sq->bf_buf_size;
+}
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+ struct mlx5_core_cq *mcq;
+
+ mcq = &cq->mcq;
+ mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
new file mode 100644
index 000000000..388938482
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+ strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static const struct {
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_1000BASE_KX] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_10GBASE_CX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_20GBASE_KR2] = {
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ [MLX5E_40GBASE_CR4] = {
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_KR4] = {
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_56GBASE_R4] = {
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ [MLX5E_10GBASE_CR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_SR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_ER] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_40GBASE_SR4] = {
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_LR4] = {
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_100GBASE_CR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_SR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_KR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_LR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100BASE_TX] = {
+ .speed = 100,
+ },
+ [MLX5E_100BASE_T] = {
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ [MLX5E_10GBASE_T] = {
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ [MLX5E_25GBASE_CR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_KR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_SR] = {
+ .speed = 25000,
+ },
+ [MLX5E_50GBASE_CR2] = {
+ .speed = 50000,
+ },
+ [MLX5E_50GBASE_KR2] = {
+ .speed = 50000,
+ },
+};
+
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
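+ /* e.g. (sketch): 8 channels with 1 TC give 31 + 8 * 5 + 8 * 1 * 8 = 135
+ * stats entries
+ */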
+ return NUM_VPORT_COUNTERS +
+ priv->params.num_channels * NUM_RQ_STATS +
+ priv->params.num_channels * priv->num_tc *
+ NUM_SQ_STATS;
+ /* fallthrough */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ int i, j, tc, idx = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ break;
+
+ case ETH_SS_TEST:
+ break;
+
+ case ETH_SS_STATS:
+ /* VPORT counters */
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_strings[i]);
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ "rx%d_%s", i, rq_stats_strings[j]);
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data +
+ (idx++) * ETH_GSTRING_LEN,
+ "tx%d_%d_%s", i, tc,
+ sq_stats_strings[j]);
+ break;
+ }
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i, j, tc, idx = 0;
+
+ if (!data)
+ return;
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_stats(priv);
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = ((u64 *)&priv->stats.vport)[i];
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->rq.stats)[j];
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+ param->rx_pending = 1 << priv->params.log_rq_size;
+ param->tx_pending = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_params new_params;
+ u16 min_rx_wqes;
+ u8 log_rq_size;
+ u8 log_sq_size;
+ int err = 0;
+
+ if (param->rx_jumbo_pending) {
+ netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_mini_pending) {
+ netdev_info(dev, "%s: rx_mini_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+
+ log_rq_size = order_base_2(param->rx_pending);
+ log_sq_size = order_base_2(param->tx_pending);
+ min_rx_wqes = min_t(u16, param->rx_pending - 1,
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+ if (log_rq_size == priv->params.log_rq_size &&
+ log_sq_size == priv->params.log_sq_size &&
+ min_rx_wqes == priv->params.min_rx_wqes)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.log_rq_size = log_rq_size;
+ new_params.log_sq_size = log_sq_size;
+ new_params.min_rx_wqes = min_rx_wqes;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static void mlx5e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+
+ ch->max_combined = ncv;
+ ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+ unsigned int count = ch->combined_count;
+ struct mlx5e_params new_params;
+ int err = 0;
+
+ if (!count) {
+ netdev_info(dev, "%s: combined_count=0 not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (ch->rx_count || ch->tx_count) {
+ netdev_info(dev, "%s: separate rx/tx count not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (count > ncv) {
+ netdev_info(dev, "%s: count (%d) > max (%d)\n",
+ __func__, count, ncv);
+ return -EINVAL;
+ }
+
+ if (priv->params.num_channels == count)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.num_channels = count;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
+ coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+
+ return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channel *c;
+ int tc;
+ int i;
+
+ priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
+ for (i = 0; i < priv->params.num_channels; ++i) {
+ c = priv->channel[i];
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ mlx5_core_modify_cq_moderation(mdev,
+ &c->sq[tc].cq.mcq,
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+ coal->rx_max_coalesced_frames);
+ }
+
+ return 0;
+}
+
+static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 supported_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ supported_modes |= ptys2ethtool_table[i].supported;
+ }
+ return supported_modes;
+}
+
+static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 advertising_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ advertising_modes |= ptys2ethtool_table[i].advertised;
+ }
+ return advertising_modes;
+}
+
+static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+{
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return SUPPORTED_FIBRE;
+ }
+
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+ return SUPPORTED_Backplane;
+ }
+ return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+ u32 eth_proto_oper,
+ struct ethtool_cmd *cmd)
+{
+ int i;
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+
+ if (!netif_carrier_ok(netdev))
+ goto out;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+ speed = ptys2ethtool_table[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap, u32 *supported)
+{
+ *supported |= ptys2ethtool_supported_port(eth_proto_cap);
+ *supported |= ptys2ethtool_supported_link(eth_proto_cap);
+ *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+ u8 rx_pause, u32 *advertising)
+{
+ *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
+ *advertising |= tx_pause ? ADVERTISED_Pause : 0;
+ *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+}
+
+static u8 get_connector_port(u32 eth_proto)
+{
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return PORT_FIBRE;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+ return PORT_DA;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+ return PORT_NONE;
+ }
+
+ return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+{
+ *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+}
+
+static int mlx5e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_lp;
+ u32 eth_proto_oper;
+ int err;
+
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+
+ if (err) {
+ netdev_err(netdev, "%s: query port ptys failed: %d\n",
+ __func__, err);
+ goto err_query_ptys;
+ }
+
+ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+
+ cmd->supported = 0;
+ cmd->advertising = 0;
+
+ get_supported(eth_proto_cap, &cmd->supported);
+ get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
+ get_speed_duplex(netdev, eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+ cmd->port = get_connector_port(eth_proto_oper);
+ get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+
+ cmd->transceiver = XCVR_INTERNAL;
+
+err_query_ptys:
+ return err;
+}
+
+static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+{
+ u32 i, ptys_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].advertised & link_modes)
+ ptys_modes |= MLX5E_PROT_MASK(i);
+ }
+
+ return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+ u32 i, speed_links = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].speed == speed)
+ speed_links |= MLX5E_PROT_MASK(i);
+ }
+
+ return speed_links;
+}
+
+static int mlx5e_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 link_modes;
+ u32 speed;
+ u32 eth_proto_cap, eth_proto_admin;
+ u8 port_status;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ link_modes = cmd->autoneg == AUTONEG_ENABLE ?
+ mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+ mlx5e_ethtool2ptys_speed_link(speed);
+
+ err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ link_modes = link_modes & eth_proto_cap;
+ if (!link_modes) {
+ netdev_err(netdev, "%s: Not supported link mode(s) requested",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ if (link_modes == eth_proto_admin)
+ goto out;
+
+ err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = mlx5_query_port_status(mdev, &port_status);
+ if (err)
+ goto out;
+
+ if (port_status == MLX5_PORT_DOWN)
+ return 0;
+
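+ /* Toggle the port down and back up so the new settings take effect */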
+ err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+ if (err)
+ goto out;
+ err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+ return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+ .get_drvinfo = mlx5e_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_get_strings,
+ .get_sset_count = mlx5e_get_sset_count,
+ .get_ethtool_stats = mlx5e_get_ethtool_stats,
+ .get_ringparam = mlx5e_get_ringparam,
+ .set_ringparam = mlx5e_set_ringparam,
+ .get_channels = mlx5e_get_channels,
+ .set_channels = mlx5e_set_channels,
+ .get_coalesce = mlx5e_get_coalesce,
+ .set_coalesce = mlx5e_set_coalesce,
+ .get_settings = mlx5e_get_settings,
+ .set_settings = mlx5e_set_settings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644
index 000000000..120db80c4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+enum {
+ MLX5E_FULLMATCH = 0,
+ MLX5E_ALLMULTI = 1,
+ MLX5E_PROMISC = 2,
+};
+
+enum {
+ MLX5E_UC = 0,
+ MLX5E_MC_IPV4 = 1,
+ MLX5E_MC_IPV6 = 2,
+ MLX5E_MC_OTHER = 3,
+};
+
+enum {
+ MLX5E_ACTION_NONE = 0,
+ MLX5E_ACTION_ADD = 1,
+ MLX5E_ACTION_DEL = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+ struct hlist_node hlist;
+ u8 action;
+ struct mlx5e_eth_addr_info ai;
+};
+
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
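+ /* Bucket addresses by the last byte of the MAC */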
+ return addr[5];
+}
+
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ int ix = mlx5e_hash_eth_addr(addr);
+ int found = 0;
+
+ hlist_for_each_entry(hn, &hash[ix], hlist)
+ if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+ found = 1;
+ break;
+ }
+
+ if (found) {
+ hn->action = MLX5E_ACTION_NONE;
+ return;
+ }
+
+ hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+ if (!hn)
+ return;
+
+ ether_addr_copy(hn->ai.addr, addr);
+ hn->action = MLX5E_ACTION_ADD;
+
+ hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+ hlist_del(&hn->hlist);
+ kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai)
+{
+ void *ft = priv->ft.main;
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+ if (is_unicast_ether_addr(addr))
+ return MLX5E_UC;
+
+ if ((addr[0] == 0x01) &&
+ (addr[1] == 0x00) &&
+ (addr[2] == 0x5e) &&
+ !(addr[3] & 0x80))
+ return MLX5E_MC_IPV4;
+
+ if ((addr[0] == 0x33) &&
+ (addr[1] == 0x33))
+ return MLX5E_MC_IPV6;
+
+ return MLX5E_MC_OTHER;
+}
+
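+/* Return the set of traffic types (MLX5E_TT_* bits) a rule of the given type covers */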
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+ int eth_addr_type;
+ u32 ret;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+ switch (eth_addr_type) {
+ case MLX5E_UC:
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV4:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV6:
+ ret =
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV6) |
+ 0;
+ break;
+
+ case MLX5E_MC_OTHER:
+ ret =
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ break;
+
+ case MLX5E_ALLMULTI:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ default: /* MLX5E_PROMISC */
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type,
+ void *flow_context, void *match_criteria)
+{
+ u8 match_criteria_enable = 0;
+ void *match_value;
+ void *dest;
+ u8 *dmac;
+ u8 *match_criteria_dmac;
+ void *ft = priv->ft.main;
+ u32 *tirn = priv->tirn;
+ u32 tt_vec;
+ int err;
+
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+ match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ memset(match_criteria_dmac, 0xff, ETH_ALEN);
+ ether_addr_copy(dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ match_criteria_dmac[0] = 0x01;
+ dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ }
+
+ tt_vec = mlx5e_get_tt_vec(ai, type);
+
+ if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_ANY]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_ANY]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ }
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ethertype);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_UDP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_TCP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ }
+
+ return 0;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type)
+{
+ u32 *flow_context;
+ u32 *match_criteria;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_eth_addr_rule_out;
+ }
+
+ err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
+ match_criteria);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+enum mlx5e_vlan_rule_type {
+ MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+ MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ u8 match_criteria_enable = 0;
+ u32 *flow_context;
+ void *match_value;
+ void *dest;
+ u32 *match_criteria;
+ u32 *ft_ix;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_vlan_rule_out;
+ }
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ mlx5_get_flow_table_id(priv->ft.main));
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.vlan_tag);
+
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ ft_ix = &priv->vlan.untagged_rule_ft_ix;
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+ ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+ vid);
+ break;
+ }
+
+ err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+ match_criteria, flow_context, ft_ix);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.untagged_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.any_vlan_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.active_vlans_ft_ix[vid]);
+ break;
+ }
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = false;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (!priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = true;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ set_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mutex_lock(&priv->state_lock);
+
+ clear_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+ int err;
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ return err;
+
+ if (priv->vlan.filter_disabled) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+
+ if (priv->vlan.filter_disabled)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+ for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_hash_node *hn)
+{
+ switch (hn->action) {
+ case MLX5E_ACTION_ADD:
+ mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ hn->action = MLX5E_ACTION_NONE;
+ break;
+
+ case MLX5E_ACTION_DEL:
+ mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+ mlx5e_del_eth_addr_from_hash(hn);
+ break;
+ }
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+ priv->netdev->dev_addr);
+
+ netdev_for_each_uc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+ netdev_for_each_mc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ mlx5e_execute_action(priv, hn);
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ mlx5e_execute_action(priv, hn);
+}
+
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ hn->action = MLX5E_ACTION_DEL;
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ hn->action = MLX5E_ACTION_DEL;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_sync_netdev_addr(priv);
+
+ mlx5e_apply_netdev_addr(priv);
+}
+
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct net_device *ndev = priv->netdev;
+
+ bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool broadcast_enabled = rx_mode_enable;
+
+ bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
+ bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
+ bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
+ bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
+ bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
+ bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+
+ if (enable_promisc)
+ mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (enable_allmulti)
+ mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ if (enable_broadcast)
+ mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+ mlx5e_handle_netdev_addr(priv);
+
+ if (disable_broadcast)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ if (disable_allmulti)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ if (disable_promisc)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+ ea->promisc_enabled = promisc_enabled;
+ ea->allmulti_enabled = allmulti_enabled;
+ ea->broadcast_enabled = broadcast_enabled;
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_set_rx_mode_core(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+ ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+ u8 *dmac;
+
+ g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
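+ /* Nine match-criteria groups, sized for the rules installed by
+  * __mlx5e_add_eth_addr_rule(): g[0]-g[2] carry no DMAC criteria
+  * (promiscuous rules), g[3]-g[5] match the full DMAC (fullmatch
+  * rules) and g[6]-g[8] match only the multicast bit of the DMAC
+  * (allmulti rules); within each triplet the ip_protocol and then
+  * the ethertype criteria are dropped.
+  */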
+ g[0].log_sz = 2;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.ethertype);
+
+ g[2].log_sz = 0;
+
+ g[3].log_sz = 14;
+ g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[4].log_sz = 13;
+ g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+ outer_headers.ethertype);
+
+ g[5].log_sz = 11;
+ g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+
+ g[6].log_sz = 2;
+ g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[7].log_sz = 1;
+ g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+ outer_headers.ethertype);
+
+ g[8].log_sz = 0;
+ g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 9, g);
+ kfree(g);
+
+ return priv->ft.main ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.main);
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+
+ g = kcalloc(2, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
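+ /* per-VID rules: match vlan_tag and first_vid */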
+ g[0].log_sz = 12;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.first_vid);
+
+ /* untagged + any vlan id */
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.vlan_tag);
+
+ priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 2, g);
+
+ kfree(g);
+ return priv->ft.vlan ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.vlan);
+}
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_main_flow_table(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_create_vlan_flow_table(priv);
+ if (err)
+ goto err_destroy_main_flow_table;
+
+ return 0;
+
+err_destroy_main_flow_table:
+ mlx5e_destroy_main_flow_table(priv);
+
+ return err;
+}
+
+void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_main_flow_table(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
new file mode 100644
index 000000000..40206da1f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -0,0 +1,1915 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+struct mlx5e_rq_param {
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_sq_param {
+ u32 sqc[MLX5_ST_SZ_DW(sqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_cq_param {
+ u32 cqc[MLX5_ST_SZ_DW(cqc)];
+ struct mlx5_wq_param wq;
+ u16 eq_ix;
+};
+
+struct mlx5e_channel_param {
+ struct mlx5e_rq_param rq;
+ struct mlx5e_sq_param sq;
+ struct mlx5e_cq_param rx_cq;
+ struct mlx5e_cq_param tx_cq;
+};
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 port_state;
+
+ port_state = mlx5_query_vport_state(mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+ if (port_state == VPORT_STATE_UP)
+ netif_carrier_on(priv->netdev);
+ else
+ netif_carrier_off(priv->netdev);
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ update_carrier_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_carrier(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u64 tx_offload_none;
+ int i, j;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ /* Collect the SW counters first and then the HW counters for consistency */
+ s->tso_packets = 0;
+ s->tso_bytes = 0;
+ s->tx_queue_stopped = 0;
+ s->tx_queue_wake = 0;
+ s->tx_queue_dropped = 0;
+ tx_offload_none = 0;
+ s->lro_packets = 0;
+ s->lro_bytes = 0;
+ s->rx_csum_none = 0;
+ s->rx_wqe_err = 0;
+ for (i = 0; i < priv->params.num_channels; i++) {
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ s->lro_packets += rq_stats->lro_packets;
+ s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_wqe_err += rq_stats->wqe_err;
+
+ for (j = 0; j < priv->num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+
+ s->tso_packets += sq_stats->tso_packets;
+ s->tso_bytes += sq_stats->tso_bytes;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ tx_offload_none += sq_stats->csum_offload_none;
+ }
+ }
+
+ /* HW counters */
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+ memset(out, 0, outlen);
+
+ if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ goto free_out;
+
+#define MLX5_GET_CTR(p, x) \
+ MLX5_GET64(query_vport_counter_out, p, x)
+
+ s->rx_error_packets =
+ MLX5_GET_CTR(out, received_errors.packets);
+ s->rx_error_bytes =
+ MLX5_GET_CTR(out, received_errors.octets);
+ s->tx_error_packets =
+ MLX5_GET_CTR(out, transmit_errors.packets);
+ s->tx_error_bytes =
+ MLX5_GET_CTR(out, transmit_errors.octets);
+
+ s->rx_unicast_packets =
+ MLX5_GET_CTR(out, received_eth_unicast.packets);
+ s->rx_unicast_bytes =
+ MLX5_GET_CTR(out, received_eth_unicast.octets);
+ s->tx_unicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+ s->tx_unicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+ s->rx_multicast_packets =
+ MLX5_GET_CTR(out, received_eth_multicast.packets);
+ s->rx_multicast_bytes =
+ MLX5_GET_CTR(out, received_eth_multicast.octets);
+ s->tx_multicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+ s->tx_multicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+ s->rx_broadcast_packets =
+ MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ s->rx_broadcast_bytes =
+ MLX5_GET_CTR(out, received_eth_broadcast.octets);
+ s->tx_broadcast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+ s->tx_broadcast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+ s->rx_packets =
+ s->rx_unicast_packets +
+ s->rx_multicast_packets +
+ s->rx_broadcast_packets;
+ s->rx_bytes =
+ s->rx_unicast_bytes +
+ s->rx_multicast_bytes +
+ s->rx_broadcast_bytes;
+ s->tx_packets =
+ s->tx_unicast_packets +
+ s->tx_multicast_packets +
+ s->tx_broadcast_packets;
+ s->tx_bytes =
+ s->tx_unicast_bytes +
+ s->tx_multicast_bytes +
+ s->tx_broadcast_bytes;
+
+ /* Update calculated offload counters */
+ s->tx_csum_offload = s->tx_packets - tx_offload_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+
+free_out:
+ kvfree(out);
+}
+
+static void mlx5e_update_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+ update_stats_work);
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_update_stats(priv);
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(
+ MLX5E_UPDATE_STATS_INTERVAL));
+ }
+ mutex_unlock(&priv->state_lock);
+}
+
+static void __mlx5e_async_event(struct mlx5e_priv *priv,
+ enum mlx5_dev_event event)
+{
+ switch (event) {
+ case MLX5_DEV_EVENT_PORT_UP:
+ case MLX5_DEV_EVENT_PORT_DOWN:
+ schedule_work(&priv->update_carrier_work);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ spin_lock(&priv->async_events_spinlock);
+ if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ __mlx5e_async_event(priv, event);
+ spin_unlock(&priv->async_events_spinlock);
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+ spin_lock_irq(&priv->async_events_spinlock);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ spin_unlock_irq(&priv->async_events_spinlock);
+}
+
+#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int wq_sz;
+ int err;
+ int i;
+
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+
+ rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
+ MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+ u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+
+ wqe->data.lkey = c->mkey_be;
+ wqe->data.byte_count =
+ cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+ }
+
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->channel = c;
+ rq->ix = c->ix;
+
+ return 0;
+
+err_rq_wq_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+ kfree(rq->skb);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * rq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+ MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&rq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_rq(mdev, rq->rqn);
+}
+
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_wq_ll *wq = &rq->wq;
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ if (wq->cur_sz >= priv->params.min_rx_wqes)
+ return 0;
+
+ msleep(20);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ int err;
+
+ err = mlx5e_create_rq(c, param, rq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_rq(rq, param);
+ if (err)
+ goto err_destroy_rq;
+
+ err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_disable_rq;
+
+ set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
+ return 0;
+
+err_disable_rq:
+ mlx5e_disable_rq(rq);
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+
+ return err;
+}
+
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+ clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+
+ mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq))
+ msleep(20);
+
+ /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
+ napi_synchronize(&rq->channel->napi);
+
+ mlx5e_disable_rq(rq);
+ mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->dma_fifo);
+ kfree(sq->skb);
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
+ sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
+ numa);
+
+ if (!sq->skb || !sq->dma_fifo) {
+ mlx5e_free_sq_db(sq);
+ return -ENOMEM;
+ }
+
+ sq->dma_fifo_mask = df_sz - 1;
+
+ return 0;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *sqc = param->sqc;
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ int txq_ix;
+ int err;
+
+ err = mlx5_alloc_map_uar(mdev, &sq->uar);
+ if (err)
+ return err;
+
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+ &sq->wq_ctrl);
+ if (err)
+ goto err_unmap_free_uar;
+
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ sq->uar_map = sq->uar.map;
+ sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+ if (err)
+ goto err_sq_wq_destroy;
+
+ txq_ix = c->ix + tc * priv->params.num_channels;
+ sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ priv->txq_to_sq_map[txq_ix] = sq;
+
+ return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &sq->uar);
+
+ return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+
+ mlx5e_free_sq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+ MLX5_SET(sqc, sqc, user_index, sq->tc);
+ MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->uar.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ int err;
+
+ err = mlx5e_create_sq(c, tc, param, sq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_sq(sq, param);
+ if (err)
+ goto err_destroy_sq;
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ if (err)
+ goto err_disable_sq;
+
+ set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+
+ return 0;
+
+err_disable_sq:
+ mlx5e_disable_sq(sq);
+err_destroy_sq:
+ mlx5e_destroy_sq(sq);
+
+ return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+}
+
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+ clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
+ netif_tx_disable_queue(sq->txq);
+
+ /* ensure hw is notified of all pending wqes */
+ if (mlx5e_sq_has_room_for(sq, 1))
+ mlx5e_send_nop(sq, true);
+
+ mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ while (sq->cc != sq->pc) /* wait till sq is empty */
+ msleep(20);
+
+ /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+ napi_synchronize(&sq->channel->napi);
+
+ mlx5e_disable_sq(sq);
+ mlx5e_destroy_sq(sq);
+}
+
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+ u32 i;
+
+ param->wq.numa = cpu_to_node(c->cpu);
+ param->eq_ix = c->ix;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ cq->napi = &c->napi;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->channel = c;
+
+ return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+
+ void *in;
+ void *cqc;
+ int inlen;
+ int irqn_not_used;
+ int eqn;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+ mlx5_fill_page_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+ kvfree(in);
+
+ if (err)
+ return err;
+
+ mlx5e_cq_arm(cq);
+
+ return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq,
+ u16 moderation_usecs,
+ u16 moderation_frames)
+{
+ int err;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ err = mlx5e_create_cq(c, param, cq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
+ if (err)
+ goto err_destroy_cq;
+
+ return 0;
+
+err_destroy_cq:
+ mlx5e_destroy_cq(cq);
+
+ return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+ mlx5e_disable_cq(cq);
+ mlx5e_destroy_cq(cq);
+}
+
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+ return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ struct mlx5e_priv *priv = c->priv;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+ priv->params.tx_cq_moderation_usec,
+ priv->params.tx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+ }
+
+ return 0;
+
+err_close_tx_cqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_cq(&c->sq[tc].cq);
+
+ return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+ if (err)
+ goto err_close_sqs;
+ }
+
+ return 0;
+
+err_close_sqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_sq(&c->sq[tc]);
+
+ return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_sq(&c->sq[tc]);
+}
+
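+/* txq number = channel index + tc * num_channels, so txqs of the same tc are contiguous */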
+static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
+ int num_channels)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
+ c->tc_to_txq_map[i] = c->ix + i * num_channels;
+}
+
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_channel **cp)
+{
+ struct net_device *netdev = priv->netdev;
+ int cpu = mlx5e_get_cpu(priv, ix);
+ struct mlx5e_channel *c;
+ int err;
+
+ c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ if (!c)
+ return -ENOMEM;
+
+ c->priv = priv;
+ c->ix = ix;
+ c->cpu = cpu;
+ c->pdev = &priv->mdev->pdev->dev;
+ c->netdev = priv->netdev;
+ c->mkey_be = cpu_to_be32(priv->mr.key);
+ c->num_tc = priv->num_tc;
+
+ mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_napi_del;
+
+ err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+ priv->params.rx_cq_moderation_usec,
+ priv->params.rx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+
+ napi_enable(&c->napi);
+
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_disable_napi;
+
+ err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+ if (err)
+ goto err_close_sqs;
+
+ netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+ *cp = c;
+
+ return 0;
+
+err_close_sqs:
+ mlx5e_close_sqs(c);
+
+err_disable_napi:
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+ mlx5e_close_tx_cqs(c);
+
+err_napi_del:
+ netif_napi_del(&c->napi);
+ kfree(c);
+
+ return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+ mlx5e_close_rq(&c->rq);
+ mlx5e_close_sqs(c);
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_tx_cqs(c);
+ netif_napi_del(&c->napi);
+ kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.linear = 1;
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_channel_param *cparam)
+{
+ memset(cparam, 0, sizeof(*cparam));
+
+ mlx5e_build_rq_param(priv, &cparam->rq);
+ mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel_param cparam;
+ int err = -ENOMEM;
+ int i;
+ int j;
+
+ priv->channel = kcalloc(priv->params.num_channels,
+ sizeof(struct mlx5e_channel *), GFP_KERNEL);
+
+ priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+ sizeof(struct mlx5e_sq *), GFP_KERNEL);
+
+ if (!priv->channel || !priv->txq_to_sq_map)
+ goto err_free_txq_to_sq_map;
+
+ mlx5e_build_channel_param(priv, &cparam);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ if (err)
+ goto err_close_channels;
+ }
+
+ for (j = 0; j < priv->params.num_channels; j++) {
+ err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+ if (err)
+ goto err_close_channels;
+ }
+
+ return 0;
+
+err_close_channels:
+ for (i--; i >= 0; i--)
+ mlx5e_close_channel(priv->channel[i]);
+
+err_free_txq_to_sq_map:
+ kfree(priv->txq_to_sq_map);
+ kfree(priv->channel);
+
+ return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_close_channel(priv->channel[i]);
+
+ kfree(priv->txq_to_sq_map);
+ kfree(priv->channel);
+}
+
+static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ err = mlx5e_open_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_tis(priv, tc);
+
+ return err;
+}
+
+static void mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++)
+ mlx5e_close_tis(priv, tc);
+}
+
+static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ void *rqtc;
+ int inlen;
+ int err;
+ int sz;
+ int i;
+
+ sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
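+ /* fill the indirection table by spreading the channels' RQs round-robin */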
+ for (i = 0; i < sz; i++) {
+ int ix = i % priv->params.num_channels;
+
+ MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+ }
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ if (!err)
+ priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+ mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+ if (priv->params.lro_en) {
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
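+ /* max aggregated payload, minus a rough L2/L3 header estimate, in units of 256 bytes */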
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[3]));
+ }
+
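+ /* MLX5E_TT_ANY is dispatched directly to channel 0's RQ; all other traffic types go through the RSS indirection table */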
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn,
+ priv->channel[0]->rq.rqn);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key),
+ MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key));
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_open_tir(priv, i);
+ if (err)
+ goto err_close_tirs;
+ }
+
+ return 0;
+
+err_close_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_close_tir(priv, i);
+
+ return err;
+}
+
+static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_close_tir(priv, i);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int hw_mtu;
+ int err;
+
+ err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+ if (err)
+ return err;
+
+ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+ if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+ netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
+ __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+
+ netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ return 0;
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int num_txqs;
+ int err;
+
+ num_txqs = priv->params.num_channels * priv->params.num_tc;
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+ err = mlx5e_set_dev_port_mtu(netdev);
+ if (err)
+ return err;
+
+ err = mlx5e_open_tises(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
+ __func__, err);
+ return err;
+ }
+
+ err = mlx5e_open_channels(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+ __func__, err);
+ goto err_close_tises;
+ }
+
+ err = mlx5e_open_rqt(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
+ __func__, err);
+ goto err_close_channels;
+ }
+
+ err = mlx5e_open_tirs(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
+ __func__, err);
+ goto err_close_rqt;
+ }
+
+ err = mlx5e_open_flow_table(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
+ __func__, err);
+ goto err_close_tirs;
+ }
+
+ err = mlx5e_add_all_vlan_rules(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+ __func__, err);
+ goto err_close_flow_table;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_update_carrier(priv);
+ mlx5e_set_rx_mode_core(priv);
+
+ schedule_delayed_work(&priv->update_stats_work, 0);
+ return 0;
+
+err_close_flow_table:
+ mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+ mlx5e_close_tirs(priv);
+
+err_close_rqt:
+ mlx5e_close_rqt(priv);
+
+err_close_channels:
+ mlx5e_close_channels(priv);
+
+err_close_tises:
+ mlx5e_close_tises(priv);
+
+ return err;
+}
+
+static int mlx5e_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_open_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_set_rx_mode_core(priv);
+ mlx5e_del_all_vlan_rules(priv);
+ netif_carrier_off(priv->netdev);
+ mlx5e_close_flow_table(priv);
+ mlx5e_close_tirs(priv);
+ mlx5e_close_rqt(priv);
+ mlx5e_close_channels(priv);
+ mlx5e_close_tises(priv);
+
+ return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_close_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params)
+{
+ int err = 0;
+ int was_opened;
+
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params = *new_params;
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
+
+ return err;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
+ stats->rx_packets = vstats->rx_packets;
+ stats->rx_bytes = vstats->rx_bytes;
+ stats->tx_packets = vstats->tx_packets;
+ stats->tx_bytes = vstats->tx_bytes;
+ stats->multicast = vstats->rx_multicast_packets +
+ vstats->tx_multicast_packets;
+ stats->tx_errors = vstats->tx_error_packets;
+ stats->rx_errors = vstats->rx_error_packets;
+ stats->tx_dropped = vstats->tx_queue_dropped;
+ stats->rx_crc_errors = 0;
+ stats->rx_length_errors = 0;
+
+ return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ netif_addr_lock_bh(netdev);
+ ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ netif_addr_unlock_bh(netdev);
+
+ schedule_work(&priv->set_rx_mode_work);
+
+ return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ netdev_features_t changes = features ^ netdev->features;
+ struct mlx5e_params new_params;
+ bool update_params = false;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+
+ if (changes & NETIF_F_LRO) {
+ new_params.lro_en = !!(features & NETIF_F_LRO);
+ update_params = true;
+ }
+
+ if (update_params)
+ mlx5e_update_priv_params(priv, &new_params);
+
+ if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ mlx5e_enable_vlan_filter(priv);
+ else
+ mlx5e_disable_vlan_filter(priv);
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int max_mtu;
+ int err;
+
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+
+ if (new_mtu > max_mtu) {
+ netdev_err(netdev,
+ "%s: Bad MTU (%d) > (%d) Max\n",
+ __func__, new_mtu, max_mtu);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->state_lock);
+ netdev->mtu = new_mtu;
+ err = mlx5e_update_priv_params(priv, &priv->params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static struct net_device_ops mlx5e_netdev_ops = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_set_rx_mode = mlx5e_set_rx_mode,
+ .ndo_set_mac_address = mlx5e_set_mac,
+ .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
+ .ndo_set_features = mlx5e_set_features,
+ .ndo_change_mtu = mlx5e_change_mtu,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return -ENOTSUPP;
+ if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+ !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+ !MLX5_CAP_ETH(mdev, csum_cap) ||
+ !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+ !MLX5_CAP_ETH(mdev, vlan_cap) ||
+ !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.max_ft_level)
+ < 3) {
+ mlx5_core_warn(mdev,
+ "Not creating net device, some required device capabilities are missing\n");
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ int num_comp_vectors)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ priv->params.log_rq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.rx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ priv->params.rx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ priv->params.tx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ priv->params.tx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.min_rx_wqes =
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+ priv->params.rx_hash_log_tbl_sz =
+ (order_base_2(num_comp_vectors) >
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+ order_base_2(num_comp_vectors) :
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+ priv->params.num_tc = 1;
+ priv->params.default_vlan_prio = 0;
+
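+ /* the leading 'false &&' keeps LRO disabled by default, regardless of device capability */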
+ priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = num_comp_vectors;
+ priv->num_tc = priv->params.num_tc;
+ priv->default_vlan_prio = priv->params.default_vlan_prio;
+
+ spin_lock_init(&priv->async_events_spinlock);
+ mutex_init(&priv->state_lock);
+
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
+}
+
+static void mlx5e_build_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
+ if (priv->num_tc > 1) {
+ mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+ }
+
+ netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->watchdog_timeo = 15 * HZ;
+
+ netdev->ethtool_ops = &mlx5e_ethtool_ops;
+
+ netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_GRO;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_RXCSUM;
+ netdev->vlan_features |= NETIF_F_RXHASH;
+
+ if (!!MLX5_CAP_ETH(mdev, lro_cap))
+ netdev->vlan_features |= NETIF_F_LRO;
+
+ netdev->hw_features = netdev->vlan_features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features = netdev->hw_features;
+ if (!priv->params.lro_en)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ mlx5e_set_netdev_dev_addr(netdev);
+}
+
+static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+ struct mlx5_core_mr *mr)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
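+ /* physical-address mkey whose key serves as the lkey for the driver's data segments */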
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+ NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int err;
+
+ if (mlx5e_check_required_hca_cap(mdev))
+ return NULL;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+ if (!netdev) {
+ mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+ return NULL;
+ }
+
+ mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev(netdev);
+
+ netif_carrier_off(netdev);
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
+ __func__, err);
+ goto err_free_netdev;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
+ __func__, err);
+ goto err_unmap_free_uar;
+ }
+
+ err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
+ __func__, err);
+ goto err_dealloc_pd;
+ }
+
+ err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
+ __func__, err);
+ goto err_dealloc_transport_domain;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "%s: register_netdev failed, %d\n",
+ __func__, err);
+ goto err_destroy_mkey;
+ }
+
+ mlx5e_enable_async_events(priv);
+
+ return priv;
+
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, &priv->mr);
+
+err_dealloc_transport_domain:
+ mlx5_dealloc_transport_domain(mdev, priv->tdn);
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+ return NULL;
+}
+
+static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+
+ unregister_netdev(netdev);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+ mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
+ mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+ mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
+ free_netdev(netdev);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+ .add = mlx5e_create_netdev,
+ .remove = mlx5e_destroy_netdev,
+ .event = mlx5e_async_event,
+ .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
+ .get_dev = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+ mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+ mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644
index 000000000..9a9374131
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "en.h"
+
+static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(rq->pdev,
+ /* hw start padding */
+ skb->data,
+ /* hw end padding */
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
+ goto err_free_skb;
+
+ skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
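+ /* stash the dma address in skb->cb so the completion path can unmap it */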
+ *((dma_addr_t *)skb->cb) = dma_addr;
+ wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+
+ rq->skb[ix] = skb;
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+ return false;
+
+ while (!mlx5_wq_ll_is_full(wq)) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+ break;
+
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ }
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+
+ return !mlx5_wq_ll_is_full(wq);
+}
+
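+/* LRO merges several TCP segments into one skb; rewrite the IP and TCP
+ * headers so they describe the aggregated packet reported by the CQE.
+ */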
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ struct tcphdr *tcp;
+
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
+ (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+ u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct iphdr));
+ ipv6 = NULL;
+ } else {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct ipv6hdr));
+ ipv4 = NULL;
+ }
+
+ if (get_cqe_lro_tcppsh(cqe))
+ tcp->psh = 1;
+
+ if (tcp_ack) {
+ tcp->ack = 1;
+ tcp->ack_seq = cqe->lro_ack_seq_num;
+ tcp->window = cqe->lro_tcp_win;
+ }
+
+ if (ipv4) {
+ ipv4->ttl = cqe->lro_min_ttl;
+ ipv4->tot_len = cpu_to_be16(tot_len);
+ ipv4->check = 0;
+ ipv4->check = ip_fast_csum((unsigned char *)ipv4,
+ ipv4->ihl);
+ } else {
+ ipv6->hop_limit = cqe->lro_min_ttl;
+ ipv6->payload_len = cpu_to_be16(tot_len -
+ sizeof(struct ipv6hdr));
+ }
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb)
+{
+ u8 cht = cqe->rss_hash_type;
+ int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+ (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_NONE;
+ skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rq->netdev;
+ u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ int lro_num_seg;
+
+ skb_put(skb, cqe_bcnt);
+
+ lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ if (lro_num_seg > 1) {
+ mlx5e_lro_update_hdr(skb, cqe);
+ skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ rq->stats.lro_packets++;
+ rq->stats.lro_bytes += cqe_bcnt;
+ }
+
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (cqe->hds_ip_ext & CQE_L2_OK) &&
+ (cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ skb_record_rx_queue(skb, rq->ix);
+
+ if (likely(netdev->features & NETIF_F_RXHASH))
+ mlx5e_skb_set_hash(cqe, skb);
+
+ if (cqe_has_vlan(cqe))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(cqe->vlan_info));
+}
+
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+ struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ for (i = 0; i < budget; i++) {
+ struct mlx5e_rx_wqe *wqe;
+ struct mlx5_cqe64 *cqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ skb = rq->skb[wqe_counter];
+ prefetch(skb->data);
+ rq->skb[wqe_counter] = NULL;
+
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ dev_kfree_skb(skb);
+ goto wq_ll_pop;
+ }
+
+ mlx5e_build_rx_skb(cqe, rq, skb);
+ rq->stats.packets++;
+ napi_gro_receive(cq->napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (i == budget) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644
index 000000000..03f28f438
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en.h"
+
+#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+ MLX5E_SQ_NOPS_ROOM)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
+
+ sq->skb[pi] = NULL;
+ sq->pc++;
+
+ if (notify_hw) {
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, wqe);
+ }
+}
+
+static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
+ u32 *size)
+{
+ sq->dma_fifo_pc--;
+ *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ dma_addr_t addr;
+ u32 size;
+ int i;
+
+ for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+ mlx5e_dma_pop_last_pushed(sq, &addr, &size);
+ dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ }
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
+ u32 size)
+{
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+ sq->dma_fifo_pc++;
+}
+
+static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
+ u32 *size)
+{
+ *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+}
+
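+/* choose a txq: the channel comes from the stack's fallback selection,
+ * the traffic class from the packet's vlan priority (or the port default).
+ */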
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int channel_ix = fallback(dev, skb);
+ int up = skb_vlan_tag_present(skb) ?
+ skb->vlan_tci >> VLAN_PRIO_SHIFT :
+ priv->default_vlan_prio;
+ int tc = netdev_get_prio_tc_map(dev, up);
+
+ return priv->channel[channel_ix]->tc_to_txq_map[tc];
+}
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+ struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ return MLX5E_MIN_INLINE;
+}
+
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ u8 opcode = MLX5_OPCODE_SEND;
+ dma_addr_t dma_addr = 0;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
+ int i;
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ else
+ sq->stats.csum_offload_none++;
+
+ if (skb_is_gso(skb)) {
+ u32 payload_len;
+
+ eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ opcode = MLX5_OPCODE_LSO;
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ payload_len = skb->len - ihs;
+ MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+ (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += payload_len;
+ } else {
+ ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
+ ETH_ZLEN);
+ }
+
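+ /* copy the first ihs bytes inline into the WQE's eth segment and pull them from the skb */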
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
+
+ eseg->inline_hdr_sz = cpu_to_be16(ihs);
+
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+ MLX5_SEND_WQE_DS);
+ dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+ MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+
+ headlen = skb_headlen(skb);
+ if (headlen) {
+ dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(headlen);
+
+ mlx5e_dma_push(sq, dma_addr, headlen);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int fsz = skb_frag_size(frag);
+
+ dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(fsz);
+
+ mlx5e_dma_push(sq, dma_addr, fsz);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+
+ sq->skb[pi] = skb;
+
+ MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
+ MLX5_SEND_WQEBB_NUM_DS);
+ sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+ netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats.stopped++;
+ }
+
+ if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, wqe);
+ }
+
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((sq->pc & wq->sz_m1) > sq->edge)
+ mlx5e_send_nop(sq, false);
+
+ sq->stats.packets++;
+ return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+ sq->stats.dropped++;
+ mlx5e_dma_unmap_wqe_err(sq, skb);
+
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+
+ return mlx5e_sq_xmit(sq, skb);
+}
+
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u32 dma_fifo_cc;
+ u32 nbytes;
+ u16 npkts;
+ u16 sqcc;
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ sq = container_of(cq, struct mlx5e_sq, cq);
+
+ npkts = 0;
+ nbytes = 0;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ /* avoid dirtying sq cache line every cqe */
+ dma_fifo_cc = sq->dma_fifo_cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ u16 wqe_counter;
+ bool last_wqe;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
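+ /* one cqe may complete a batch of wqes; free every skb up to wqe_counter */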
+ do {
+ struct sk_buff *skb;
+ u16 ci;
+ int j;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = sqcc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+
+ if (unlikely(!skb)) { /* nop */
+ sq->stats.nop++;
+ sqcc++;
+ continue;
+ }
+
+ for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+ dma_addr_t addr;
+ u32 size;
+
+ mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
+ dma_fifo_cc++;
+ dma_unmap_single(sq->pdev, addr, size,
+ DMA_TO_DEVICE);
+ }
+
+ npkts++;
+ nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
+ sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+ dev_kfree_skb(skb);
+ } while (!last_wqe);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->dma_fifo_cc = dma_fifo_cc;
+ sq->cc = sqcc;
+
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+ if (netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
+ likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
+ }
+ if (i == MLX5E_TX_CQ_POLL_BUDGET) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644
index 000000000..2c7cb6755
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+ struct mlx5_cqwq *wq = &cq->wq;
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+ int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
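+ /* the cqe is valid only when its ownership bit matches the parity of the sw wrap count */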
+ if (cqe_ownership_bit != sw_ownership_val)
+ return NULL;
+
+ /* ensure cqe content is read after cqe ownership bit */
+ rmb();
+
+ return cqe;
+}
+
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+ napi);
+ bool busy = false;
+ int i;
+
+ clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+ for (i = 0; i < c->num_tc; i++)
+ busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+
+ busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
+
+ busy |= mlx5e_post_rx_wqes(&c->rq);
+
+ if (busy)
+ return budget;
+
+ napi_complete(napi);
+
+ /* avoid losing completion event during/after polling cqs */
+ if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+ napi_schedule(napi);
+ return 0;
+ }
+
+ for (i = 0; i < c->num_tc; i++)
+ mlx5e_cq_arm(&c->sq[i].cq);
+ mlx5e_cq_arm(&c->rq.cq);
+
+ return 0;
+}
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+ barrier();
+ napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct net_device *netdev = priv->netdev;
+
+ netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+ __func__, mcq->cqn, event);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 58800e4f3..a40b96d4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
struct mlx5_create_eq_mbox_in *in;
struct mlx5_create_eq_mbox_out out;
int err;
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
- err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
- &eq->buf);
+ err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
@@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
goto err_in;
}
- snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+ snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
+
eq->eqn = out.eq_number;
eq->irqn = vecidx;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
- eq->name, eq);
+ err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
return 0;
err_irq:
- free_irq(table->msix_arr[vecidx].vector, eq);
+ free_irq(priv->msix_arr[vecidx].vector, eq);
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
mlx5_debug_eq_remove(dev, eq);
- free_irq(table->msix_arr[eq->irqn].vector, eq);
+ free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(table->msix_arr[eq->irqn].vector);
+ synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
mlx5_buf_free(dev, &eq->buf);
return err;
@@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
- if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ if (MLX5_CAP_GEN(dev, pg))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
@@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->pages_eq,
MLX5_EQ_VEC_PAGES,
- dev->caps.gen.max_vf + 1,
+ /* TODO: sriov max_vf + */ 1,
1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
&dev->priv.uuari.uars[0]);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
new file mode 100644
index 000000000..ca90b9bc3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+ struct mlx5_flow_table_group g;
+ u32 id;
+ u32 start_ix;
+};
+
+struct mlx5_flow_table {
+ struct mlx5_core_dev *dev;
+ u8 level;
+ u8 type;
+ u32 id;
+ struct mutex mutex; /* sync bitmap alloc */
+ u16 num_groups;
+ struct mlx5_ftg *group;
+ unsigned long *bitmap;
+ u32 size;
+};
+
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+ u32 flow_index, void *flow_context)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 *in;
+ void *in_flow_context;
+ int fcdls =
+ MLX5_GET(flow_context, flow_context, destination_list_size) *
+ MLX5_ST_SZ_BYTES(dest_format_struct);
+ int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, flow_index);
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ memcpy(in_flow_context, flow_context,
+ MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+ MLX5_SET(flow_context, in_flow_context, group_id,
+ ft->group[group_ix].id);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+ MLX5_SET_DFTEI(in, table_type, ft->type);
+ MLX5_SET_DFTEI(in, table_id, ft->id);
+ MLX5_SET_DFTEI(in, flow_index, flow_index);
+ MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+ MLX5_SET_DFGI(in, table_type, ft->type);
+ MLX5_SET_DFGI(in, table_id, ft->id);
+ MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+ u32 *in;
+ void *in_match_criteria;
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ u32 start_ix = ft->group[i].start_ix;
+ u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+ in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+ match_criteria);
+
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+ MLX5_SET_CFGI(in, table_type, ft->type);
+ MLX5_SET_CFGI(in, table_id, ft->id);
+ MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET_CFGI(in, start_flow_index, start_ix);
+ MLX5_SET_CFGI(in, end_flow_index, end_ix);
+ MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+ memcpy(in_match_criteria, g->match_criteria,
+ MLX5_ST_SZ_BYTES(fte_match_param));
+
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ if (!err)
+ ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+ group_id);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++)
+ mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ err = mlx5_create_flow_group_cmd(ft, i);
+ if (err)
+ goto err_destroy_flow_table_groups;
+ }
+
+ return 0;
+
+err_destroy_flow_table_groups:
+ for (i--; i >= 0; i--)
+ mlx5_destroy_flow_group_cmd(ft, i);
+
+ return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, level, ft->level);
+ MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
+
+ MLX5_SET(create_flow_table_in, in, opcode,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+ MLX5_SET_DFTI(in, table_type, ft->type);
+ MLX5_SET_DFTI(in, table_id, ft->id);
+ MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
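+/* return the index of an existing flow group whose match criteria exactly match the requested ones */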
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+ u32 *match_criteria, int *group_ix)
+{
+ void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+ void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters);
+ void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ inner_headers);
+ int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+ int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ outer_headers);
+ void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ misc_parameters);
+ void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ inner_headers);
+
+ if (g->match_criteria_enable != match_criteria_enable)
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+ if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+ if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+ if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+ continue;
+
+ *group_ix = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+ struct mlx5_ftg *g = &ft->group[group_ix];
+ int err = 0;
+
+ mutex_lock(&ft->mutex);
+
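+	/* Take the first free index within this group's slice of the table. */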
+ *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+ if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+ err = -ENOSPC;
+ else
+ __set_bit(*ix, ft->bitmap);
+
+ mutex_unlock(&ft->mutex);
+
+ return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+ __clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+ int group_ix;
+ int err;
+
+ err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+ &group_ix);
+ if (err) {
+ mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+ return err;
+ }
+
+ err = alloc_flow_index(ft, group_ix, flow_index);
+ if (err) {
+ mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+ return err;
+ }
+
+ return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_del_flow_entry_cmd(ft, flow_index);
+ mlx5_free_flow_index(ft, flow_index);
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group)
+{
+ struct mlx5_flow_table *ft;
+ u32 start_ix = 0;
+ u32 ft_size = 0;
+ void *gr;
+ void *bm;
+ int err;
+ int i;
+
+ for (i = 0; i < num_groups; i++)
+ ft_size += (1 << group[i].log_sz);
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+ bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
+ if (!ft || !gr || !bm)
+ goto err_free_ft;
+
+ ft->group = gr;
+ ft->bitmap = bm;
+ ft->num_groups = num_groups;
+ ft->level = level;
+ ft->type = table_type;
+ ft->size = ft_size;
+ ft->dev = dev;
+ mutex_init(&ft->mutex);
+
+ for (i = 0; i < ft->num_groups; i++) {
+ memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+ ft->group[i].start_ix = start_ix;
+ start_ix += 1 << group[i].log_sz;
+ }
+
+ err = mlx5_create_flow_table_cmd(ft);
+ if (err)
+ goto err_free_ft;
+
+ err = mlx5_create_flow_table_groups(ft);
+ if (err)
+ goto err_destroy_flow_table_cmd;
+
+ return ft;
+
+err_destroy_flow_table_cmd:
+ mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+ mlx5_core_warn(dev, "failed to alloc flow table\n");
+ kfree(bm);
+ kfree(gr);
+ kfree(ft);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_destroy_flow_table_groups(ft);
+ mlx5_destroy_flow_table_cmd(ft);
+ kfree(ft->bitmap);
+ kfree(ft->group);
+ kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 4b4cda3bc..9335e5ae1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,79 +35,133 @@
#include <linux/module.h>
#include "mlx5_core.h"
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+ int outlen)
{
- struct mlx5_cmd_query_adapter_mbox_out *out;
- struct mlx5_cmd_query_adapter_mbox_in in;
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- memset(&in, 0, sizeof(in));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+ err = mlx5_cmd_query_adapter(dev, out, outlen);
if (err)
- goto out_out;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
- goto out_out;
- }
+ goto out;
- memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
+ memcpy(dev->board_id,
+ MLX5_ADDR_OF(query_adapter_out, out,
+ query_adapter_struct.vsd_contd_psid),
+ MLX5_FLD_SZ_BYTES(query_adapter_out,
+ query_adapter_struct.vsd_contd_psid));
-out_out:
+out:
kfree(out);
-
return err;
}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
- return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ if (err)
+ goto out;
+
+ *vendor_id = MLX5_GET(query_adapter_out, out,
+ query_adapter_struct.ieee_vendor_id);
+out:
+ kfree(out);
+ return err;
}
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
- u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
- int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
int err;
- if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
- return -ENOTSUPP;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
- memset(in, 0, sizeof(in));
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
- goto out;
+ return err;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err) {
- mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
- goto out;
+ if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
}
- memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
- sizeof(*caps));
+ if (MLX5_CAP_GEN(dev, pg)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
- mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
- be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+ if (MLX5_CAP_GEN(dev, atomic)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
-out:
- kfree(out);
- return err;
+ if (MLX5_CAP_GEN(dev, roce)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+ return 0;
}
-EXPORT_SYMBOL(mlx5_query_odp_caps);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index ee1b0b965..1368dac00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -36,7 +36,7 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
struct mlx5_mad_ifc_mbox_in *in = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28425e5ea..06e3e1e54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
@@ -47,10 +48,6 @@
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0"
-#define DRIVER_RELDATE "January 2015"
-
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
@@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev)
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- int num_eqs = 1 << dev->caps.gen.log_max_eq;
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = &priv->eq_table;
+ int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int i;
- nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_EQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
if (nvec <= MLX5_EQ_VEC_COMP_BASE)
return -ENOMEM;
- table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
- if (!table->msix_arr)
- return -ENOMEM;
+ priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+ priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+ if (!priv->msix_arr || !priv->irq_info)
+ goto err_free_msix;
for (i = 0; i < nvec; i++)
- table->msix_arr[i].entry = i;
+ priv->msix_arr[i].entry = i;
- nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
MLX5_EQ_VEC_COMP_BASE + 1, nvec);
if (nvec < 0)
return nvec;
@@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
+
+err_free_msix:
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
+ return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
pci_disable_msix(dev->pdev);
- kfree(table->msix_arr);
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
@@ -277,98 +284,20 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
-/* selectively copy writable fields clearing any reserved area
- */
-static void copy_rw_fields(void *to, struct mlx5_caps *from)
-{
- __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
- u64 v64;
-
- MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
- MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
- v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
- *flags_off = cpu_to_be64(v64);
-}
-
-static u16 get_pkey_table_size(int pkey)
-{
- if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
- return 0;
-
- return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
-}
-
-static void fw2drv_caps(struct mlx5_caps *caps, void *out)
-{
- struct mlx5_general_caps *gen = &caps->gen;
-
- gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
- gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
- gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
- gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
- gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
- gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
- gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
- gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
- gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
- gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
- gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
- gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
- gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
- gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
- gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
- gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
- gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
- gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
- gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
- gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
- gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
- gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
- gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
- gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
- gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
- pr_debug("flags = 0x%llx\n", gen->flags);
- gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
- gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
- gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
- gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
- gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
- gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
- gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
- gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
- gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
- gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
- gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
-}
-
-static const char *caps_opmod_str(u16 opmod)
-{
- switch (opmod) {
- case HCA_CAP_OPMOD_GET_MAX:
- return "GET_MAX";
- case HCA_CAP_OPMOD_GET_CUR:
- return "GET_CUR";
- default:
- return "Invalid";
- }
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
+ void *out, *hca_caps;
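+	/* op_mod: capability type in bits [15:1], cur/max selector in bit 0 */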
+ u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
+
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
@@ -377,12 +306,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
err = mlx5_cmd_status_to_err_v2(out);
if (err) {
- mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
+ mlx5_core_warn(dev,
+ "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
+ cap_type, cap_mode, err);
goto query_ex;
}
- mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
- fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ switch (cap_mode) {
+ case HCA_CAP_OPMOD_GET_MAX:
+ memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ case HCA_CAP_OPMOD_GET_CUR:
+ memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ default:
+ mlx5_core_warn(dev,
+ "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+ cap_type, cap_mode);
+ err = -EINVAL;
+ break;
+ }
query_ex:
kfree(out);
return err;
@@ -409,49 +356,47 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
{
void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
- struct mlx5_caps *cur_caps = NULL;
- struct mlx5_caps *max_caps = NULL;
int err = -ENOMEM;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_hca_cap;
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
goto query_ex;
- max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
- if (!max_caps)
- goto query_ex;
-
- cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
- if (!cur_caps)
- goto query_ex;
-
- err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
goto query_ex;
- err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
if (err)
goto query_ex;
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+ mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+ mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+ 128);
/* we limit the size of the pkey table to 128 entries for now */
- cur_caps->gen.pkey_table_size = 128;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+ to_fw_pkey_sz(128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
- cur_caps->gen.log_max_qp = prof->log_max_qp;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+ prof->log_max_qp);
+
+ /* disable cmdif checksum */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
- /* disable checksum */
- cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
- copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
- cur_caps);
err = set_caps(dev, set_ctx, set_sz);
query_ex:
- kfree(cur_caps);
- kfree(max_caps);
kfree(set_ctx);
-
return err;
}
@@ -507,6 +452,74 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
return 0;
}
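+/* Pin each completion vector's IRQ to a CPU local to the device's NUMA node. */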
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+ int numa_node = dev_to_node(&mdev->pdev->dev);
+ int err;
+
+ if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ priv->irq_info[i].mask);
+
+ err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+ if (err) {
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
+ irq);
+ goto err_clear_mask;
+ }
+
+ return 0;
+
+err_clear_mask:
+ free_cpumask_var(priv->irq_info[i].mask);
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+ err = mlx5_irq_set_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
@@ -549,7 +562,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
- char name[MLX5_MAX_EQ_NAME];
+ char name[MLX5_MAX_IRQ_NAME];
struct mlx5_eq *eq;
int ncomp_vec;
int nent;
@@ -566,7 +579,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
goto clean;
}
- snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
name, &dev->priv.uuari.uars[0]);
@@ -588,6 +601,61 @@ clean:
return err;
}
+#ifdef CONFIG_MLX5_CORE_EN
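+/* Negotiate the ISSI (Interface Step Sequence ID) with firmware: query the
+ * supported ISSIs and switch to ISSI 1 when it is available, otherwise stay
+ * on ISSI 0.
+ */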
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+ int err;
+ u32 sup_issi;
+
+ memset(query_in, 0, sizeof(query_in));
+ memset(query_out, 0, sizeof(query_out));
+
+ MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+ err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
+ if (err) {
+ if (((struct mlx5_outbox_hdr *)query_out)->status ==
+ MLX5_CMD_STAT_BAD_OP_ERR) {
+ pr_debug("Only ISSI 0 is supported\n");
+ return 0;
+ }
+
+ pr_err("failed to query ISSI\n");
+ return err;
+ }
+
+ sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+ if (sup_issi & (1 << 1)) {
+ memset(set_in, 0, sizeof(set_in));
+ memset(set_out, 0, sizeof(set_out));
+
+ MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+ MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+ err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
+ if (err) {
+ pr_err("failed to set ISSI=1\n");
+ return err;
+ }
+
+ dev->issi = 1;
+
+ return 0;
+ } else if (sup_issi & (1 << 0) || !sup_issi) {
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+#endif
+
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -650,6 +718,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_pagealloc_cleanup;
}
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_core_set_issi(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to set issi\n");
+ goto err_disable_hca;
+ }
+#endif
+
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
dev_err(&pdev->dev, "failed to allocate boot pages\n");
@@ -688,15 +764,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_start_health_poll(dev);
- err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
+ err = mlx5_query_hca_caps(dev);
if (err) {
dev_err(&pdev->dev, "query hca failed\n");
goto err_stop_poll;
}
- err = mlx5_cmd_query_adapter(dev);
+ err = mlx5_query_board_id(dev);
if (err) {
- dev_err(&pdev->dev, "query adapter failed\n");
+ dev_err(&pdev->dev, "query board id failed\n");
goto err_stop_poll;
}
@@ -730,6 +806,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ err = mlx5_irq_set_affinity_hints(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+ goto err_free_comp_eqs;
+ }
+
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
mlx5_init_cq_table(dev);
@@ -739,6 +821,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
+err_free_comp_eqs:
+ free_comp_eqs(dev);
+
err_stop_eqs:
mlx5_stop_eqs(dev);
@@ -793,6 +878,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
+ mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
@@ -1048,6 +1134,10 @@ static int __init init(void)
if (err)
goto err_health;
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_init();
+#endif
+
return 0;
err_health:
@@ -1060,6 +1150,9 @@ err_debug:
static void __exit cleanup(void)
{
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_cleanup();
+#endif
pci_unregister_driver(&mlx5_core_driver);
mlx5_health_cleanup();
destroy_workqueue(mlx5_core_wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d79fd85d1..d5a0c2d61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a051b906a..fc88ecaec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,10 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE "January 2015"
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(dev, format, ...) \
@@ -65,11 +69,20 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+ int in_size, u32 *out,
+ int out_size)
+{
+ mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
- struct mlx5_caps *caps);
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 49e90f261..70147999f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -102,3 +102,229 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(ptys_reg, in, local_port, local_port);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ else
+ *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ else
+ *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
+ if (err)
+ return err;
+
+ *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
+
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ else
+ *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+ if (proto_mask == MLX5_PTYS_EN)
+ MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+ else
+ MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(paos_reg, in, admin_status, status);
+	MLX5_SET(paos_reg, in, ase, 1); /* admin state update enable */
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 0);
+ if (err)
+ return err;
+
+ *status = MLX5_GET(paos_reg, out, oper_status);
+ return err;
+}
+
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+ int *max_mtu, int *oper_mtu, u8 port)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, local_port, port);
+
+ mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 0);
+
+ if (max_mtu)
+ *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
+ if (oper_mtu)
+ *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+ if (admin_mtu)
+ *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+ MLX5_SET(pmtu_reg, in, local_port, port);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+ u8 port)
+{
+ mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port)
+{
+ mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+ int pvlc_size, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+	MLX5_SET(pvlc_reg, in, local_port, local_port);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+ pvlc_size, MLX5_REG_PVLC, 0, 0);
+
+ return err;
+}
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
+ if (err)
+ return err;
+
+ *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index dc7dbf7e9..8b494b562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
int err;
+ void *qpc;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+ if (dev->issi) {
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(qpc, qpc, user_index, 0xffffff);
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "ret %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f9d25dcd0..c48f504cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
+#include "transobj.h"
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
@@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
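+/* Size in bytes of the PAS (physical address) array needed to map the whole
+ * receive queue, including the page offset, rounded up to whole pages.
+ */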
+static int get_pas_size(void *srqc)
+{
+ u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
+ u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
+ u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
+ u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 po_quanta = 1 << (log_page_size - 6);
+ u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
+ u32 page_size = 1 << log_page_size;
+ u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
+ u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
+
+ return rq_num_pas * sizeof(u64);
+}
+
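+/* Translate between the legacy SRQ context and the RMP context used when the
+ * device runs with a non-zero ISSI; direction is selected by srqc_to_rmpc.
+ */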
+static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+{
+ void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ if (srqc_to_rmpc) {
+ switch (MLX5_GET(srqc, srqc, state)) {
+ case MLX5_SRQC_STATE_GOOD:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ break;
+ case MLX5_SRQC_STATE_ERROR:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
+ __LINE__, MLX5_GET(srqc, srqc, state));
+ MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
+ }
+
+ MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
+ MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
+ MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
+ MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
+ MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
+ MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
+ } else {
+ switch (MLX5_GET(rmpc, rmpc, state)) {
+ case MLX5_RMPC_STATE_RDY:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
+ break;
+ case MLX5_RMPC_STATE_ERR:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
+ __func__, __LINE__,
+ MLX5_GET(rmpc, rmpc, state));
+ MLX5_SET(srqc, srqc, state,
+ MLX5_GET(rmpc, rmpc, state));
+ }
+
+ MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
+ MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
+ MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
+ MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
+ MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
+ MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
+ MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
+ MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
+ }
+}
+
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
@@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
}
EXPORT_SYMBOL(mlx5_core_get_srq);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen)
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen)
{
struct mlx5_create_srq_mbox_out out;
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_destroy_srq_mbox_in din;
- struct mlx5_destroy_srq_mbox_out dout;
int err;
memset(&out, 0, sizeof(out));
+
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err)
- return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
+ sizeof(out));
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+ return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ struct mlx5_destroy_srq_mbox_in in;
+ struct mlx5_destroy_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)(&out), sizeof(out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ struct mlx5_arm_srq_mbox_in in;
+ struct mlx5_arm_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
+ in.hdr.opmod = cpu_to_be16(!!is_srq);
+ in.srqn = cpu_to_be32(srq->srqn);
+ in.lwm = cpu_to_be16(lwm);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
+ sizeof(in), (u32 *)(&out),
+ sizeof(out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ struct mlx5_query_srq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)out, sizeof(*out));
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int srq_inlen)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ void *create_in;
+ void *srqc;
+ void *xrc_srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+ xrc_srq_context_entry);
+ pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+ memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ memcpy(pas, in->pas, pas_size);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
+ MLX5_SET(create_xrc_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(create_out, 0, sizeof(create_out));
+ err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+ sizeof(create_out));
+ if (err)
+ goto out;
+
+ srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+out:
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq, u16 lwm)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 *xrcsrq_out;
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!xrcsrq_out)
+ return -ENOMEM;
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (err)
+ goto out;
+
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+
+out:
+ kvfree(xrcsrq_out);
+ return err;
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+{
+ void *create_in;
+ void *rmpc;
+ void *srqc;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+
+ memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+ rmpc_srqc_reformat(srqc, rmpc, true);
+
+ err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+ return err;
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 *rmp_out;
+ void *rmpc;
+ void *srqc;
+ int err;
+
+ rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ if (!rmp_out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+ if (err)
+ goto out;
+
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+ rmpc_srqc_reformat(srqc, rmpc, false);
+
+out:
+ kvfree(rmp_out);
+ return err;
+}
+
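+/* On ISSI 0 use the legacy SRQ commands; on later interface versions use the
+ * XRC SRQ or RMP based implementation instead.
+ */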
+static int create_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int inlen, int is_xrc)
+{
+ if (!dev->issi)
+ return create_srq_cmd(dev, srq, in, inlen);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return create_xrc_srq_cmd(dev, srq, in, inlen);
+ else
+ return create_rmp_cmd(dev, srq, in, inlen);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ if (!dev->issi)
+ return destroy_srq_cmd(dev, srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return destroy_xrc_srq_cmd(dev, srq);
+ else
+ return destroy_rmp_cmd(dev, srq);
+}
+
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc)
+{
+ int err;
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+ srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+
+ err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ if (err)
+ return err;
+
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
spin_unlock_irq(&table->lock);
if (err) {
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
- goto err_cmd;
+ goto err_destroy_srq_split;
}
return 0;
-err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.srqn = cpu_to_be32(srq->srqn);
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+err_destroy_srq_split:
+ destroy_srq_split(dev, srq);
+
return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
- struct mlx5_destroy_srq_mbox_in in;
- struct mlx5_destroy_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *tmp;
int err;
@@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
return -EINVAL;
}
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ err = destroy_srq_split(dev, srq);
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
@@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
- struct mlx5_query_srq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, sizeof(*out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ if (!dev->issi)
+ return query_srq_cmd(dev, srq, out);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return query_xrc_srq_cmd(dev, srq, out);
+ else
+ return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- struct mlx5_arm_srq_mbox_in in;
- struct mlx5_arm_srq_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
- in.hdr.opmod = cpu_to_be16(!!is_srq);
- in.srqn = cpu_to_be32(srq->srqn);
- in.lwm = cpu_to_be16(lwm);
-
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ if (!dev->issi)
+ return arm_srq_cmd(dev, srq, lwm, is_srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return arm_xrc_srq_cmd(dev, srq, lwm);
+ else
+ return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
new file mode 100644
index 000000000..8d98b0302
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ int err;
+
+ MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqn = MLX5_GET(create_rq_out, out, rqn);
+
+ return err;
+}
+
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+ MLX5_SET(modify_rq_in, in, rqn, rqn);
+ MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ int err;
+
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *sqn = MLX5_GET(create_sq_out, out, sqn);
+
+ return err;
+}
+
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ int err;
+
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ int err;
+
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ int err;
+
+ MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+ return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *xsrqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ int err;
+
+ MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+ return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!err) {
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ }
+
+ return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
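All of these transport-object helpers follow the same pattern: zero an inbox, set the opcode (plus the object number for modify/destroy/query), execute the command, and read the firmware-assigned object number from the outbox. As a rough illustration of how a consumer is expected to drive them, the sketch below creates a direct TIR over an existing RQ and assumes the TIR context layout (ctx, transport_domain, disp_type, inline_rqn) from the mlx5_ifc headers introduced earlier in this series; it is a hedged example, not part of the patch.

/* Hedged sketch: create a TIR that points at an existing RQ. Field names
 * in the tirc context are assumptions taken from mlx5_ifc.h; error handling
 * is trimmed to the minimum.
 */
static int example_create_direct_tir(struct mlx5_core_dev *mdev, u32 tdn,
				     u32 rqn, u32 *tirn)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, transport_domain, tdn);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, rqn);

	return mlx5_core_create_tir(mdev, in, sizeof(in), tirn);
	/* teardown is symmetric: mlx5_core_destroy_tir(mdev, *tirn); */
}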
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
new file mode 100644
index 000000000..f9ef24471
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *xsrqn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm);
+
+#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5a89bb1d6..9ef85873c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
for (i = 0; i < tot_uuars; i++) {
bf = &uuari->bfs[i];
- bf->buf_size = dev->caps.gen.bf_reg_size / 2;
+ bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
bf->reg = NULL; /* Add WC support */
- bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
- MLX5_BF_OFFSET;
+ bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+ (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+ MLX5_BF_OFFSET;
bf->need_lock = need_uuar_lock(i);
spin_lock_init(&bf->lock);
spin_lock_init(&bf->lock32);
@@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
return 0;
}
+
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ phys_addr_t pfn;
+ phys_addr_t uar_bar_start;
+ int err;
+
+ err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+ return err;
+ }
+
+ uar_bar_start = pci_resource_start(mdev->pdev, 0);
+ pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map) {
+ mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+ err = -ENOMEM;
+ goto err_free_uar;
+ }
+
+ return 0;
+
+err_free_uar:
+ mlx5_cmd_free_uar(mdev, uar->index);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ iounmap(uar->map);
+ mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
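mlx5_alloc_map_uar() pairs the firmware UAR allocation with an ioremap of the matching page in BAR 0, so a consumer only keeps a struct mlx5_uar around and undoes both steps with mlx5_unmap_free_uar(). A minimal, hedged usage sketch (the surrounding sub-driver is hypothetical):

/* Sketch: allocate and map a UAR, ring doorbells through uar.map, then
 * release it on teardown. Assumes struct mlx5_uar from linux/mlx5/driver.h.
 */
struct mlx5_uar uar;
int err;

err = mlx5_alloc_map_uar(mdev, &uar);
if (err)
	return err;

/* ... write doorbell records / blueflame data through uar.map ... */

mlx5_unmap_free_uar(mdev, &uar);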
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
new file mode 100644
index 000000000..b94177ebc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+ return MLX5_GET(query_vport_state_out, out, state);
+}
+EXPORT_SYMBOL(mlx5_query_vport_state);
+
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u8 *out_addr;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context.permanent_address);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+ memset(out, 0, outlen);
+ mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+
+ ether_addr_copy(addr, &out_addr[2]);
+
+ kvfree(out);
+}
+EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);
+
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ union ib_gid *tmp;
+ int tbsz;
+ int nout;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+ mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
+ vf_num, gid_index, tbsz);
+
+ if (gid_index > tbsz && gid_index != 0xffff)
+ return -EINVAL;
+
+ if (gid_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * sizeof(*gid);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ gid->global.subnet_prefix = tmp->global.subnet_prefix;
+ gid->global.interface_id = tmp->global.interface_id;
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ void *pkarr;
+ int nout;
+ int tbsz;
+ int err;
+ int i;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+ if (pkey_index > tbsz && pkey_index != 0xffff)
+ return -EINVAL;
+
+ if (pkey_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+ for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
+ *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int is_group_manager;
+ void *out;
+ void *ctx;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
+ } else {
+ err = -EPERM;
+ goto ex;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto ex;
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto ex;
+
+ ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
+ rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
+ rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
+ rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
+ rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
+ rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
+ rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
+ rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
+ rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
+ rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
+ rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask1_field_select);
+ rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
+ rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask2_field_select);
+ rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
+ rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
+ init_type_reply);
+ rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
+ rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
+ subnet_timeout);
+ rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
+ rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
+ rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ qkey_violation_counter);
+ rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ pkey_violation_counter);
+ rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
+ rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
+ system_image_guid);
+
+ex:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *sys_image_guid = rep->sys_image_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *node_guid = rep->node_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
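The vport helpers above give upper layers (the IB driver and the coming Ethernet netdev code) a small query surface: link/admin state, the permanent MAC, and the HCA vport context. A hedged sketch of the two Ethernet-facing calls; the op_mod constant is assumed from mlx5_ifc.h and is not defined in this file:

/* Sketch: read the permanent MAC and the current vNIC vport state.
 * MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT is an assumption from the
 * interface headers; ETH_ALEN comes from linux/etherdevice.h.
 */
u8 mac[ETH_ALEN];
u8 state;

mlx5_query_nic_vport_mac_address(mdev, mac);
state = mlx5_query_vport_state(mdev,
			       MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);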
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644
index 000000000..838841158
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+ return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+ return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+ return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ struct mlx5_wqe_srq_next_seg *next_seg;
+ int err;
+ int i;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ for (i = 0; i < wq->sz_m1; i++) {
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ next_seg->next_wqe_index = cpu_to_be16(i + 1);
+ }
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ wq->tail_next = &next_seg->next_wqe_index;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+ mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+ mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
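mlx5_wq_ll_create() pre-chains every WQE's next_wqe_index so the receive path can treat the ring as a free list: a WQE is posted at wq->head and handed back on completion via mlx5_wq_ll_pop(). The sketch below shows that post/complete cycle under stated assumptions; struct example_rx_wqe is hypothetical (only the embedded mlx5_wqe_srq_next_seg matters for the bookkeeping shown) and buffer setup is omitted.

/* Hedged sketch of linked-list WQ usage. */
struct example_rx_wqe {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg     data;
};

static void example_post_rx_wqe(struct mlx5_wq_ll *wq)
{
	struct example_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

	/* ... fill wqe->data with the buffer address and lkey ... */
	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
	mlx5_wq_ll_update_db_record(wq);
}

static void example_complete_rx_wqe(struct mlx5_wq_ll *wq, __be16 ix)
{
	struct example_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, be16_to_cpu(ix));

	mlx5_wq_ll_pop(wq, ix, &wqe->next.next_wqe_index);
}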
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644
index 000000000..e0ddd69fb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+ int linear;
+ int numa;
+};
+
+struct mlx5_wq_ctrl {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_buf buf;
+ struct mlx5_db db;
+};
+
+struct mlx5_wq_cyc {
+ void *buf;
+ __be32 *db;
+ u16 sz_m1;
+ u8 log_stride;
+};
+
+struct mlx5_cqwq {
+ void *buf;
+ __be32 *db;
+ u32 sz_m1;
+ u32 cc; /* consumer counter */
+ u8 log_sz;
+ u8 log_stride;
+};
+
+struct mlx5_wq_ll {
+ void *buf;
+ __be32 *db;
+ __be16 *tail_next;
+ u16 sz_m1;
+ u16 head;
+ u16 wqe_ctr;
+ u16 cur_sz;
+ u8 log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+ int equal = (cc1 == cc2);
+ int smaller = 0x8000 & (cc1 - cc2);
+
+ return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+ return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+ return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+ wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+ *wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+ return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+ return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+ wq->head = head_next;
+ wq->wqe_ctr++;
+ wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+ __be16 *next_tail_next)
+{
+ *wq->tail_next = ix;
+ wq->tail_next = next_tail_next;
+ wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+ *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
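The CQ work queue keeps only a consumer counter: the index is the low bits of cc, the wrap count is cc shifted by log_sz, and the doorbell record stores the low 24 bits of cc. A hedged sketch of a poll loop built on these helpers; cqe_is_owned_by_hw() is a placeholder for the usual ownership-bit versus wrap-count comparison and is not part of this header:

/* Sketch: drain valid CQEs from an mlx5_cqwq within a budget. */
static int example_poll_cq(struct mlx5_cqwq *wq, int budget)
{
	int work = 0;

	while (work < budget) {
		u32 ci = mlx5_cqwq_get_ci(wq);
		void *cqe = mlx5_cqwq_get_wqe(wq, ci);

		if (cqe_is_owned_by_hw(cqe, mlx5_cqwq_get_wrap_cnt(wq)))
			break;

		/* ... handle the completion ... */
		mlx5_cqwq_pop(wq);
		work++;
	}

	mlx5_cqwq_update_db_record(wq);
	return work;
}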
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index d16b11ed2..b7e2f4969 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -8,9 +8,7 @@ config NET_VENDOR_MICREL
depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM || \
(ARM && ARCH_KS8695)
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00..09d2e16fd 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
- err = dma_mapping_error(adapter->dev,
- sg_dma_address(&tx_ctl->sg));
- if (err) {
+ if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+ err = -ENOMEM;
sg_dma_address(&tx_ctl->sg) = 0;
goto err;
}
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 6f332ebdf..75dc46c5f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6664,7 +6664,7 @@ static void mib_read_work(struct work_struct *work)
wake_up_interruptible(
&hw_priv->counter[i].counter);
}
- } else if (jiffies >= hw_priv->counter[i].time) {
+ } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
/* Only read MIB counters when the port is connected. */
if (media_connected == mib->state)
hw_priv->counter[i].read = 1;
@@ -6689,7 +6689,7 @@ static void mib_monitor(unsigned long ptr)
/* This is used to verify Wake-on-LAN is working. */
if (hw_priv->pme_wait) {
- if (hw_priv->pme_wait <= jiffies) {
+ if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
hw_clr_wol_pme_status(&hw_priv->hw);
hw_priv->pme_wait = 0;
}
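Both ksz884x hunks replace open-coded jiffies comparisons with the wrap-safe macros from <linux/jiffies.h>: a direct `jiffies >= t` test gives the wrong answer once jiffies wraps, while time_after_eq()/time_is_before_eq_jiffies() do the comparison as a signed difference. A minimal, hypothetical illustration:

/* Sketch: arm a 30-second deadline and test it in a wrap-safe way. */
unsigned long deadline = jiffies + 30 * HZ;

if (time_after_eq(jiffies, deadline)) {
	/* deadline reached, correct even if jiffies wrapped in between */
}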
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index afaf0c07f..3fd8ca6d4 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_MICROCHIP
default y
depends on SPI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig
index 1731e050f..5b531da36 100644
--- a/drivers/net/ethernet/moxa/Kconfig
+++ b/drivers/net/ethernet/moxa/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_MOXART
default y
depends on (ARM && ARCH_MOXART)
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 81d0f1c86..becbb5f1f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -244,7 +244,6 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
napi_gro_receive(&priv->napi, skb);
rx++;
- ndev->last_rx = jiffies;
priv->stats.rx_packets++;
priv->stats.rx_bytes += len;
if (desc0 & RX_DESC0_MULTICAST)
diff --git a/drivers/net/ethernet/myricom/Kconfig b/drivers/net/ethernet/myricom/Kconfig
index 3932d081f..9645c7245 100644
--- a/drivers/net/ethernet/myricom/Kconfig
+++ b/drivers/net/ethernet/myricom/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_MYRI
default y
depends on PCI && INET
---help---
- If you have a network (Ethernet) card belonging to this class, say
- Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 6478fcc4a..091db0d18 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -276,7 +276,7 @@ static char *myri10ge_fw_rss_unaligned = "/*(DEBLOBBED)*/";
static char *myri10ge_fw_rss_aligned = "/*(DEBLOBBED)*/";
/*(DEBLOBBED)*/
-/* Careful: must be accessed under kparam_block_sysfs_write */
+/* Careful: must be accessed under kernel_param_lock() */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
@@ -3417,7 +3417,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
}
}
- kparam_block_sysfs_write(myri10ge_fw_name);
+ kernel_param_lock(THIS_MODULE);
if (myri10ge_fw_name != NULL) {
char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
if (fw_name) {
@@ -3425,7 +3425,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
set_fw_name(mgp, fw_name, true);
}
}
- kparam_unblock_sysfs_write(myri10ge_fw_name);
+ kernel_param_unlock(THIS_MODULE);
if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
myri10ge_fw_names[mgp->board_number] != NULL &&
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index a100860d4..a10ef50e4 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -6,9 +6,7 @@ config NET_VENDOR_NATSEMI
bool "National Semi-conductor devices"
default y
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -24,8 +22,7 @@ config MACSONIC
Support for NatSemi SONIC based Ethernet devices. This includes
the onboard Ethernet in many Quadras as well as some LC-PDS,
a few Nubus and all known Comm Slot Ethernet cards. If you have
- one of these say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ one of these, say Y here.
To compile this driver as a module, choose M here. This module will
be called macsonic.
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 87abb4f10..71899009c 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_EXAR
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say
- Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 1e0f72b65..c28111749 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5308,7 +5308,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
/**
* s2io_ethtool_sset - Sets different link parameters.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @info: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
@@ -5793,7 +5794,8 @@ static void s2io_vpd_read(struct s2io_nic *nic)
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
* @data_buf : user defined value to be written into Eeprom.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 9e1aaa7f3..5f630a24e 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1004,8 +1004,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
- void **tmp_arr;
-
if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
*dtrh = channel->reserve_arr[--channel->reserve_ptr];
@@ -1020,10 +1018,7 @@ _alloc_after_swap:
* i.e. no additional lock need to be done when we free a resource */
if (channel->length - channel->free_ptr > 0) {
-
- tmp_arr = channel->reserve_arr;
- channel->reserve_arr = channel->free_arr;
- channel->free_arr = tmp_arr;
+ swap(channel->reserve_arr, channel->free_arr);
channel->reserve_ptr = channel->length;
channel->reserve_top = channel->free_ptr;
channel->free_ptr = channel->length;
diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig
index 01182b559..71c973f8e 100644
--- a/drivers/net/ethernet/nuvoton/Kconfig
+++ b/drivers/net/ethernet/nuvoton/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_NUVOTON
default y
depends on ARM && ARCH_W90X900
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/nvidia/Kconfig b/drivers/net/ethernet/nvidia/Kconfig
index ace19e7f6..4efc9fe84 100644
--- a/drivers/net/ethernet/nvidia/Kconfig
+++ b/drivers/net/ethernet/nvidia/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_NVIDIA
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -22,9 +20,7 @@ config FORCEDETH
tristate "nForce Ethernet support"
depends on PCI
---help---
- If you have a network (Ethernet) controller of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) controller of this type, say Y here.
To compile this driver as a module, choose M here. The module
will be called forcedeth.
diff --git a/drivers/net/ethernet/oki-semi/Kconfig b/drivers/net/ethernet/oki-semi/Kconfig
index ecd45f9ea..5a975af48 100644
--- a/drivers/net/ethernet/oki-semi/Kconfig
+++ b/drivers/net/ethernet/oki-semi/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_OKI
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index 8d5180043..b5ea2a561 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -7,9 +7,7 @@ config NET_PACKET_ENGINE
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -23,9 +21,7 @@ config HAMACHI
depends on PCI
select MII
---help---
- If you have a Gigabit Ethernet card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a Gigabit Ethernet card of this type, say Y here.
To compile this driver as a module, choose M here. The module will be
called hamachi.
diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig
index 01e6c329d..db19c6f49 100644
--- a/drivers/net/ethernet/pasemi/Kconfig
+++ b/drivers/net/ethernet/pasemi/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_PASEMI
default y
depends on PPC_PASEMI && PCI && INET
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index d49cba129..f1f0108c2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_QLOGIC
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 00d00beb2..32707fe0c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1326,9 +1326,6 @@ struct qlcnic_eswitch {
};
-/* Return codes for Error handling */
-#define QL_STATUS_INVALID_PARAM -1
-
#define MAX_BW 100 /* % of link speed */
#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 78648e0ac..c316746d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
if (fw->size & 0xF) {
addr = dest + size;
for (i = 0; i < (fw->size & 0xF); i++)
- data[i] = temp[size + i];
+ data[i] = ((u8 *)temp)[size + i];
for (; i < 16; i++)
data[i] = 0;
ret = qlcnic_ms_mem_write128(adapter, addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 49c525185..dc7501265 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1031,7 +1031,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
pfn = pci_info[i].id;
if (pfn >= ahw->max_vnic_func) {
- ret = QL_STATUS_INVALID_PARAM;
+ ret = -EINVAL;
dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
__func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 59a721fba..05c28f2c6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -24,8 +24,6 @@
#include <linux/hwmon-sysfs.h>
#endif
-#define QLC_STATUS_UNSUPPORTED_CMD -2
-
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
@@ -166,7 +164,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
u8 b_state, b_rate;
if (len != sizeof(u16))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memcpy(&beacon, buf, sizeof(u16));
err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
@@ -383,17 +381,17 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
if (src_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
if (dest_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
s_esw_id = adapter->npars[src_index].phy_port;
d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
@@ -414,7 +412,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
@@ -427,7 +425,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
action = !!pm_cfg[i].action;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
@@ -440,7 +438,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -499,11 +497,11 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -517,25 +515,25 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].mac_override != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].promisc_mode != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
break;
case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
case QLCNIC_DEL_VLAN:
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
default:
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
}
@@ -559,7 +557,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
@@ -570,7 +568,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -604,7 +602,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -654,7 +652,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
@@ -669,11 +667,11 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
!IS_VALID_BW(np_cfg[i].max_bw))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
}
@@ -694,7 +692,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
count = size / sizeof(struct qlcnic_npar_func_cfg);
rem = size % sizeof(struct qlcnic_npar_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
@@ -717,7 +715,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -784,13 +782,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&port_stats, 0, size);
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -819,13 +817,13 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&esw_stats, 0, size);
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -853,10 +851,10 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -883,10 +881,10 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -953,9 +951,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!size)
- return QL_STATUS_INVALID_PARAM;
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
count = size / sizeof(u32);
@@ -1132,9 +1128,6 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
-
ret = kstrtoul(buf, 16, &data);
switch (data) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 25800a1de..02b7115b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3871,9 +3871,6 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
return status;
}
- end_jiffies = jiffies +
- max((unsigned long)1, usecs_to_jiffies(30));
-
/* Check if bit is set then skip the mailbox command and
* clear the bit, else we are in normal reset process.
*/
@@ -3888,6 +3885,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+ end_jiffies = jiffies + usecs_to_jiffies(30);
do {
value = ql_read32(qdev, RST_FO);
if ((value & RST_FO_FR) == 0)
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9a49f42ac..a76e380cf 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -6,9 +6,7 @@ config NET_VENDOR_QUALCOMM
bool "Qualcomm devices"
default y
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 6af028d5f..2f87909f5 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -839,7 +839,7 @@ static const struct of_device_id qca_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, qca_spi_of_match);
static int
-qca_spi_probe(struct spi_device *spi_device)
+qca_spi_probe(struct spi_device *spi)
{
struct qcaspi *qca = NULL;
struct net_device *qcaspi_devs = NULL;
@@ -847,52 +847,52 @@ qca_spi_probe(struct spi_device *spi_device)
u16 signature;
const char *mac;
- if (!spi_device->dev.of_node) {
- dev_err(&spi_device->dev, "Missing device tree\n");
+ if (!spi->dev.of_node) {
+ dev_err(&spi->dev, "Missing device tree\n");
return -EINVAL;
}
- legacy_mode = of_property_read_bool(spi_device->dev.of_node,
+ legacy_mode = of_property_read_bool(spi->dev.of_node,
"qca,legacy-mode");
if (qcaspi_clkspeed == 0) {
- if (spi_device->max_speed_hz)
- qcaspi_clkspeed = spi_device->max_speed_hz;
+ if (spi->max_speed_hz)
+ qcaspi_clkspeed = spi->max_speed_hz;
else
qcaspi_clkspeed = QCASPI_CLK_SPEED;
}
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_info(&spi_device->dev, "Invalid clkspeed: %d\n",
+ dev_info(&spi->dev, "Invalid clkspeed: %d\n",
qcaspi_clkspeed);
return -EINVAL;
}
if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
(qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
- dev_info(&spi_device->dev, "Invalid burst len: %d\n",
+ dev_info(&spi->dev, "Invalid burst len: %d\n",
qcaspi_burst_len);
return -EINVAL;
}
if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
(qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
- dev_info(&spi_device->dev, "Invalid pluggable: %d\n",
+ dev_info(&spi->dev, "Invalid pluggable: %d\n",
qcaspi_pluggable);
return -EINVAL;
}
- dev_info(&spi_device->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
+ dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION,
qcaspi_clkspeed,
qcaspi_burst_len,
qcaspi_pluggable);
- spi_device->mode = SPI_MODE_3;
- spi_device->max_speed_hz = qcaspi_clkspeed;
- if (spi_setup(spi_device) < 0) {
- dev_err(&spi_device->dev, "Unable to setup SPI device\n");
+ spi->mode = SPI_MODE_3;
+ spi->max_speed_hz = qcaspi_clkspeed;
+ if (spi_setup(spi) < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI device\n");
return -EFAULT;
}
@@ -905,23 +905,23 @@ qca_spi_probe(struct spi_device *spi_device)
qca = netdev_priv(qcaspi_devs);
if (!qca) {
free_netdev(qcaspi_devs);
- dev_err(&spi_device->dev, "Fail to retrieve private structure\n");
+ dev_err(&spi->dev, "Fail to retrieve private structure\n");
return -ENOMEM;
}
qca->net_dev = qcaspi_devs;
- qca->spi_dev = spi_device;
+ qca->spi_dev = spi;
qca->legacy_mode = legacy_mode;
- spi_set_drvdata(spi_device, qcaspi_devs);
+ spi_set_drvdata(spi, qcaspi_devs);
- mac = of_get_mac_address(spi_device->dev.of_node);
+ mac = of_get_mac_address(spi->dev.of_node);
if (mac)
ether_addr_copy(qca->net_dev->dev_addr, mac);
if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
eth_hw_addr_random(qca->net_dev);
- dev_info(&spi_device->dev, "Using random MAC address: %pM\n",
+ dev_info(&spi->dev, "Using random MAC address: %pM\n",
qca->net_dev->dev_addr);
}
@@ -932,7 +932,7 @@ qca_spi_probe(struct spi_device *spi_device)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- dev_err(&spi_device->dev, "Invalid signature (0x%04X)\n",
+ dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
signature);
free_netdev(qcaspi_devs);
return -EFAULT;
@@ -940,7 +940,7 @@ qca_spi_probe(struct spi_device *spi_device)
}
if (register_netdev(qcaspi_devs)) {
- dev_info(&spi_device->dev, "Unable to register net device %s\n",
+ dev_info(&spi->dev, "Unable to register net device %s\n",
qcaspi_devs->name);
free_netdev(qcaspi_devs);
return -EFAULT;
@@ -952,9 +952,9 @@ qca_spi_probe(struct spi_device *spi_device)
}
static int
-qca_spi_remove(struct spi_device *spi_device)
+qca_spi_remove(struct spi_device *spi)
{
- struct net_device *qcaspi_devs = spi_get_drvdata(spi_device);
+ struct net_device *qcaspi_devs = spi_get_drvdata(spi);
struct qcaspi *qca = netdev_priv(qcaspi_devs);
qcaspi_remove_device_debugfs(qca);
diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig
index 2055f7eb2..a9c4e990d 100644
--- a/drivers/net/ethernet/rdc/Kconfig
+++ b/drivers/net/ethernet/rdc/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_RDC
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index ae5d02709..7c69f4c81 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_REALTEK
default y
depends on PCI || (PARPORT && X86)
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -24,8 +22,7 @@ config ATP
select CRC32
---help---
This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:drivers/net/ethernet/realtek/atp.c> as well as the
- Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>,
+ port. Read the file <file:drivers/net/ethernet/realtek/atp.c>
if you want to use this. If you intend to use this driver, you
should have said N to the "Parallel printer support", because the two
drivers don't like each other.
@@ -40,9 +37,7 @@ config 8139CP
select MII
---help---
This is a driver for the Fast Ethernet PCI network cards based on
- the RTL8139C+ chips. If you have one of those, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ the RTL8139C+ chips. If you have one of those, say Y here.
To compile this driver as a module, choose M here: the module
will be called 8139cp. This is recommended.
@@ -54,8 +49,7 @@ config 8139TOO
select MII
---help---
This is a driver for the Fast Ethernet PCI network cards based on
- the RTL 8129/8130/8139 chips. If you have one of those, say Y and
- read the Ethernet-HOWTO <http://www.tldp.org/docs.html#howto>.
+ the RTL 8129/8130/8139 chips. If you have one of those, say Y here.
To compile this driver as a module, choose M here: the module
will be called 8139too. This is recommended.
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 8267ffead..70335f229 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4857,10 +4857,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 196e98a2d..270c4c9ca 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -2,6 +2,19 @@
# Renesas device configuration
#
+config NET_VENDOR_RENESAS
+ bool "Renesas devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Renesas devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_RENESAS
+
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
@@ -15,3 +28,19 @@ config SH_ETH
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
R8A7740, R8A777x and R8A779x.
+
+config RAVB
+ tristate "Renesas Ethernet AVB support"
+ depends on HAS_DMA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select CRC32
+ select MII
+ select MDIO_BITBANG
+ select PHYLIB
+ select PTP_1588_CLOCK
+ help
+ Renesas Ethernet AVB device driver.
+ This driver supports the following SoCs:
+ - R8A779x.
+
+endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 1c278a8e0..a05102a7d 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -3,3 +3,7 @@
#
obj-$(CONFIG_SH_ETH) += sh_eth.o
+
+ravb-objs := ravb_main.o ravb_ptp.o
+
+obj-$(CONFIG_RAVB) += ravb.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
new file mode 100644
index 000000000..8aa50ac4e
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -0,0 +1,832 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RAVB_H__
+#define __RAVB_H__
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
+
+#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
+#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
+#define NC_TX_RING_SIZE 64 /* TX ring size for Network Control */
+#define NC_RX_RING_SIZE 64 /* RX ring size for Network Control */
+#define BE_TX_RING_MIN 64
+#define BE_RX_RING_MIN 64
+#define BE_TX_RING_MAX 1024
+#define BE_RX_RING_MAX 2048
+
+#define PKT_BUF_SZ 1538
+
+/* Driver's parameters */
+#define RAVB_ALIGN 128
+
+/* Hardware time stamp */
+#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */
+#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */
+
+#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */
+#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */
+#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002
+#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006
+#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */
+
+enum ravb_reg {
+ /* AVB-DMAC registers */
+ CCC = 0x0000,
+ DBAT = 0x0004,
+ DLR = 0x0008,
+ CSR = 0x000C,
+ CDAR0 = 0x0010,
+ CDAR1 = 0x0014,
+ CDAR2 = 0x0018,
+ CDAR3 = 0x001C,
+ CDAR4 = 0x0020,
+ CDAR5 = 0x0024,
+ CDAR6 = 0x0028,
+ CDAR7 = 0x002C,
+ CDAR8 = 0x0030,
+ CDAR9 = 0x0034,
+ CDAR10 = 0x0038,
+ CDAR11 = 0x003C,
+ CDAR12 = 0x0040,
+ CDAR13 = 0x0044,
+ CDAR14 = 0x0048,
+ CDAR15 = 0x004C,
+ CDAR16 = 0x0050,
+ CDAR17 = 0x0054,
+ CDAR18 = 0x0058,
+ CDAR19 = 0x005C,
+ CDAR20 = 0x0060,
+ CDAR21 = 0x0064,
+ ESR = 0x0088,
+ RCR = 0x0090,
+ RQC0 = 0x0094,
+ RQC1 = 0x0098,
+ RQC2 = 0x009C,
+ RQC3 = 0x00A0,
+ RQC4 = 0x00A4,
+ RPC = 0x00B0,
+ UFCW = 0x00BC,
+ UFCS = 0x00C0,
+ UFCV0 = 0x00C4,
+ UFCV1 = 0x00C8,
+ UFCV2 = 0x00CC,
+ UFCV3 = 0x00D0,
+ UFCV4 = 0x00D4,
+ UFCD0 = 0x00E0,
+ UFCD1 = 0x00E4,
+ UFCD2 = 0x00E8,
+ UFCD3 = 0x00EC,
+ UFCD4 = 0x00F0,
+ SFO = 0x00FC,
+ SFP0 = 0x0100,
+ SFP1 = 0x0104,
+ SFP2 = 0x0108,
+ SFP3 = 0x010C,
+ SFP4 = 0x0110,
+ SFP5 = 0x0114,
+ SFP6 = 0x0118,
+ SFP7 = 0x011C,
+ SFP8 = 0x0120,
+ SFP9 = 0x0124,
+ SFP10 = 0x0128,
+ SFP11 = 0x012C,
+ SFP12 = 0x0130,
+ SFP13 = 0x0134,
+ SFP14 = 0x0138,
+ SFP15 = 0x013C,
+ SFP16 = 0x0140,
+ SFP17 = 0x0144,
+ SFP18 = 0x0148,
+ SFP19 = 0x014C,
+ SFP20 = 0x0150,
+ SFP21 = 0x0154,
+ SFP22 = 0x0158,
+ SFP23 = 0x015C,
+ SFP24 = 0x0160,
+ SFP25 = 0x0164,
+ SFP26 = 0x0168,
+ SFP27 = 0x016C,
+ SFP28 = 0x0170,
+ SFP29 = 0x0174,
+ SFP30 = 0x0178,
+ SFP31 = 0x017C,
+ SFM0 = 0x01C0,
+ SFM1 = 0x01C4,
+ TGC = 0x0300,
+ TCCR = 0x0304,
+ TSR = 0x0308,
+ TFA0 = 0x0310,
+ TFA1 = 0x0314,
+ TFA2 = 0x0318,
+ CIVR0 = 0x0320,
+ CIVR1 = 0x0324,
+ CDVR0 = 0x0328,
+ CDVR1 = 0x032C,
+ CUL0 = 0x0330,
+ CUL1 = 0x0334,
+ CLL0 = 0x0338,
+ CLL1 = 0x033C,
+ DIC = 0x0350,
+ DIS = 0x0354,
+ EIC = 0x0358,
+ EIS = 0x035C,
+ RIC0 = 0x0360,
+ RIS0 = 0x0364,
+ RIC1 = 0x0368,
+ RIS1 = 0x036C,
+ RIC2 = 0x0370,
+ RIS2 = 0x0374,
+ TIC = 0x0378,
+ TIS = 0x037C,
+ ISS = 0x0380,
+ GCCR = 0x0390,
+ GMTT = 0x0394,
+ GPTC = 0x0398,
+ GTI = 0x039C,
+ GTO0 = 0x03A0,
+ GTO1 = 0x03A4,
+ GTO2 = 0x03A8,
+ GIC = 0x03AC,
+ GIS = 0x03B0,
+ GCPT = 0x03B4, /* Undocumented? */
+ GCT0 = 0x03B8,
+ GCT1 = 0x03BC,
+ GCT2 = 0x03C0,
+
+ /* E-MAC registers */
+ ECMR = 0x0500,
+ RFLR = 0x0508,
+ ECSR = 0x0510,
+ ECSIPR = 0x0518,
+ PIR = 0x0520,
+ PSR = 0x0528,
+ PIPR = 0x052c,
+ MPR = 0x0558,
+ PFTCR = 0x055c,
+ PFRCR = 0x0560,
+ GECMR = 0x05b0,
+ MAHR = 0x05c0,
+ MALR = 0x05c8,
+ TROCR = 0x0700, /* Undocumented? */
+ CDCR = 0x0708, /* Undocumented? */
+ LCCR = 0x0710, /* Undocumented? */
+ CEFCR = 0x0740,
+ FRECR = 0x0748,
+ TSFRCR = 0x0750,
+ TLFRCR = 0x0758,
+ RFCR = 0x0760,
+ CERCR = 0x0768, /* Undocumented? */
+ CEECR = 0x0770, /* Undocumented? */
+ MAFCR = 0x0778,
+};
+
+
+/* Register bits of the Ethernet AVB */
+/* CCC */
+enum CCC_BIT {
+ CCC_OPC = 0x00000003,
+ CCC_OPC_RESET = 0x00000000,
+ CCC_OPC_CONFIG = 0x00000001,
+ CCC_OPC_OPERATION = 0x00000002,
+ CCC_DTSR = 0x00000100,
+ CCC_CSEL = 0x00030000,
+ CCC_CSEL_HPB = 0x00010000,
+ CCC_CSEL_ETH_TX = 0x00020000,
+ CCC_CSEL_GMII_REF = 0x00030000,
+ CCC_BOC = 0x00100000, /* Undocumented? */
+ CCC_LBME = 0x01000000,
+};
+
+/* CSR */
+enum CSR_BIT {
+ CSR_OPS = 0x0000000F,
+ CSR_OPS_RESET = 0x00000001,
+ CSR_OPS_CONFIG = 0x00000002,
+ CSR_OPS_OPERATION = 0x00000004,
+ CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_DTS = 0x00000100,
+ CSR_TPO0 = 0x00010000,
+ CSR_TPO1 = 0x00020000,
+ CSR_TPO2 = 0x00040000,
+ CSR_TPO3 = 0x00080000,
+ CSR_RPO = 0x00100000,
+};
+
+/* ESR */
+enum ESR_BIT {
+ ESR_EQN = 0x0000001F,
+ ESR_ET = 0x00000F00,
+ ESR_EIL = 0x00001000,
+};
+
+/* RCR */
+enum RCR_BIT {
+ RCR_EFFS = 0x00000001,
+ RCR_ENCF = 0x00000002,
+ RCR_ESF = 0x0000000C,
+ RCR_ETS0 = 0x00000010,
+ RCR_ETS2 = 0x00000020,
+ RCR_RFCL = 0x1FFF0000,
+};
+
+/* RQC0/1/2/3/4 */
+enum RQC_BIT {
+ RQC_RSM0 = 0x00000003,
+ RQC_UFCC0 = 0x00000030,
+ RQC_RSM1 = 0x00000300,
+ RQC_UFCC1 = 0x00003000,
+ RQC_RSM2 = 0x00030000,
+ RQC_UFCC2 = 0x00300000,
+ RQC_RSM3 = 0x03000000,
+ RQC_UFCC3 = 0x30000000,
+};
+
+/* RPC */
+enum RPC_BIT {
+ RPC_PCNT = 0x00000700,
+ RPC_DCNT = 0x00FF0000,
+};
+
+/* UFCW */
+enum UFCW_BIT {
+ UFCW_WL0 = 0x0000003F,
+ UFCW_WL1 = 0x00003F00,
+ UFCW_WL2 = 0x003F0000,
+ UFCW_WL3 = 0x3F000000,
+};
+
+/* UFCS */
+enum UFCS_BIT {
+ UFCS_SL0 = 0x0000003F,
+ UFCS_SL1 = 0x00003F00,
+ UFCS_SL2 = 0x003F0000,
+ UFCS_SL3 = 0x3F000000,
+};
+
+/* UFCV0/1/2/3/4 */
+enum UFCV_BIT {
+ UFCV_CV0 = 0x0000003F,
+ UFCV_CV1 = 0x00003F00,
+ UFCV_CV2 = 0x003F0000,
+ UFCV_CV3 = 0x3F000000,
+};
+
+/* UFCD0/1/2/3/4 */
+enum UFCD_BIT {
+ UFCD_DV0 = 0x0000003F,
+ UFCD_DV1 = 0x00003F00,
+ UFCD_DV2 = 0x003F0000,
+ UFCD_DV3 = 0x3F000000,
+};
+
+/* SFO */
+enum SFO_BIT {
+ SFO_FPB = 0x0000003F,
+};
+
+/* RTC */
+enum RTC_BIT {
+ RTC_MFL0 = 0x00000FFF,
+ RTC_MFL1 = 0x0FFF0000,
+};
+
+/* TGC */
+enum TGC_BIT {
+ TGC_TSM0 = 0x00000001,
+ TGC_TSM1 = 0x00000002,
+ TGC_TSM2 = 0x00000004,
+ TGC_TSM3 = 0x00000008,
+ TGC_TQP = 0x00000030,
+ TGC_TQP_NONAVB = 0x00000000,
+ TGC_TQP_AVBMODE1 = 0x00000010,
+ TGC_TQP_AVBMODE2 = 0x00000030,
+ TGC_TBD0 = 0x00000300,
+ TGC_TBD1 = 0x00003000,
+ TGC_TBD2 = 0x00030000,
+ TGC_TBD3 = 0x00300000,
+};
+
+/* TCCR */
+enum TCCR_BIT {
+ TCCR_TSRQ0 = 0x00000001,
+ TCCR_TSRQ1 = 0x00000002,
+ TCCR_TSRQ2 = 0x00000004,
+ TCCR_TSRQ3 = 0x00000008,
+ TCCR_TFEN = 0x00000100,
+ TCCR_TFR = 0x00000200,
+};
+
+/* TSR */
+enum TSR_BIT {
+ TSR_CCS0 = 0x00000003,
+ TSR_CCS1 = 0x0000000C,
+ TSR_TFFL = 0x00000700,
+};
+
+/* TFA2 */
+enum TFA2_BIT {
+ TFA2_TSV = 0x0000FFFF,
+ TFA2_TST = 0x03FF0000,
+};
+
+/* DIC */
+enum DIC_BIT {
+ DIC_DPE1 = 0x00000002,
+ DIC_DPE2 = 0x00000004,
+ DIC_DPE3 = 0x00000008,
+ DIC_DPE4 = 0x00000010,
+ DIC_DPE5 = 0x00000020,
+ DIC_DPE6 = 0x00000040,
+ DIC_DPE7 = 0x00000080,
+ DIC_DPE8 = 0x00000100,
+ DIC_DPE9 = 0x00000200,
+ DIC_DPE10 = 0x00000400,
+ DIC_DPE11 = 0x00000800,
+ DIC_DPE12 = 0x00001000,
+ DIC_DPE13 = 0x00002000,
+ DIC_DPE14 = 0x00004000,
+ DIC_DPE15 = 0x00008000,
+};
+
+/* DIS */
+enum DIS_BIT {
+ DIS_DPF1 = 0x00000002,
+ DIS_DPF2 = 0x00000004,
+ DIS_DPF3 = 0x00000008,
+ DIS_DPF4 = 0x00000010,
+ DIS_DPF5 = 0x00000020,
+ DIS_DPF6 = 0x00000040,
+ DIS_DPF7 = 0x00000080,
+ DIS_DPF8 = 0x00000100,
+ DIS_DPF9 = 0x00000200,
+ DIS_DPF10 = 0x00000400,
+ DIS_DPF11 = 0x00000800,
+ DIS_DPF12 = 0x00001000,
+ DIS_DPF13 = 0x00002000,
+ DIS_DPF14 = 0x00004000,
+ DIS_DPF15 = 0x00008000,
+};
+
+/* EIC */
+enum EIC_BIT {
+ EIC_MREE = 0x00000001,
+ EIC_MTEE = 0x00000002,
+ EIC_QEE = 0x00000004,
+ EIC_SEE = 0x00000008,
+ EIC_CLLE0 = 0x00000010,
+ EIC_CLLE1 = 0x00000020,
+ EIC_CULE0 = 0x00000040,
+ EIC_CULE1 = 0x00000080,
+ EIC_TFFE = 0x00000100,
+};
+
+/* EIS */
+enum EIS_BIT {
+ EIS_MREF = 0x00000001,
+ EIS_MTEF = 0x00000002,
+ EIS_QEF = 0x00000004,
+ EIS_SEF = 0x00000008,
+ EIS_CLLF0 = 0x00000010,
+ EIS_CLLF1 = 0x00000020,
+ EIS_CULF0 = 0x00000040,
+ EIS_CULF1 = 0x00000080,
+ EIS_TFFF = 0x00000100,
+ EIS_QFS = 0x00010000,
+};
+
+/* RIC0 */
+enum RIC0_BIT {
+ RIC0_FRE0 = 0x00000001,
+ RIC0_FRE1 = 0x00000002,
+ RIC0_FRE2 = 0x00000004,
+ RIC0_FRE3 = 0x00000008,
+ RIC0_FRE4 = 0x00000010,
+ RIC0_FRE5 = 0x00000020,
+ RIC0_FRE6 = 0x00000040,
+ RIC0_FRE7 = 0x00000080,
+ RIC0_FRE8 = 0x00000100,
+ RIC0_FRE9 = 0x00000200,
+ RIC0_FRE10 = 0x00000400,
+ RIC0_FRE11 = 0x00000800,
+ RIC0_FRE12 = 0x00001000,
+ RIC0_FRE13 = 0x00002000,
+ RIC0_FRE14 = 0x00004000,
+ RIC0_FRE15 = 0x00008000,
+ RIC0_FRE16 = 0x00010000,
+ RIC0_FRE17 = 0x00020000,
+};
+
+/* RIS0 */
+enum RIS0_BIT {
+ RIS0_FRF0 = 0x00000001,
+ RIS0_FRF1 = 0x00000002,
+ RIS0_FRF2 = 0x00000004,
+ RIS0_FRF3 = 0x00000008,
+ RIS0_FRF4 = 0x00000010,
+ RIS0_FRF5 = 0x00000020,
+ RIS0_FRF6 = 0x00000040,
+ RIS0_FRF7 = 0x00000080,
+ RIS0_FRF8 = 0x00000100,
+ RIS0_FRF9 = 0x00000200,
+ RIS0_FRF10 = 0x00000400,
+ RIS0_FRF11 = 0x00000800,
+ RIS0_FRF12 = 0x00001000,
+ RIS0_FRF13 = 0x00002000,
+ RIS0_FRF14 = 0x00004000,
+ RIS0_FRF15 = 0x00008000,
+ RIS0_FRF16 = 0x00010000,
+ RIS0_FRF17 = 0x00020000,
+};
+
+/* RIC1 */
+enum RIC1_BIT {
+ RIC1_RFWE = 0x80000000,
+};
+
+/* RIS1 */
+enum RIS1_BIT {
+ RIS1_RFWF = 0x80000000,
+};
+
+/* RIC2 */
+enum RIC2_BIT {
+ RIC2_QFE0 = 0x00000001,
+ RIC2_QFE1 = 0x00000002,
+ RIC2_QFE2 = 0x00000004,
+ RIC2_QFE3 = 0x00000008,
+ RIC2_QFE4 = 0x00000010,
+ RIC2_QFE5 = 0x00000020,
+ RIC2_QFE6 = 0x00000040,
+ RIC2_QFE7 = 0x00000080,
+ RIC2_QFE8 = 0x00000100,
+ RIC2_QFE9 = 0x00000200,
+ RIC2_QFE10 = 0x00000400,
+ RIC2_QFE11 = 0x00000800,
+ RIC2_QFE12 = 0x00001000,
+ RIC2_QFE13 = 0x00002000,
+ RIC2_QFE14 = 0x00004000,
+ RIC2_QFE15 = 0x00008000,
+ RIC2_QFE16 = 0x00010000,
+ RIC2_QFE17 = 0x00020000,
+ RIC2_RFFE = 0x80000000,
+};
+
+/* RIS2 */
+enum RIS2_BIT {
+ RIS2_QFF0 = 0x00000001,
+ RIS2_QFF1 = 0x00000002,
+ RIS2_QFF2 = 0x00000004,
+ RIS2_QFF3 = 0x00000008,
+ RIS2_QFF4 = 0x00000010,
+ RIS2_QFF5 = 0x00000020,
+ RIS2_QFF6 = 0x00000040,
+ RIS2_QFF7 = 0x00000080,
+ RIS2_QFF8 = 0x00000100,
+ RIS2_QFF9 = 0x00000200,
+ RIS2_QFF10 = 0x00000400,
+ RIS2_QFF11 = 0x00000800,
+ RIS2_QFF12 = 0x00001000,
+ RIS2_QFF13 = 0x00002000,
+ RIS2_QFF14 = 0x00004000,
+ RIS2_QFF15 = 0x00008000,
+ RIS2_QFF16 = 0x00010000,
+ RIS2_QFF17 = 0x00020000,
+ RIS2_RFFF = 0x80000000,
+};
+
+/* TIC */
+enum TIC_BIT {
+ TIC_FTE0 = 0x00000001, /* Undocumented? */
+ TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_TFUE = 0x00000100,
+ TIC_TFWE = 0x00000200,
+};
+
+/* TIS */
+enum TIS_BIT {
+ TIS_FTF0 = 0x00000001, /* Undocumented? */
+ TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_TFUF = 0x00000100,
+ TIS_TFWF = 0x00000200,
+};
+
+/* ISS */
+enum ISS_BIT {
+ ISS_FRS = 0x00000001, /* Undocumented? */
+ ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_ES = 0x00000040,
+ ISS_MS = 0x00000080,
+ ISS_TFUS = 0x00000100,
+ ISS_TFWS = 0x00000200,
+ ISS_RFWS = 0x00001000,
+ ISS_CGIS = 0x00002000,
+ ISS_DPS1 = 0x00020000,
+ ISS_DPS2 = 0x00040000,
+ ISS_DPS3 = 0x00080000,
+ ISS_DPS4 = 0x00100000,
+ ISS_DPS5 = 0x00200000,
+ ISS_DPS6 = 0x00400000,
+ ISS_DPS7 = 0x00800000,
+ ISS_DPS8 = 0x01000000,
+ ISS_DPS9 = 0x02000000,
+ ISS_DPS10 = 0x04000000,
+ ISS_DPS11 = 0x08000000,
+ ISS_DPS12 = 0x10000000,
+ ISS_DPS13 = 0x20000000,
+ ISS_DPS14 = 0x40000000,
+ ISS_DPS15 = 0x80000000,
+};
+
+/* GCCR */
+enum GCCR_BIT {
+ GCCR_TCR = 0x00000003,
+ GCCR_TCR_NOREQ = 0x00000000, /* No request */
+ GCCR_TCR_RESET = 0x00000001, /* gPTP/AVTP presentation timer reset */
+ GCCR_TCR_CAPTURE = 0x00000003, /* Capture value set in GCCR.TCSS */
+ GCCR_LTO = 0x00000004,
+ GCCR_LTI = 0x00000008,
+ GCCR_LPTC = 0x00000010,
+ GCCR_LMTT = 0x00000020,
+ GCCR_TCSS = 0x00000300,
+ GCCR_TCSS_GPTP = 0x00000000, /* gPTP timer value */
+ GCCR_TCSS_ADJGPTP = 0x00000100, /* Adjusted gPTP timer value */
+ GCCR_TCSS_AVTP = 0x00000200, /* AVTP presentation time value */
+};
+
+/* GTI */
+enum GTI_BIT {
+ GTI_TIV = 0x0FFFFFFF,
+};
+
+/* GIC */
+enum GIC_BIT {
+ GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTME = 0x00000004,
+};
+
+/* GIS */
+enum GIS_BIT {
+ GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTMF = 0x00000004,
+};
+
+/* ECMR */
+enum ECMR_BIT {
+ ECMR_PRM = 0x00000001,
+ ECMR_DM = 0x00000002,
+ ECMR_TE = 0x00000020,
+ ECMR_RE = 0x00000040,
+ ECMR_MPDE = 0x00000200,
+ ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_RXF = 0x00020000,
+ ECMR_PFR = 0x00040000,
+ ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_RZPF = 0x00100000,
+ ECMR_DPAD = 0x00200000,
+ ECMR_RCSC = 0x00800000,
+ ECMR_TRCCM = 0x04000000,
+};
+
+/* ECSR */
+enum ECSR_BIT {
+ ECSR_ICD = 0x00000001,
+ ECSR_MPD = 0x00000002,
+ ECSR_LCHNG = 0x00000004,
+ ECSR_PHYI = 0x00000008,
+};
+
+/* ECSIPR */
+enum ECSIPR_BIT {
+ ECSIPR_ICDIP = 0x00000001,
+ ECSIPR_MPDIP = 0x00000002,
+ ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+};
+
+/* PIR */
+enum PIR_BIT {
+ PIR_MDC = 0x00000001,
+ PIR_MMD = 0x00000002,
+ PIR_MDO = 0x00000004,
+ PIR_MDI = 0x00000008,
+};
+
+/* PSR */
+enum PSR_BIT {
+ PSR_LMON = 0x00000001,
+};
+
+/* PIPR */
+enum PIPR_BIT {
+ PIPR_PHYIP = 0x00000001,
+};
+
+/* MPR */
+enum MPR_BIT {
+ MPR_MP = 0x0000ffff,
+};
+
+/* GECMR */
+enum GECMR_BIT {
+ GECMR_SPEED = 0x00000001,
+ GECMR_SPEED_100 = 0x00000000,
+ GECMR_SPEED_1000 = 0x00000001,
+};
+
+/* The Ethernet AVB descriptor definitions. */
+struct ravb_desc {
+ __le16 ds; /* Descriptor size */
+ u8 cc; /* Content control MSBs (reserved) */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+};
+
+enum DIE_DT {
+ /* Frame data */
+ DT_FMID = 0x40,
+ DT_FSTART = 0x50,
+ DT_FEND = 0x60,
+ DT_FSINGLE = 0x70,
+ /* Chain control */
+ DT_LINK = 0x80,
+ DT_LINKFIX = 0x90,
+ DT_EOS = 0xa0,
+ /* HW/SW arbitration */
+ DT_FEMPTY = 0xc0,
+ DT_FEMPTY_IS = 0xd0,
+ DT_FEMPTY_IC = 0xe0,
+ DT_FEMPTY_ND = 0xf0,
+ DT_LEMPTY = 0x20,
+ DT_EEMPTY = 0x30,
+};
+
+struct ravb_rx_desc {
+ __le16 ds_cc; /* Descriptor size and content control LSBs */
+ u8 msc; /* MAC status code */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+struct ravb_ex_rx_desc {
+ __le16 ds_cc; /* Descriptor size and content control lower bits */
+ u8 msc; /* MAC status code */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+	__le32 ts_n;	/* Timestamp nsec */
+	__le32 ts_sl;	/* Timestamp seconds (low 32 bits) */
+	__le16 ts_sh;	/* Timestamp seconds (high 16 bits) */
+ __le16 res; /* Reserved bits */
+};
+
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+/* E-MAC status code */
+enum MSC_BIT {
+ MSC_CRC = 0x01, /* Frame CRC error */
+ MSC_RFE = 0x02, /* Frame reception error (flagged by PHY) */
+ MSC_RTSF = 0x04, /* Frame length error (frame too short) */
+ MSC_RTLF = 0x08, /* Frame length error (frame too long) */
+ MSC_FRE = 0x10, /* Fraction error (not a multiple of 8 bits) */
+ MSC_CRL = 0x20, /* Carrier lost */
+ MSC_CEEF = 0x40, /* Carrier extension error */
+ MSC_MC = 0x80, /* Multicast frame reception */
+};
+
+struct ravb_tx_desc {
+ __le16 ds_tagl; /* Descriptor size and frame tag LSBs */
+ u8 tagh_tsr; /* Frame tag MSBs and timestamp storage request bit */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+enum TX_DS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
+enum TX_TAGH_TSR_BIT {
+ TX_TAGH = 0x3f, /* Frame tag MSBs */
+ TX_TSR = 0x40, /* Timestamp storage request */
+};
+
+enum RAVB_QUEUE {
+ RAVB_BE = 0, /* Best Effort Queue */
+ RAVB_NC, /* Network Control Queue */
+};
+
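+/* Descriptor base address table layout (see ravb_ring_format()): the TX
+ * descriptor lists of the two queues are linked from entries 0 and 1, the
+ * RX descriptor lists from entries RX_QUEUE_OFFSET + 0 and + 1.
+ */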
+#define DBAT_ENTRY_NUM 22
+#define RX_QUEUE_OFFSET 4
+#define NUM_RX_QUEUE 2
+#define NUM_TX_QUEUE 2
+
+struct ravb_tstamp_skb {
+ struct list_head list;
+ struct sk_buff *skb;
+ u16 tag;
+};
+
+struct ravb_ptp_perout {
+ u32 target;
+ u32 period;
+};
+
+#define N_EXT_TS 1
+#define N_PER_OUT 1
+
+struct ravb_ptp {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ u32 default_addend;
+ u32 current_addend;
+ int extts[N_EXT_TS];
+ struct ravb_ptp_perout perout[N_PER_OUT];
+};
+
+struct ravb_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *addr;
+ struct mdiobb_ctrl mdiobb;
+ u32 num_rx_ring[NUM_RX_QUEUE];
+ u32 num_tx_ring[NUM_TX_QUEUE];
+ u32 desc_bat_size;
+ dma_addr_t desc_bat_dma;
+ struct ravb_desc *desc_bat;
+ dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
+ dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+ struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+ struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct sk_buff **tx_skb[NUM_TX_QUEUE];
+ void **tx_buffers[NUM_TX_QUEUE];
+ u32 rx_over_errors;
+ u32 rx_fifo_errors;
+ struct net_device_stats stats[NUM_RX_QUEUE];
+ u32 tstamp_tx_ctrl;
+ u32 tstamp_rx_ctrl;
+ struct list_head ts_skb_list;
+ u32 ts_skb_tag;
+ struct ravb_ptp ptp;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx[NUM_RX_QUEUE]; /* Consumer ring indices */
+ u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
+ u32 cur_tx[NUM_TX_QUEUE];
+ u32 dirty_tx[NUM_TX_QUEUE];
+ struct napi_struct napi[NUM_RX_QUEUE];
+ struct work_struct work;
+ /* MII transceiver section. */
+ struct mii_bus *mii_bus; /* MDIO bus control */
+ struct phy_device *phydev; /* PHY device control */
+ int link;
+ phy_interface_t phy_interface;
+ int msg_enable;
+ int speed;
+ int duplex;
+
+ unsigned no_avb_link:1;
+ unsigned avb_link_active_low:1;
+};
+
+static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return ioread32(priv->addr + reg);
+}
+
+static inline void ravb_write(struct net_device *ndev, u32 data,
+ enum ravb_reg reg)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ iowrite32(data, priv->addr + reg);
+}
+
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
+
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void ravb_ptp_stop(struct net_device *ndev);
+
+#endif /* #ifndef __RAVB_H__ */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
new file mode 100644
index 000000000..78849dd4e
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -0,0 +1,1826 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/cache.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ravb.h"
+
+#define RAVB_DEF_MSG_ENABLE \
+ (NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
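+/* Poll a register until (reg & mask) == value; gives up after 10000 reads
+ * spaced 10 us apart (roughly 100 ms) and returns -ETIMEDOUT.
+ */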
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
+{
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ if ((ravb_read(ndev, reg) & mask) == value)
+ return 0;
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+}
+
+static int ravb_config(struct net_device *ndev)
+{
+ int error;
+
+ /* Set config mode */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+ CCC);
+	/* Check if the operating mode has changed to config mode */
+ error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
+ if (error)
+ netdev_err(ndev, "failed to switch device to config mode\n");
+
+ return error;
+}
+
+static void ravb_set_duplex(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecmr = ravb_read(ndev, ECMR);
+
+ if (priv->duplex) /* Full */
+ ecmr |= ECMR_DM;
+ else /* Half */
+ ecmr &= ~ECMR_DM;
+ ravb_write(ndev, ecmr, ECMR);
+}
+
+static void ravb_set_rate(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ switch (priv->speed) {
+ case 100: /* 100BASE */
+ ravb_write(ndev, GECMR_SPEED_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ravb_write(ndev, GECMR_SPEED_1000, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
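+/* Reserve headroom so that skb->data starts on a RAVB_ALIGN (128-byte)
+ * boundary.
+ */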
+static void ravb_set_buffer_align(struct sk_buff *skb)
+{
+ u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+
+ if (reserve)
+ skb_reserve(skb, RAVB_ALIGN - reserve);
+}
+
+/* Get MAC address from the MAC address registers
+ *
+ * The Ethernet AVB device doesn't have a ROM for the MAC address.
+ * This function reads back the MAC address that was set by the bootloader.
+ */
+static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
+{
+ if (mac) {
+ ether_addr_copy(ndev->dev_addr, mac);
+ } else {
+ ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
+ ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
+ ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
+ }
+}
+
+static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
+{
+ struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+ mdiobb);
+ u32 pir = ravb_read(priv->ndev, PIR);
+
+ if (set)
+ pir |= mask;
+ else
+ pir &= ~mask;
+ ravb_write(priv->ndev, pir, PIR);
+}
+
+/* MDC pin control */
+static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MDC, level);
+}
+
+/* Data I/O pin control */
+static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MMD, output);
+}
+
+/* Set data bit */
+static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MDO, value);
+}
+
+/* Get data bit */
+static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+ mdiobb);
+
+ return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
+}
+
+/* MDIO bus control struct */
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ravb_set_mdc,
+ .set_mdio_dir = ravb_set_mdio_dir,
+ .set_mdio_data = ravb_set_mdio_data,
+ .get_mdio_data = ravb_get_mdio_data,
+};
+
+/* Free skb's and DMA buffers for Ethernet AVB */
+static void ravb_ring_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ring_size;
+ int i;
+
+ /* Free RX skb ringbuffer */
+ if (priv->rx_skb[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++)
+ dev_kfree_skb(priv->rx_skb[q][i]);
+ }
+ kfree(priv->rx_skb[q]);
+ priv->rx_skb[q] = NULL;
+
+ /* Free TX skb ringbuffer */
+ if (priv->tx_skb[q]) {
+ for (i = 0; i < priv->num_tx_ring[q]; i++)
+ dev_kfree_skb(priv->tx_skb[q][i]);
+ }
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
+
+ /* Free aligned TX buffers */
+ if (priv->tx_buffers[q]) {
+ for (i = 0; i < priv->num_tx_ring[q]; i++)
+ kfree(priv->tx_buffers[q][i]);
+ }
+ kfree(priv->tx_buffers[q]);
+ priv->tx_buffers[q] = NULL;
+
+ if (priv->rx_ring[q]) {
+ ring_size = sizeof(struct ravb_ex_rx_desc) *
+ (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+ priv->rx_desc_dma[q]);
+ priv->rx_ring[q] = NULL;
+ }
+
+ if (priv->tx_ring[q]) {
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] + 1);
+ dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+ priv->tx_desc_dma[q]);
+ priv->tx_ring[q] = NULL;
+ }
+}
+
+/* Format skb and descriptor buffer for Ethernet AVB */
+static void ravb_ring_format(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_ex_rx_desc *rx_desc = NULL;
+ struct ravb_tx_desc *tx_desc = NULL;
+ struct ravb_desc *desc = NULL;
+ int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+ dma_addr_t dma_addr;
+ int i;
+
+ priv->cur_rx[q] = 0;
+ priv->cur_tx[q] = 0;
+ priv->dirty_rx[q] = 0;
+ priv->dirty_tx[q] = 0;
+
+ memset(priv->rx_ring[q], 0, rx_ring_size);
+ /* Build RX ring buffer */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ /* RX descriptor */
+ rx_desc = &priv->rx_ring[q][i];
+		/* The size of the buffer should be on a 16-byte boundary. */
+ rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ /* We just set the data size to 0 for a failed mapping which
+ * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->die_dt = DT_FEMPTY;
+ }
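+	/* Terminate the RX ring with a link descriptor pointing back to its base */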
+ rx_desc = &priv->rx_ring[q][i];
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
+
+ memset(priv->tx_ring[q], 0, tx_ring_size);
+ /* Build TX ring buffer */
+ for (i = 0; i < priv->num_tx_ring[q]; i++) {
+ tx_desc = &priv->tx_ring[q][i];
+ tx_desc->die_dt = DT_EEMPTY;
+ }
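+	/* Likewise terminate the TX ring with a link descriptor */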
+ tx_desc = &priv->tx_ring[q][i];
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+ tx_desc->die_dt = DT_LINKFIX; /* type */
+
+ /* RX descriptor base address for best effort */
+ desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
+ desc->die_dt = DT_LINKFIX; /* type */
+ desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+
+ /* TX descriptor base address for best effort */
+ desc = &priv->desc_bat[q];
+ desc->die_dt = DT_LINKFIX; /* type */
+ desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+}
+
+/* Init skb and descriptor buffer for Ethernet AVB */
+static int ravb_ring_init(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int ring_size;
+ void *buffer;
+ int i;
+
+ /* Allocate RX and TX skb rings */
+ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
+ sizeof(*priv->tx_skb[q]), GFP_KERNEL);
+ if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ goto error;
+
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ goto error;
+ ravb_set_buffer_align(skb);
+ priv->rx_skb[q][i] = skb;
+ }
+
+ /* Allocate rings for the aligned buffers */
+ priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
+ sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
+ if (!priv->tx_buffers[q])
+ goto error;
+
+ for (i = 0; i < priv->num_tx_ring[q]; i++) {
+ buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+ if (!buffer)
+ goto error;
+ /* Aligned TX buffer */
+ priv->tx_buffers[q][i] = buffer;
+ }
+
+ /* Allocate all RX descriptors. */
+ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
+ priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ if (!priv->rx_ring[q])
+ goto error;
+
+ priv->dirty_rx[q] = 0;
+
+ /* Allocate all TX descriptors. */
+ ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+ priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ &priv->tx_desc_dma[q],
+ GFP_KERNEL);
+ if (!priv->tx_ring[q])
+ goto error;
+
+ return 0;
+
+error:
+ ravb_ring_free(ndev, q);
+
+ return -ENOMEM;
+}
+
+/* E-MAC init function */
+static void ravb_emac_init(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecmr;
+
+ /* Receive frame limit set register */
+ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+
+ /* PAUSE prohibition */
+ ecmr = ravb_read(ndev, ECMR);
+ ecmr &= ECMR_DM;
+ ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
+ ravb_write(ndev, ecmr, ECMR);
+
+ ravb_set_rate(ndev);
+
+ /* Set MAC address */
+ ravb_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ ravb_write(ndev,
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+
+ ravb_write(ndev, 1, MPR);
+
+ /* E-MAC status register clear */
+ ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
+}
+
+/* Device init function for Ethernet AVB */
+static int ravb_dmac_init(struct net_device *ndev)
+{
+ int error;
+
+ /* Set CONFIG mode */
+ error = ravb_config(ndev);
+ if (error)
+ return error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+ error = ravb_ring_init(ndev, RAVB_NC);
+ if (error) {
+ ravb_ring_free(ndev, RAVB_BE);
+ return error;
+ }
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+ ravb_ring_format(ndev, RAVB_NC);
+
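+	/* Descriptor byte order: clear CCC.BOC on little-endian hosts,
+	 * set it on big-endian ones.
+	 */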
+#if defined(__LITTLE_ENDIAN)
+ ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
+#else
+ ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
+#endif
+
+ /* Set AVB RX */
+ ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+
+ /* Set FIFO size */
+ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+
+ /* Timestamp enable */
+ ravb_write(ndev, TCCR_TFEN, TCCR);
+
+ /* Interrupt enable: */
+ /* Frame receive */
+ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
+ /* Receive FIFO full warning */
+ ravb_write(ndev, RIC1_RFWE, RIC1);
+ /* Receive FIFO full error, descriptor empty */
+ ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
+ /* Frame transmitted, timestamp FIFO updated */
+ ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+
+ /* Setting the control will start the AVB-DMAC process. */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
+ CCC);
+
+ return 0;
+}
+
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry = 0;
+ u32 size;
+
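+	/* Free buffers for descriptors the hardware has completed (marked DT_FEMPTY) */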
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+ desc = &priv->tx_ring[q][entry];
+ if (desc->die_dt != DT_FEMPTY)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry]) {
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ free_num++;
+ }
+ stats->tx_packets++;
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
+
+static void ravb_get_tx_tstamp(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb;
+ struct timespec64 ts;
+ u16 tag, tfa_tag;
+ int count;
+ u32 tfa2;
+
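+	/* TSR.TFFL holds the number of entries currently in the TX timestamp FIFO */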
+ count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
+ while (count--) {
+ tfa2 = ravb_read(ndev, TFA2);
+ tfa_tag = (tfa2 & TFA2_TST) >> 16;
+ ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
+ ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
+ ravb_read(ndev, TFA1);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
+ list) {
+ skb = ts_skb->skb;
+ tag = ts_skb->tag;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ if (tag == tfa_tag) {
+ skb_tstamp_tx(skb, &shhwtstamps);
+ break;
+ }
+ }
+ ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
+ }
+}
+
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
+ priv->cur_rx[q];
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_ex_rx_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct timespec64 ts;
+ u16 pkt_len = 0;
+ u8 desc_status;
+ int limit;
+
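+	/* Limit this pass to the number of dirty ring slots, capped by the NAPI quota */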
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
+ desc = &priv->rx_ring[q][entry];
+ while (desc->die_dt != DT_FEMPTY) {
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ desc_status = desc->msc;
+ pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+ if (--boguscnt < 0)
+ break;
+
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
+ if (desc_status & MSC_MC)
+ stats->multicast++;
+
+ if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
+ MSC_CEEF)) {
+ stats->rx_errors++;
+ if (desc_status & MSC_CRC)
+ stats->rx_crc_errors++;
+ if (desc_status & MSC_RFE)
+ stats->rx_frame_errors++;
+ if (desc_status & (MSC_RTLF | MSC_RTSF))
+ stats->rx_length_errors++;
+ if (desc_status & MSC_CEEF)
+ stats->rx_missed_errors++;
+ } else {
+ u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
+
+ skb = priv->rx_skb[q][entry];
+ priv->rx_skb[q][entry] = NULL;
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ get_ts &= (q == RAVB_NC) ?
+ RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
+ ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
+ 32) | le32_to_cpu(desc->ts_sl);
+ ts.tv_nsec = le32_to_cpu(desc->ts_n);
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi[q], skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ }
+
+ entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q][entry];
+ }
+
+ /* Refill the RX ring buffers. */
+ for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+ entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q][entry];
+		/* The size of the buffer should be on a 16-byte boundary. */
+ desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+
+ if (!priv->rx_skb[q][entry]) {
+ skb = netdev_alloc_skb(ndev,
+ PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ break; /* Better luck next round. */
+ ravb_set_buffer_align(skb);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ le16_to_cpu(desc->ds_cc),
+ DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ /* We just set the data size to 0 for a failed mapping
+ * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ priv->rx_skb[q][entry] = skb;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY;
+ }
+
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+}
+
+static void ravb_rcv_snd_disable(struct net_device *ndev)
+{
+ /* Disable TX and RX */
+ ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void ravb_rcv_snd_enable(struct net_device *ndev)
+{
+ /* Enable TX and RX */
+ ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+}
+
+/* Wait for the DMA processes to finish */
+static int ravb_stop_dma(struct net_device *ndev)
+{
+ int error;
+
+ /* Wait for stopping the hardware TX process */
+ error = ravb_wait(ndev, TCCR,
+ TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+ if (error)
+ return error;
+
+ error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
+ 0);
+ if (error)
+ return error;
+
+ /* Stop the E-MAC's RX/TX processes. */
+ ravb_rcv_snd_disable(ndev);
+
+ /* Wait for stopping the RX DMA process */
+ error = ravb_wait(ndev, CSR, CSR_RPO, 0);
+ if (error)
+ return error;
+
+ /* Stop AVB-DMAC process */
+ return ravb_config(ndev);
+}
+
+/* E-MAC interrupt handler */
+static void ravb_emac_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecsr, psr;
+
+ ecsr = ravb_read(ndev, ECSR);
+ ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
+ if (ecsr & ECSR_ICD)
+ ndev->stats.tx_carrier_errors++;
+ if (ecsr & ECSR_LCHNG) {
+ /* Link changed */
+ if (priv->no_avb_link)
+ return;
+ psr = ravb_read(ndev, PSR);
+ if (priv->avb_link_active_low)
+ psr ^= PSR_LMON;
+ if (!(psr & PSR_LMON)) {
+			/* Disable RX and TX */
+ ravb_rcv_snd_disable(ndev);
+ } else {
+ /* Enable RX and TX */
+ ravb_rcv_snd_enable(ndev);
+ }
+ }
+}
+
+/* Error interrupt handler */
+static void ravb_error_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 eis, ris2;
+
+ eis = ravb_read(ndev, EIS);
+ ravb_write(ndev, ~EIS_QFS, EIS);
+ if (eis & EIS_QFS) {
+ ris2 = ravb_read(ndev, RIS2);
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+
+		/* Receive Descriptor Empty int (best effort queue) */
+ if (ris2 & RIS2_QFF0)
+ priv->stats[RAVB_BE].rx_over_errors++;
+
+		/* Receive Descriptor Empty int (network control queue) */
+ if (ris2 & RIS2_QFF1)
+ priv->stats[RAVB_NC].rx_over_errors++;
+
+ /* Receive FIFO Overflow int */
+ if (ris2 & RIS2_RFFF)
+ priv->rx_fifo_errors++;
+ }
+}
+
+static irqreturn_t ravb_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+ u32 iss;
+
+ spin_lock(&priv->lock);
+ /* Get interrupt status */
+ iss = ravb_read(ndev, ISS);
+
+ /* Received and transmitted interrupts */
+ if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
+ u32 ris0 = ravb_read(ndev, RIS0);
+ u32 ric0 = ravb_read(ndev, RIC0);
+ u32 tis = ravb_read(ndev, TIS);
+ u32 tic = ravb_read(ndev, TIC);
+ int q;
+
+ /* Timestamp updated */
+ if (tis & TIS_TFUF) {
+ ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_get_tx_tstamp(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Network control and best effort queue RX/TX */
+ for (q = RAVB_NC; q >= RAVB_BE; q--) {
+ if (((ris0 & ric0) & BIT(q)) ||
+ ((tis & tic) & BIT(q))) {
+ if (napi_schedule_prep(&priv->napi[q])) {
+ /* Mask RX and TX interrupts */
+ ravb_write(ndev, ric0 & ~BIT(q), RIC0);
+ ravb_write(ndev, tic & ~BIT(q), TIC);
+ __napi_schedule(&priv->napi[q]);
+ } else {
+ netdev_warn(ndev,
+ "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+ ris0, ric0);
+ netdev_warn(ndev,
+ " tx status 0x%08x, tx mask 0x%08x.\n",
+ tis, tic);
+ }
+ result = IRQ_HANDLED;
+ }
+ }
+ }
+
+ /* E-MAC status summary */
+ if (iss & ISS_MS) {
+ ravb_emac_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Error status summary */
+ if (iss & ISS_ES) {
+ ravb_error_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ if (iss & ISS_CGIS)
+ result = ravb_ptp_interrupt(ndev);
+
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+static int ravb_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int q = napi - priv->napi;
+ int mask = BIT(q);
+ int quota = budget;
+ u32 ris0, tis;
+
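+	/* Service RX and TX for this queue until its status bits clear */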
+ for (;;) {
+ tis = ravb_read(ndev, TIS);
+ ris0 = ravb_read(ndev, RIS0);
+ if (!((ris0 & mask) || (tis & mask)))
+ break;
+
+ /* Processing RX Descriptor Ring */
+ if (ris0 & mask) {
+ /* Clear RX interrupt */
+ ravb_write(ndev, ~mask, RIS0);
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
+ }
+ /* Processing TX Descriptor Ring */
+ if (tis & mask) {
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Clear TX interrupt */
+ ravb_write(ndev, ~mask, TIS);
+ ravb_tx_free(ndev, q);
+ netif_wake_subqueue(ndev, q);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+
+ napi_complete(napi);
+
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
+ ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Receive error message handling */
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ ndev->stats.rx_over_errors = priv->rx_over_errors;
+ netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
+ }
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+ netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
+ }
+out:
+ return budget - quota;
+}
+
+/* PHY state control function */
+static void ravb_adjust_link(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ bool new_state = false;
+
+ if (phydev->link) {
+ if (phydev->duplex != priv->duplex) {
+ new_state = true;
+ priv->duplex = phydev->duplex;
+ ravb_set_duplex(ndev);
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ ravb_set_rate(ndev);
+ }
+ if (!priv->link) {
+ ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
+ ECMR);
+ new_state = true;
+ priv->link = phydev->link;
+ if (priv->no_avb_link)
+ ravb_rcv_snd_enable(ndev);
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+ if (priv->no_avb_link)
+ ravb_rcv_snd_disable(ndev);
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+/* PHY init function */
+static int ravb_phy_init(struct net_device *ndev)
+{
+ struct device_node *np = ndev->dev.parent->of_node;
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev;
+ struct device_node *pn;
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+
+ /* Try connecting to PHY */
+ pn = of_parse_phandle(np, "phy-handle", 0);
+ phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ netdev_err(ndev, "failed to connect PHY\n");
+ return -ENOENT;
+ }
+
+ netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+/* PHY control start function */
+static int ravb_phy_start(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ error = ravb_phy_init(ndev);
+ if (error)
+ return error;
+
+ phy_start(priv->phydev);
+
+ return 0;
+}
+
+static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+ unsigned long flags;
+
+ if (priv->phydev) {
+ spin_lock_irqsave(&priv->lock, flags);
+ error = phy_ethtool_gset(priv->phydev, ecmd);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return error;
+}
+
+static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int error;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable TX and RX */
+ ravb_rcv_snd_disable(ndev);
+
+ error = phy_ethtool_sset(priv->phydev, ecmd);
+ if (error)
+ goto error_exit;
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ priv->duplex = 1;
+ else
+ priv->duplex = 0;
+
+ ravb_set_duplex(ndev);
+
+error_exit:
+ mdelay(1);
+
+ /* Enable TX and RX */
+ ravb_rcv_snd_enable(ndev);
+
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_nway_reset(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+ unsigned long flags;
+
+ if (priv->phydev) {
+ spin_lock_irqsave(&priv->lock, flags);
+ error = phy_start_aneg(priv->phydev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return error;
+}
+
+static u32 ravb_get_msglevel(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void ravb_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_queue_0_current",
+ "tx_queue_0_current",
+ "rx_queue_0_dirty",
+ "tx_queue_0_dirty",
+ "rx_queue_0_packets",
+ "tx_queue_0_packets",
+ "rx_queue_0_bytes",
+ "tx_queue_0_bytes",
+ "rx_queue_0_mcast_packets",
+ "rx_queue_0_errors",
+ "rx_queue_0_crc_errors",
+ "rx_queue_0_frame_errors",
+ "rx_queue_0_length_errors",
+ "rx_queue_0_missed_errors",
+ "rx_queue_0_over_errors",
+
+ "rx_queue_1_current",
+ "tx_queue_1_current",
+ "rx_queue_1_dirty",
+ "tx_queue_1_dirty",
+ "rx_queue_1_packets",
+ "tx_queue_1_packets",
+ "rx_queue_1_bytes",
+ "tx_queue_1_bytes",
+ "rx_queue_1_mcast_packets",
+ "rx_queue_1_errors",
+ "rx_queue_1_crc_errors",
+	"rx_queue_1_frame_errors",
+ "rx_queue_1_length_errors",
+ "rx_queue_1_missed_errors",
+ "rx_queue_1_over_errors",
+};
+
+#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
+
+static int ravb_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return RAVB_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ravb_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int i = 0;
+ int q;
+
+ /* Device-specific stats */
+ for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+ struct net_device_stats *stats = &priv->stats[q];
+
+ data[i++] = priv->cur_rx[q];
+ data[i++] = priv->cur_tx[q];
+ data[i++] = priv->dirty_rx[q];
+ data[i++] = priv->dirty_tx[q];
+ data[i++] = stats->rx_packets;
+ data[i++] = stats->tx_packets;
+ data[i++] = stats->rx_bytes;
+ data[i++] = stats->tx_bytes;
+ data[i++] = stats->multicast;
+ data[i++] = stats->rx_errors;
+ data[i++] = stats->rx_crc_errors;
+ data[i++] = stats->rx_frame_errors;
+ data[i++] = stats->rx_length_errors;
+ data[i++] = stats->rx_missed_errors;
+ data[i++] = stats->rx_over_errors;
+ }
+}
+
+static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ break;
+ }
+}
+
+static void ravb_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ring->rx_max_pending = BE_RX_RING_MAX;
+ ring->tx_max_pending = BE_TX_RING_MAX;
+ ring->rx_pending = priv->num_rx_ring[RAVB_BE];
+ ring->tx_pending = priv->num_tx_ring[RAVB_BE];
+}
+
+static int ravb_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ if (ring->tx_pending > BE_TX_RING_MAX ||
+ ring->rx_pending > BE_RX_RING_MAX ||
+ ring->tx_pending < BE_TX_RING_MIN ||
+ ring->rx_pending < BE_RX_RING_MIN)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+ /* Wait for DMA stopping */
+ error = ravb_stop_dma(ndev);
+ if (error) {
+ netdev_err(ndev,
+				   "cannot set ringparam! Are any AVB processes still running?\n");
+ return error;
+ }
+ synchronize_irq(ndev->irq);
+
+ /* Free all the skb's in the RX queue and the DMA buffers. */
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+ }
+
+ /* Set new parameters */
+ priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
+ priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
+
+ if (netif_running(ndev)) {
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ netdev_err(ndev,
+ "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return error;
+ }
+
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_device_attach(ndev);
+ }
+
+ return 0;
+}
+
+static int ravb_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
+
+ return 0;
+}
+
+static const struct ethtool_ops ravb_ethtool_ops = {
+ .get_settings = ravb_get_settings,
+ .set_settings = ravb_set_settings,
+ .nway_reset = ravb_nway_reset,
+ .get_msglevel = ravb_get_msglevel,
+ .set_msglevel = ravb_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ravb_get_strings,
+ .get_ethtool_stats = ravb_get_ethtool_stats,
+ .get_sset_count = ravb_get_sset_count,
+ .get_ringparam = ravb_get_ringparam,
+ .set_ringparam = ravb_set_ringparam,
+ .get_ts_info = ravb_get_ts_info,
+};
+
+/* Network device open function for Ethernet AVB */
+static int ravb_open(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ napi_enable(&priv->napi[RAVB_BE]);
+ napi_enable(&priv->napi[RAVB_NC]);
+
+ error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
+ ndev);
+ if (error) {
+ netdev_err(ndev, "cannot request IRQ\n");
+ goto out_napi_off;
+ }
+
+ /* Device init */
+ error = ravb_dmac_init(ndev);
+ if (error)
+ goto out_free_irq;
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+
+ /* PHY control start */
+ error = ravb_phy_start(ndev);
+ if (error)
+ goto out_ptp_stop;
+
+ return 0;
+
+out_ptp_stop:
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+out_free_irq:
+ free_irq(ndev->irq, ndev);
+out_napi_off:
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+ return error;
+}
+
+/* Timeout function for Ethernet AVB */
+static void ravb_tx_timeout(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ netif_err(priv, tx_err, ndev,
+ "transmit timed out, status %08x, resetting...\n",
+ ravb_read(ndev, ISS));
+
+ /* Increment the TX error count */
+ ndev->stats.tx_errors++;
+
+ schedule_work(&priv->work);
+}
+
+static void ravb_tx_timeout_work(struct work_struct *work)
+{
+ struct ravb_private *priv = container_of(work, struct ravb_private,
+ work);
+ struct net_device *ndev = priv->ndev;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+
+ /* Wait for the DMA to stop */
+ ravb_stop_dma(ndev);
+
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ /* Device init */
+ ravb_dmac_init(ndev);
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+}
+
+/* Packet transmit function for Ethernet AVB */
+static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb = NULL;
+ u16 q = skb_get_queue_mapping(skb);
+ struct ravb_tx_desc *desc;
+ unsigned long flags;
+ u32 dma_addr;
+ void *buffer;
+ u32 entry;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+ netif_err(priv, tx_queued, ndev,
+ "still transmitting with the full ring!\n");
+ netif_stop_subqueue(ndev, q);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ entry = priv->cur_tx[q] % priv->num_tx_ring[q];
+ priv->tx_skb[q][entry] = skb;
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ /* skb_put_padto() frees the skb on failure, so don't free it again */
+ priv->tx_skb[q][entry] = NULL;
+ goto exit;
+ }
+
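+ /* Copy the frame into a pre-allocated bounce buffer aligned to RAVB_ALIGN */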
+ buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
+ memcpy(buffer, skb->data, skb->len);
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(skb->len);
+ dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ goto drop;
+ desc->dptr = cpu_to_le32(dma_addr);
+
+ /* TX timestamp required */
+ if (q == RAVB_NC) {
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+ DMA_TO_DEVICE);
+ goto drop;
+ }
+ ts_skb->skb = skb;
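+ /* The timestamp tag is 10 bits wide and wraps around after 0x3ff */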
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+ /* TAG and timestamp required flag */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
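+ /* Upper 6 bits of the tag go into TAGH, lower 4 bits into TAGL[15:12] */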
+ desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
+ }
+
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FSINGLE;
+
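+ /* Request transmission start for this queue (TSRQ0 shifted by the queue index) */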
+ ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
+
+ priv->cur_tx[q]++;
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
+ !ravb_tx_free(ndev, q))
+ netif_stop_subqueue(ndev, q);
+
+exit:
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[q][entry] = NULL;
+ goto exit;
+}
+
+static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ /* If the skb needs a TX timestamp, it is handled by the network control queue */
+ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
+ RAVB_BE;
+}
+
+static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *nstats, *stats0, *stats1;
+
+ nstats = &ndev->stats;
+ stats0 = &priv->stats[RAVB_BE];
+ stats1 = &priv->stats[RAVB_NC];
+
+ nstats->tx_dropped += ravb_read(ndev, TROCR);
+ ravb_write(ndev, 0, TROCR); /* (write clear) */
+ nstats->collisions += ravb_read(ndev, CDCR);
+ ravb_write(ndev, 0, CDCR); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
+ ravb_write(ndev, 0, LCCR); /* (write clear) */
+
+ nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
+ ravb_write(ndev, 0, CERCR); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
+ ravb_write(ndev, 0, CEECR); /* (write clear) */
+
+ nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
+ nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
+ nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
+ nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
+ nstats->multicast = stats0->multicast + stats1->multicast;
+ nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
+ nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
+ nstats->rx_frame_errors =
+ stats0->rx_frame_errors + stats1->rx_frame_errors;
+ nstats->rx_length_errors =
+ stats0->rx_length_errors + stats1->rx_length_errors;
+ nstats->rx_missed_errors =
+ stats0->rx_missed_errors + stats1->rx_missed_errors;
+ nstats->rx_over_errors =
+ stats0->rx_over_errors + stats1->rx_over_errors;
+
+ return nstats;
+}
+
+/* Update promiscuous bit */
+static void ravb_set_rx_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 ecmr;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ecmr = ravb_read(ndev, ECMR);
+ if (ndev->flags & IFF_PROMISC)
+ ecmr |= ECMR_PRM;
+ else
+ ecmr &= ~ECMR_PRM;
+ ravb_write(ndev, ecmr, ECMR);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Device close function for Ethernet AVB */
+static int ravb_close(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Disable interrupts by clearing the interrupt masks. */
+ ravb_write(ndev, 0, RIC0);
+ ravb_write(ndev, 0, RIC1);
+ ravb_write(ndev, 0, RIC2);
+ ravb_write(ndev, 0, TIC);
+
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+
+ /* Set the config mode to stop the AVB-DMAC's processes */
+ if (ravb_stop_dma(ndev) < 0)
+ netdev_err(ndev,
+ "device will be stopped after h/w processes are done.\n");
+
+ /* Clear the timestamp list */
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+
+ /* PHY disconnect */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ free_irq(ndev->irq, ndev);
+
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+
+ /* Free all the skb's in the RX queue and the DMA buffers. */
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ return 0;
+}
+
+static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
+ HWTSTAMP_TX_OFF;
+ if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/* Control hardware time stamping */
+static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+ u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
+ u32 tstamp_tx_ctrl;
+
+ if (copy_from_user(&config, req->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* Reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
+ }
+
+ priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+ /* ioctl handler for the network device */
+static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return ravb_hwtstamp_get(ndev, req);
+ case SIOCSHWTSTAMP:
+ return ravb_hwtstamp_set(ndev, req);
+ }
+
+ return phy_mii_ioctl(phydev, req, cmd);
+}
+
+static const struct net_device_ops ravb_netdev_ops = {
+ .ndo_open = ravb_open,
+ .ndo_stop = ravb_close,
+ .ndo_start_xmit = ravb_start_xmit,
+ .ndo_select_queue = ravb_select_queue,
+ .ndo_get_stats = ravb_get_stats,
+ .ndo_set_rx_mode = ravb_set_rx_mode,
+ .ndo_tx_timeout = ravb_tx_timeout,
+ .ndo_do_ioctl = ravb_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int error;
+
+ /* Bitbang init */
+ priv->mdiobb.ops = &bb_ops;
+
+ /* MII controller setting */
+ priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+ if (!priv->mii_bus)
+ return -ENOMEM;
+
+ /* Hook up MII support for ethtool */
+ priv->mii_bus->name = "ravb_mii";
+ priv->mii_bus->parent = dev;
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ /* Register MDIO bus */
+ error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ if (error)
+ goto out_free_bus;
+
+ return 0;
+
+out_free_bus:
+ free_mdio_bitbang(priv->mii_bus);
+ return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+ /* Unregister mdio bus */
+ mdiobus_unregister(priv->mii_bus);
+
+ /* Free bitbang info */
+ free_mdio_bitbang(priv->mii_bus);
+
+ return 0;
+}
+
+static int ravb_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ravb_private *priv;
+ struct net_device *ndev;
+ int error, irq, q;
+ struct resource *res;
+
+ if (!np) {
+ dev_err(&pdev->dev,
+ "this driver is required to be instantiated from device tree\n");
+ return -EINVAL;
+ }
+
+ /* Get base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
+ NUM_TX_QUEUE, NUM_RX_QUEUE);
+ if (!ndev)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* The Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+ ndev->dma = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = -ENODEV;
+ goto out_release;
+ }
+ ndev->irq = irq;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
+ priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->addr)) {
+ error = PTR_ERR(priv->addr);
+ goto out_release;
+ }
+
+ spin_lock_init(&priv->lock);
+ INIT_WORK(&priv->work, ravb_tx_timeout_work);
+
+ priv->phy_interface = of_get_phy_mode(np);
+
+ priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
+ priv->avb_link_active_low =
+ of_property_read_bool(np, "renesas,ether-link-active-low");
+
+ /* Set function */
+ ndev->netdev_ops = &ravb_netdev_ops;
+ ndev->ethtool_ops = &ravb_ethtool_ops;
+
+ /* Set AVB config mode */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+ CCC);
+
+ /* Set CSEL value */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
+ CCC);
+
+ /* Set GTI value: the timer increment is expressed in 2^-20 ns units,
+ * so program the period of the 130 MHz HPB clock selected above
+ * (1000/130 ns) scaled by 2^20.
+ */
+ ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);
+
+ /* Request GTI loading */
+ ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
+
+ /* Allocate descriptor base address table */
+ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
+ priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
+ &priv->desc_bat_dma, GFP_KERNEL);
+ if (!priv->desc_bat) {
+ dev_err(&ndev->dev,
+ "Cannot allocate desc base address table (size %d bytes)\n",
+ priv->desc_bat_size);
+ error = -ENOMEM;
+ goto out_release;
+ }
+ for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
+ priv->desc_bat[q].die_dt = DT_EOS;
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
+ /* Initialise HW timestamp list */
+ INIT_LIST_HEAD(&priv->ts_skb_list);
+
+ /* Debug message level */
+ priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+
+ /* Read and set MAC address */
+ ravb_read_mac_address(ndev, of_get_mac_address(np));
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev,
+ "no valid MAC address supplied, using a random one\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ /* MDIO bus init */
+ error = ravb_mdio_init(priv);
+ if (error) {
+ dev_err(&ndev->dev, "failed to initialize MDIO\n");
+ goto out_dma_free;
+ }
+
+ netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+
+ /* Network device register */
+ error = register_netdev(ndev);
+ if (error)
+ goto out_napi_del;
+
+ /* Print device information */
+ netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+
+ platform_set_drvdata(pdev, ndev);
+
+ return 0;
+
+out_napi_del:
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
+out_dma_free:
+ dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+out_release:
+ if (ndev)
+ free_netdev(ndev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return error;
+}
+
+static int ravb_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+ /* Set reset mode */
+ ravb_write(ndev, CCC_OPC_RESET, CCC);
+ pm_runtime_put_sync(&pdev->dev);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ravb_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops ravb_dev_pm_ops = {
+ .runtime_suspend = ravb_runtime_nop,
+ .runtime_resume = ravb_runtime_nop,
+};
+
+#define RAVB_PM_OPS (&ravb_dev_pm_ops)
+#else
+#define RAVB_PM_OPS NULL
+#endif
+
+static const struct of_device_id ravb_match_table[] = {
+ { .compatible = "renesas,etheravb-r8a7790" },
+ { .compatible = "renesas,etheravb-r8a7794" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
+static struct platform_driver ravb_driver = {
+ .probe = ravb_probe,
+ .remove = ravb_remove,
+ .driver = {
+ .name = "ravb",
+ .pm = RAVB_PM_OPS,
+ .of_match_table = ravb_match_table,
+ },
+};
+
+module_platform_driver(ravb_driver);
+
+MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
+MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
new file mode 100644
index 000000000..7a8ce920c
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -0,0 +1,359 @@
+/* PTP 1588 clock using the Renesas Ethernet AVB
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "ravb.h"
+
+static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+
+ error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+ if (error)
+ return error;
+
+ ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR);
+ return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+
+ error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
+ if (error)
+ return error;
+
+ ts->tv_nsec = ravb_read(ndev, GCT0);
+ ts->tv_sec = ravb_read(ndev, GCT1) |
+ ((s64)ravb_read(ndev, GCT2) << 32);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_write(struct ravb_private *priv,
+ const struct timespec64 *ts)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+ u32 gccr;
+
+ error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
+ if (error)
+ return error;
+
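+ /* Load the new time into the offset registers and latch it with GCCR.LTO */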
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LTO)
+ return -EBUSY;
+ ravb_write(ndev, ts->tv_nsec, GTO0);
+ ravb_write(ndev, ts->tv_sec, GTO1);
+ ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
+ ravb_write(ndev, gccr | GCCR_LTO, GCCR);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
+{
+ struct net_device *ndev = priv->ndev;
+ /* When the comparison value (GPTC.PTCV) lies within [x-1, x+1], where x
+ * is the configured increment value in GTI.TIV, a comparison match may
+ * not be detected when the timer wraps around.
+ */
+ u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
+ u32 gccr;
+
+ if (ns < gti_ns_plus_1)
+ ns = gti_ns_plus_1;
+ else if (ns > 0 - gti_ns_plus_1)
+ ns = 0 - gti_ns_plus_1;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LPTC)
+ return -EBUSY;
+ ravb_write(ndev, ns, GPTC);
+ ravb_write(ndev, gccr | GCCR_LPTC, GCCR);
+
+ return 0;
+}
+
+/* PTP clock operations */
+static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ unsigned long flags;
+ u32 diff, addend;
+ bool neg_adj = false;
+ u32 gccr;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
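+ /* Scale the nominal addend by |ppb| parts per billion and apply the sign */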
+ addend = priv->ptp.default_addend;
+ diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);
+
+ addend = neg_adj ? addend - diff : addend + diff;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->ptp.current_addend = addend;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LTI) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -EBUSY;
+ }
+ ravb_write(ndev, addend & GTI_TIV, GTI);
+ ravb_write(ndev, gccr | GCCR_LTI, GCCR);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct timespec64 ts;
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_read(priv, &ts);
+ if (!error) {
+ u64 now = ktime_to_ns(timespec64_to_ktime(ts));
+
+ ts = ns_to_timespec64(now + delta);
+ error = ravb_ptp_time_write(priv, &ts);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_read(priv, ts);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_write(priv, ts);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_extts_request *req, int on)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ unsigned long flags;
+ u32 gic;
+
+ if (req->index)
+ return -EINVAL;
+
+ if (priv->ptp.extts[req->index] == on)
+ return 0;
+ priv->ptp.extts[req->index] = on;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ gic = ravb_read(ndev, GIC);
+ if (on)
+ gic |= GIC_PTCE;
+ else
+ gic &= ~GIC_PTCE;
+ ravb_write(ndev, gic, GIC);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int ravb_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_perout_request *req, int on)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ struct ravb_ptp_perout *perout;
+ unsigned long flags;
+ int error = 0;
+ u32 gic;
+
+ if (req->index)
+ return -EINVAL;
+
+ if (on) {
+ u64 start_ns;
+ u64 period_ns;
+
+ start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
+ period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
+
+ if (start_ns > U32_MAX) {
+ netdev_warn(ndev,
+ "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
+ return -ERANGE;
+ }
+
+ if (period_ns > U32_MAX) {
+ netdev_warn(ndev,
+ "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
+ return -ERANGE;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ perout = &priv->ptp.perout[req->index];
+ perout->target = (u32)start_ns;
+ perout->period = (u32)period_ns;
+ error = ravb_ptp_update_compare(priv, (u32)start_ns);
+ if (!error) {
+ /* Unmask interrupt */
+ gic = ravb_read(ndev, GIC);
+ gic |= GIC_PTME;
+ ravb_write(ndev, gic, GIC);
+ }
+ } else {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ perout = &priv->ptp.perout[req->index];
+ perout->period = 0;
+
+ /* Mask interrupt */
+ gic = ravb_read(ndev, GIC);
+ gic &= ~GIC_PTME;
+ ravb_write(ndev, gic, GIC);
+ }
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ switch (req->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return ravb_ptp_extts(ptp, &req->extts, on);
+ case PTP_CLK_REQ_PEROUT:
+ return ravb_ptp_perout(ptp, &req->perout, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ptp_clock_info ravb_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ravb clock",
+ .max_adj = 50000000,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = N_PER_OUT,
+ .adjfreq = ravb_ptp_adjfreq,
+ .adjtime = ravb_ptp_adjtime,
+ .gettime64 = ravb_ptp_gettime64,
+ .settime64 = ravb_ptp_settime64,
+ .enable = ravb_ptp_enable,
+};
+
+/* Caller must hold the lock */
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 gis = ravb_read(ndev, GIS);
+
+ gis &= ravb_read(ndev, GIC);
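+ /* PTCF signals a timestamp capture (external event), PTMF a compare match */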
+ if (gis & GIS_PTCF) {
+ struct ptp_clock_event event;
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ravb_read(ndev, GCPT);
+ ptp_clock_event(priv->ptp.clock, &event);
+ }
+ if (gis & GIS_PTMF) {
+ struct ravb_ptp_perout *perout = priv->ptp.perout;
+
+ if (perout->period) {
+ perout->target += perout->period;
+ ravb_ptp_update_compare(priv, perout->target);
+ }
+ }
+
+ if (gis) {
+ ravb_write(ndev, ~gis, GIS);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 gccr;
+
+ priv->ptp.info = ravb_ptp_info;
+
+ priv->ptp.default_addend = ravb_read(ndev, GTI);
+ priv->ptp.current_addend = priv->ptp.default_addend;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+ gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS;
+ ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
+}
+
+void ravb_ptp_stop(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ravb_write(ndev, 0, GIC);
+ ravb_write(ndev, 0, GIS);
+
+ ptp_clock_unregister(priv->ptp.clock);
+}
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index cf98cc9bb..2e7f9a283 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -181,7 +181,7 @@ struct rocker_desc_info {
size_t data_size;
size_t tlv_size;
struct rocker_desc *desc;
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ dma_addr_t mapaddr;
};
struct rocker_dma_ring_info {
@@ -225,6 +225,7 @@ struct rocker_port {
struct napi_struct napi_rx;
struct rocker_dma_ring_info tx_ring;
struct rocker_dma_ring_info rx_ring;
+ struct list_head trans_mem;
};
struct rocker {
@@ -236,21 +237,21 @@ struct rocker {
struct {
u64 id;
} hw;
- spinlock_t cmd_ring_lock;
+ spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
DECLARE_HASHTABLE(flow_tbl, 16);
- spinlock_t flow_tbl_lock;
+ spinlock_t flow_tbl_lock; /* for flow tbl accesses */
u64 flow_tbl_next_cookie;
DECLARE_HASHTABLE(group_tbl, 16);
- spinlock_t group_tbl_lock;
+ spinlock_t group_tbl_lock; /* for group tbl accesses */
DECLARE_HASHTABLE(fdb_tbl, 16);
- spinlock_t fdb_tbl_lock;
+ spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
DECLARE_HASHTABLE(internal_vlan_tbl, 8);
- spinlock_t internal_vlan_tbl_lock;
+ spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
DECLARE_HASHTABLE(neigh_tbl, 16);
- spinlock_t neigh_tbl_lock;
+ spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
u32 neigh_tbl_next_index;
};
@@ -294,7 +295,7 @@ static bool rocker_vlan_id_is_internal(__be16 vlan_id)
return (_vlan_id >= start && _vlan_id <= end);
}
-static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
u16 vid, bool *pop_vlan)
{
__be16 vlan_id;
@@ -311,7 +312,7 @@ static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
return vlan_id;
}
-static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
__be16 vlan_id)
{
if (rocker_vlan_id_is_internal(vlan_id))
@@ -320,11 +321,88 @@ static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
return ntohs(vlan_id);
}
-static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
return !!rocker_port->bridge_dev;
}
+#define ROCKER_OP_FLAG_REMOVE BIT(0)
+#define ROCKER_OP_FLAG_NOWAIT BIT(1)
+#define ROCKER_OP_FLAG_LEARNED BIT(2)
+#define ROCKER_OP_FLAG_REFRESH BIT(3)
+
+static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ size_t size)
+{
+ struct list_head *elem = NULL;
+ gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
+ GFP_ATOMIC : GFP_KERNEL;
+
+ /* If in transaction prepare phase, allocate the memory
+ * and enqueue it on a per-port list. If in transaction
+ * commit phase, dequeue the memory from the per-port list
+ * rather than re-allocating the memory. The idea is the
+ * driver code paths for prepare and commit are identical
+ * so the memory allocated in the prepare phase is the
+ * memory used in the commit phase.
+ */
+
+ switch (trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ elem = kzalloc(size + sizeof(*elem), gfp_flags);
+ if (!elem)
+ return NULL;
+ list_add_tail(elem, &rocker_port->trans_mem);
+ break;
+ case SWITCHDEV_TRANS_COMMIT:
+ BUG_ON(list_empty(&rocker_port->trans_mem));
+ elem = rocker_port->trans_mem.next;
+ list_del_init(elem);
+ break;
+ case SWITCHDEV_TRANS_NONE:
+ elem = kzalloc(size + sizeof(*elem), gfp_flags);
+ if (elem)
+ INIT_LIST_HEAD(elem);
+ break;
+ default:
+ break;
+ }
+
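+ /* A list_head bookkeeping header sits just before the returned memory */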
+ return elem ? elem + 1 : NULL;
+}
+
+static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ size_t size)
+{
+ return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
+}
+
+static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ size_t n, size_t size)
+{
+ return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
+}
+
+static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
+{
+ struct list_head *elem;
+
+ /* Frees are ignored if in transaction prepare phase. The
+ * memory remains on the per-port list until freed in the
+ * commit phase.
+ */
+
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
+
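+ /* Step back over the hidden list_head header that precedes the payload */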
+ elem = (struct list_head *)mem - 1;
+ BUG_ON(!list_empty(elem));
+ kfree(elem);
+}
+
struct rocker_wait {
wait_queue_head_t wait;
bool done;
@@ -343,20 +421,23 @@ static void rocker_wait_init(struct rocker_wait *wait)
rocker_wait_reset(wait);
}
-static struct rocker_wait *rocker_wait_create(gfp_t gfp)
+static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ int flags)
{
struct rocker_wait *wait;
- wait = kmalloc(sizeof(*wait), gfp);
+ wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
if (!wait)
return NULL;
rocker_wait_init(wait);
return wait;
}
-static void rocker_wait_destroy(struct rocker_wait *work)
+static void rocker_wait_destroy(enum switchdev_trans trans,
+ struct rocker_wait *wait)
{
- kfree(work);
+ rocker_port_kfree(trans, wait);
}
static bool rocker_wait_event_timeout(struct rocker_wait *wait,
@@ -374,18 +455,18 @@ static void rocker_wait_wake_up(struct rocker_wait *wait)
wake_up(&wait->wait);
}
-static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
return rocker->msix_entries[vector].vector;
}
-static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
return rocker_msix_vector(rocker_port->rocker,
ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}
-static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
return rocker_msix_vector(rocker_port->rocker,
ROCKER_MSIX_VEC_RX(rocker_port->port_number));
@@ -404,9 +485,9 @@ static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
* HW basic testing functions
*****************************/
-static int rocker_reg_test(struct rocker *rocker)
+static int rocker_reg_test(const struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
u64 test_reg;
u64 rnd;
@@ -434,12 +515,12 @@ static int rocker_reg_test(struct rocker *rocker)
return 0;
}
-static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
- u32 test_type, dma_addr_t dma_handle,
- unsigned char *buf, unsigned char *expect,
- size_t size)
+static int rocker_dma_test_one(const struct rocker *rocker,
+ struct rocker_wait *wait, u32 test_type,
+ dma_addr_t dma_handle, const unsigned char *buf,
+ const unsigned char *expect, size_t size)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
int i;
rocker_wait_reset(wait);
@@ -463,7 +544,7 @@ static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
-static int rocker_dma_test_offset(struct rocker *rocker,
+static int rocker_dma_test_offset(const struct rocker *rocker,
struct rocker_wait *wait, int offset)
{
struct pci_dev *pdev = rocker->pdev;
@@ -523,7 +604,8 @@ free_alloc:
return err;
}
-static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+static int rocker_dma_test(const struct rocker *rocker,
+ struct rocker_wait *wait)
{
int i;
int err;
@@ -545,9 +627,9 @@ static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int rocker_basic_hw_test(struct rocker *rocker)
+static int rocker_basic_hw_test(const struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
struct rocker_wait wait;
int err;
@@ -680,7 +762,7 @@ static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
return *(u64 *) rocker_tlv_data(tlv);
}
-static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
const char *buf, int buf_len)
{
const struct rocker_tlv *tlv;
@@ -693,19 +775,19 @@ static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
u32 type = rocker_tlv_type(tlv);
if (type > 0 && type <= maxtype)
- tb[type] = (struct rocker_tlv *) tlv;
+ tb[type] = tlv;
}
}
-static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
const struct rocker_tlv *tlv)
{
rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
rocker_tlv_len(tlv));
}
-static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
- struct rocker_desc_info *desc_info)
+static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
+ const struct rocker_desc_info *desc_info)
{
rocker_tlv_parse(tb, maxtype, desc_info->data,
desc_info->desc->tlv_size);
@@ -790,9 +872,9 @@ static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
}
static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
- struct rocker_tlv *start)
+ const struct rocker_tlv *start)
{
- desc_info->tlv_size = (char *) start - desc_info->data;
+ desc_info->tlv_size = (const char *) start - desc_info->data;
}
/******************************************
@@ -804,7 +886,7 @@ static u32 __pos_inc(u32 pos, size_t limit)
return ++pos == limit ? 0 : pos;
}
-static int rocker_desc_err(struct rocker_desc_info *desc_info)
+static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
@@ -832,31 +914,31 @@ static int rocker_desc_err(struct rocker_desc_info *desc_info)
return -EINVAL;
}
-static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}
-static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
u32 comp_err = desc_info->desc->comp_err;
return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}
-static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
return (void *)(uintptr_t)desc_info->desc->cookie;
}
-static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
void *ptr)
{
desc_info->desc->cookie = (uintptr_t) ptr;
}
static struct rocker_desc_info *
-rocker_desc_head_get(struct rocker_dma_ring_info *info)
+rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
static struct rocker_desc_info *desc_info;
u32 head = __pos_inc(info->head, info->size);
@@ -868,15 +950,15 @@ rocker_desc_head_get(struct rocker_dma_ring_info *info)
return desc_info;
}
-static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
desc_info->desc->buf_size = desc_info->data_size;
desc_info->desc->tlv_size = desc_info->tlv_size;
}
-static void rocker_desc_head_set(struct rocker *rocker,
+static void rocker_desc_head_set(const struct rocker *rocker,
struct rocker_dma_ring_info *info,
- struct rocker_desc_info *desc_info)
+ const struct rocker_desc_info *desc_info)
{
u32 head = __pos_inc(info->head, info->size);
@@ -901,8 +983,8 @@ rocker_desc_tail_get(struct rocker_dma_ring_info *info)
return desc_info;
}
-static void rocker_dma_ring_credits_set(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_credits_set(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
u32 credits)
{
if (credits)
@@ -915,7 +997,7 @@ static unsigned long rocker_dma_ring_size_fix(size_t size)
min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
-static int rocker_dma_ring_create(struct rocker *rocker,
+static int rocker_dma_ring_create(const struct rocker *rocker,
unsigned int type,
size_t size,
struct rocker_dma_ring_info *info)
@@ -951,8 +1033,8 @@ static int rocker_dma_ring_create(struct rocker *rocker,
return 0;
}
-static void rocker_dma_ring_destroy(struct rocker *rocker,
- struct rocker_dma_ring_info *info)
+static void rocker_dma_ring_destroy(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info)
{
rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
@@ -962,7 +1044,7 @@ static void rocker_dma_ring_destroy(struct rocker *rocker,
kfree(info->desc_info);
}
-static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
struct rocker_dma_ring_info *info)
{
int i;
@@ -977,8 +1059,8 @@ static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
rocker_desc_commit(&info->desc_info[i]);
}
-static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
int direction, size_t buf_size)
{
struct pci_dev *pdev = rocker->pdev;
@@ -1015,7 +1097,7 @@ static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
rollback:
for (i--; i >= 0; i--) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
@@ -1024,15 +1106,15 @@ rollback:
return err;
}
-static void rocker_dma_ring_bufs_free(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
int direction)
{
struct pci_dev *pdev = rocker->pdev;
int i;
for (i = 0; i < info->size; i++) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
struct rocker_desc *desc = &info->desc[i];
desc->buf_addr = 0;
@@ -1045,7 +1127,7 @@ static void rocker_dma_ring_bufs_free(struct rocker *rocker,
static int rocker_dma_rings_init(struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
int err;
err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
@@ -1102,11 +1184,11 @@ static void rocker_dma_rings_fini(struct rocker *rocker)
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
-static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
struct sk_buff *skb, size_t buf_len)
{
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
@@ -1126,13 +1208,12 @@ tlv_put_failure:
return -EMSGSIZE;
}
-static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
-static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info)
{
struct net_device *dev = rocker_port->dev;
@@ -1149,8 +1230,7 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
skb = netdev_alloc_skb_ip_align(dev, buf_len);
if (!skb)
return -ENOMEM;
- err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
- skb, buf_len);
+ err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
if (err) {
dev_kfree_skb_any(skb);
return err;
@@ -1159,8 +1239,8 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
return 0;
}
-static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
- struct rocker_tlv **attrs)
+static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
+ const struct rocker_tlv **attrs)
{
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
@@ -1174,10 +1254,10 @@ static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}
-static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
- struct rocker_desc_info *desc_info)
+static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
if (!skb)
@@ -1187,15 +1267,15 @@ static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
dev_kfree_skb_any(skb);
}
-static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
- struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
int i;
int err;
for (i = 0; i < rx_ring->size; i++) {
- err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+ err = rocker_dma_rx_ring_skb_alloc(rocker_port,
&rx_ring->desc_info[i]);
if (err)
goto rollback;
@@ -1208,10 +1288,10 @@ rollback:
return err;
}
-static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
- struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
int i;
for (i = 0; i < rx_ring->size; i++)
@@ -1257,7 +1337,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
goto err_dma_rx_ring_bufs_alloc;
}
- err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+ err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
goto err_dma_rx_ring_skbs_alloc;
@@ -1283,7 +1363,7 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
struct rocker *rocker = rocker_port->rocker;
- rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+ rocker_dma_rx_ring_skbs_free(rocker_port);
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
PCI_DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
@@ -1292,7 +1372,8 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
-static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+static void rocker_port_set_enable(const struct rocker_port *rocker_port,
+ bool enable)
{
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
@@ -1310,7 +1391,7 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
struct rocker *rocker = dev_id;
- struct rocker_desc_info *desc_info;
+ const struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
u32 credits = 0;
@@ -1319,7 +1400,7 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
wait = rocker_desc_cookie_ptr_get(desc_info);
if (wait->nowait) {
rocker_desc_gen_clear(desc_info);
- rocker_wait_destroy(wait);
+ rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
} else {
rocker_wait_wake_up(wait);
}
@@ -1331,22 +1412,22 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rocker_port_link_up(struct rocker_port *rocker_port)
+static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
netif_carrier_on(rocker_port->dev);
netdev_info(rocker_port->dev, "Link is up\n");
}
-static void rocker_port_link_down(struct rocker_port *rocker_port)
+static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
netif_carrier_off(rocker_port->dev);
netdev_info(rocker_port->dev, "Link is down\n");
}
-static int rocker_event_link_change(struct rocker *rocker,
+static int rocker_event_link_change(const struct rocker *rocker,
const struct rocker_tlv *info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
unsigned int port_number;
bool link_up;
struct rocker_port *rocker_port;
@@ -1373,22 +1454,18 @@ static int rocker_event_link_change(struct rocker *rocker,
return 0;
}
-#define ROCKER_OP_FLAG_REMOVE BIT(0)
-#define ROCKER_OP_FLAG_NOWAIT BIT(1)
-#define ROCKER_OP_FLAG_LEARNED BIT(2)
-#define ROCKER_OP_FLAG_REFRESH BIT(3)
-
static int rocker_port_fdb(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
const unsigned char *addr,
__be16 vlan_id, int flags);
-static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
const struct rocker_tlv *info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
unsigned int port_number;
struct rocker_port *rocker_port;
- unsigned char *addr;
+ const unsigned char *addr;
int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
__be16 vlan_id;
@@ -1411,14 +1488,15 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker,
rocker_port->stp_state != BR_STATE_FORWARDING)
return 0;
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
+ addr, vlan_id, flags);
}
-static int rocker_event_process(struct rocker *rocker,
- struct rocker_desc_info *desc_info)
+static int rocker_event_process(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
- struct rocker_tlv *info;
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+ const struct rocker_tlv *info;
u16 type;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
@@ -1442,8 +1520,8 @@ static int rocker_event_process(struct rocker *rocker,
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
struct rocker *rocker = dev_id;
- struct pci_dev *pdev = rocker->pdev;
- struct rocker_desc_info *desc_info;
+ const struct pci_dev *pdev = rocker->pdev;
+ const struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -1487,65 +1565,75 @@ static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
* Command interface
********************/
-typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv);
+typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv);
+
+typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv);
-static int rocker_cmd_exec(struct rocker *rocker,
- struct rocker_port *rocker_port,
- rocker_cmd_cb_t prepare, void *prepare_priv,
- rocker_cmd_cb_t process, void *process_priv,
- bool nowait)
+static int rocker_cmd_exec(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ rocker_cmd_prep_cb_t prepare, void *prepare_priv,
+ rocker_cmd_proc_cb_t process, void *process_priv)
{
+ struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
- unsigned long flags;
+ bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
+ unsigned long lock_flags;
int err;
- wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
+ wait = rocker_wait_create(rocker_port, trans, flags);
if (!wait)
return -ENOMEM;
wait->nowait = nowait;
- spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+ spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
+
desc_info = rocker_desc_head_get(&rocker->cmd_ring);
if (!desc_info) {
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
err = -EAGAIN;
goto out;
}
- err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+
+ err = prepare(rocker_port, desc_info, prepare_priv);
if (err) {
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
goto out;
}
+
rocker_desc_cookie_ptr_set(desc_info, wait);
- rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+
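+ /* In the prepare phase the command is only staged, not posted to the hardware */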
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
+
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
if (nowait)
return 0;
- if (!rocker_wait_event_timeout(wait, HZ / 10))
- return -EIO;
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!rocker_wait_event_timeout(wait, HZ / 10))
+ return -EIO;
err = rocker_desc_err(desc_info);
if (err)
return err;
if (process)
- err = process(rocker, rocker_port, desc_info, process_priv);
+ err = process(rocker_port, desc_info, process_priv);
rocker_desc_gen_clear(desc_info);
out:
- rocker_wait_destroy(wait);
+ rocker_wait_destroy(trans, wait);
return err;
}
static int
-rocker_cmd_get_port_settings_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1565,14 +1653,13 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
struct ethtool_cmd *ecmd = priv;
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
u32 speed;
u8 duplex;
u8 autoneg;
@@ -1604,15 +1691,14 @@ rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
unsigned char *macaddr = priv;
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- struct rocker_tlv *attr;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attr;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1637,17 +1723,16 @@ struct port_name {
};
static int
-rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
struct port_name *name = priv;
- struct rocker_tlv *attr;
+ const struct rocker_tlv *attr;
size_t i, j, len;
- char *str;
+ const char *str;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1679,8 +1764,7 @@ rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1710,12 +1794,11 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- unsigned char *macaddr = priv;
+ const unsigned char *macaddr = priv;
struct rocker_tlv *cmd_info;
if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1735,8 +1818,7 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_learning_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1761,46 +1843,48 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker,
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_ethtool_proc,
- ecmd, false);
+ ecmd);
}
static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_macaddr_proc,
- macaddr, false);
+ macaddr);
}
static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_set_port_settings_ethtool_prep,
- ecmd, NULL, NULL, false);
+ ecmd, NULL, NULL);
}
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_set_port_settings_macaddr_prep,
- macaddr, NULL, NULL, false);
+ macaddr, NULL, NULL);
}
-static int rocker_port_set_learning(struct rocker_port *rocker_port)
+static int rocker_port_set_learning(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, trans, 0,
rocker_cmd_set_port_learning_prep,
- NULL, NULL, NULL, false);
+ NULL, NULL, NULL);
}
-static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.ig_port.in_pport))
@@ -1815,8 +1899,9 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.vlan.in_pport))
@@ -1838,8 +1923,9 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.term_mac.in_pport))
@@ -1875,7 +1961,7 @@ static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
entry->key.ucast_routing.eth_type))
@@ -1896,8 +1982,9 @@ rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (entry->key.bridge.has_eth_dst &&
rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
@@ -1929,8 +2016,9 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.acl.in_pport))
@@ -1995,12 +2083,11 @@ static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_flow_tbl_entry *entry = priv;
+ const struct rocker_flow_tbl_entry *entry = priv;
struct rocker_tlv *cmd_info;
int err = 0;
@@ -2053,8 +2140,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
return 0;
}
-static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2090,7 +2176,7 @@ rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
entry->l2_rewrite.group_id))
@@ -2113,7 +2199,7 @@ rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
int i;
struct rocker_tlv *group_ids;
@@ -2139,7 +2225,7 @@ rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
@@ -2163,8 +2249,7 @@ rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_group_tbl_add(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2209,8 +2294,7 @@ static int rocker_cmd_group_tbl_add(struct rocker *rocker,
return 0;
}
-static int rocker_cmd_group_tbl_del(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2293,7 +2377,8 @@ static void rocker_free_tbls(struct rocker *rocker)
}
static struct rocker_flow_tbl_entry *
-rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+rocker_flow_tbl_find(const struct rocker *rocker,
+ const struct rocker_flow_tbl_entry *match)
{
struct rocker_flow_tbl_entry *found;
size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
@@ -2308,24 +2393,25 @@ rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
}
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
- struct rocker_flow_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans, int flags,
+ struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
- unsigned long flags;
+ unsigned long lock_flags;
match->key_crc32 = crc32(~0, &match->key, key_len);
- spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+ spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
found = rocker_flow_tbl_find(rocker, match);
if (found) {
match->cookie = found->cookie;
- hash_del(&found->entry);
- kfree(found);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
+ rocker_port_kfree(trans, found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
} else {
@@ -2334,73 +2420,69 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
}
- hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
- spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
- return rocker_cmd_exec(rocker, rocker_port,
- rocker_cmd_flow_tbl_add,
- found, NULL, NULL, nowait);
+ return rocker_cmd_exec(rocker_port, trans, flags,
+ rocker_cmd_flow_tbl_add, found, NULL, NULL);
}
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
- struct rocker_flow_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans, int flags,
+ struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
- unsigned long flags;
+ unsigned long lock_flags;
int err = 0;
match->key_crc32 = crc32(~0, &match->key, key_len);
- spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+ spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
found = rocker_flow_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
- spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
- kfree(match);
+ rocker_port_kfree(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker, rocker_port,
+ err = rocker_cmd_exec(rocker_port, trans, flags,
rocker_cmd_flow_tbl_del,
- found, NULL, NULL, nowait);
- kfree(found);
+ found, NULL, NULL);
+ rocker_port_kfree(trans, found);
}
return err;
}
-static gfp_t rocker_op_flags_gfp(int flags)
-{
- return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
-}
-
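rocker_op_flags_gfp() disappears because allocation policy now lives inside the transaction-aware helpers used throughout the hunks below: rocker_port_kzalloc()/rocker_port_kcalloc() for allocation and rocker_port_kfree() for release. Their real definitions sit elsewhere in the patch; the fragment below is only a plausible sketch of the behaviour implied by the call sites here and by rocker_port_trans_abort() further down -- prepare-phase allocations are parked on rocker_port->trans_mem so an aborted transaction can reclaim them, the commit pass reuses what prepare set aside, and frees during prepare are deferred.

/* Plausible sketch only, not the patch's actual helpers.  Assumes a small
 * list_head header is prepended to every transactional allocation so the
 * abort path can walk rocker_port->trans_mem and kfree() whatever the
 * prepare pass left behind.
 */
static void *example_port_kzalloc(struct rocker_port *rocker_port,
				  enum switchdev_trans trans, int flags,
				  size_t size)
{
	gfp_t gfp = (flags & ROCKER_OP_FLAG_NOWAIT) ? GFP_ATOMIC : GFP_KERNEL;
	struct list_head *elem = NULL;

	switch (trans) {
	case SWITCHDEV_TRANS_PREPARE:
		/* Allocate now and park it so abort can find it. */
		elem = kzalloc(sizeof(*elem) + size, gfp);
		if (elem)
			list_add_tail(elem, &rocker_port->trans_mem);
		break;
	case SWITCHDEV_TRANS_COMMIT:
		/* Commit re-runs the prepare code path, so hand back the
		 * memory the matching prepare call set aside.
		 */
		if (WARN_ON(list_empty(&rocker_port->trans_mem)))
			break;
		elem = rocker_port->trans_mem.next;
		list_del_init(elem);
		break;
	default:	/* SWITCHDEV_TRANS_NONE: plain allocation */
		elem = kzalloc(sizeof(*elem) + size, gfp);
		if (elem)
			INIT_LIST_HEAD(elem);
		break;
	}

	return elem ? (void *)(elem + 1) : NULL;
}

static void example_port_kfree(enum switchdev_trans trans, const void *mem)
{
	struct list_head *elem;

	if (!mem)
		return;
	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;		/* still owned by trans_mem; commit or abort deals with it */

	elem = (struct list_head *)mem - 1;
	kfree(elem);
}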
static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
- int flags, struct rocker_flow_tbl_entry *entry)
+ enum switchdev_trans trans, int flags,
+ struct rocker_flow_tbl_entry *entry)
{
- bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_flow_tbl_del(rocker_port, entry, nowait);
+ return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
else
- return rocker_flow_tbl_add(rocker_port, entry, nowait);
+ return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
- int flags, u32 in_pport, u32 in_pport_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, u32 in_pport_mask,
enum rocker_of_dpa_table_id goto_tbl)
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2410,18 +2492,19 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
entry->key.ig_port.in_pport_mask = in_pport_mask;
entry->key.ig_port.goto_tbl = goto_tbl;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
- int flags, u32 in_pport,
- __be16 vlan_id, __be16 vlan_id_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, __be16 vlan_id,
+ __be16 vlan_id_mask,
enum rocker_of_dpa_table_id goto_tbl,
bool untagged, __be16 new_vlan_id)
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2435,10 +2518,11 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
entry->key.vlan.untagged = untagged;
entry->key.vlan.new_vlan_id = new_vlan_id;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
u32 in_pport, u32 in_pport_mask,
__be16 eth_type, const u8 *eth_dst,
const u8 *eth_dst_mask, __be16 vlan_id,
@@ -2447,7 +2531,7 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2471,11 +2555,11 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
entry->key.term_mac.vlan_id_mask = vlan_id_mask;
entry->key.term_mac.copy_to_cpu = copy_to_cpu;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
- int flags,
+ enum switchdev_trans trans, int flags,
const u8 *eth_dst, const u8 *eth_dst_mask,
__be16 vlan_id, u32 tunnel_id,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2487,7 +2571,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
bool wild = false;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2500,7 +2584,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
if (eth_dst_mask) {
entry->key.bridge.has_eth_dst_mask = 1;
ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
- if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
+ if (!ether_addr_equal(eth_dst_mask, ff_mac))
wild = true;
}
@@ -2525,10 +2609,11 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
entry->key.bridge.group_id = group_id;
entry->key.bridge.copy_to_cpu = copy_to_cpu;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2536,7 +2621,7 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2550,30 +2635,29 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
entry->key_len = offsetof(struct rocker_flow_tbl_key,
ucast_routing.group_id);
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
- int flags, u32 in_pport,
- u32 in_pport_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, u32 in_pport_mask,
const u8 *eth_src, const u8 *eth_src_mask,
const u8 *eth_dst, const u8 *eth_dst_mask,
- __be16 eth_type,
- __be16 vlan_id, __be16 vlan_id_mask,
- u8 ip_proto, u8 ip_proto_mask,
- u8 ip_tos, u8 ip_tos_mask,
+ __be16 eth_type, __be16 vlan_id,
+ __be16 vlan_id_mask, u8 ip_proto,
+ u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
u32 group_id)
{
u32 priority;
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
priority = ROCKER_PRIORITY_ACL_NORMAL;
if (eth_dst && eth_dst_mask) {
- if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
+ if (ether_addr_equal(eth_dst_mask, mcast_mac))
priority = ROCKER_PRIORITY_ACL_DFLT;
else if (is_link_local_ether_addr(eth_dst))
priority = ROCKER_PRIORITY_ACL_CTRL;
@@ -2602,12 +2686,12 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
entry->key.acl.ip_tos_mask = ip_tos_mask;
entry->key.acl.group_id = group_id;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static struct rocker_group_tbl_entry *
-rocker_group_tbl_find(struct rocker *rocker,
- struct rocker_group_tbl_entry *match)
+rocker_group_tbl_find(const struct rocker *rocker,
+ const struct rocker_group_tbl_entry *match)
{
struct rocker_group_tbl_entry *found;
@@ -2620,34 +2704,36 @@ rocker_group_tbl_find(struct rocker *rocker,
return NULL;
}
-static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
+static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
+ struct rocker_group_tbl_entry *entry)
{
switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
- kfree(entry->group_ids);
+ rocker_port_kfree(trans, entry->group_ids);
break;
default:
break;
}
- kfree(entry);
+ rocker_port_kfree(trans, entry);
}
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
- struct rocker_group_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans, int flags,
+ struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
- unsigned long flags;
+ unsigned long lock_flags;
- spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+ spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
found = rocker_group_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
- rocker_group_tbl_entry_free(found);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
+ rocker_group_tbl_entry_free(trans, found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
} else {
@@ -2655,116 +2741,118 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
}
- hash_add(rocker->group_tbl, &found->entry, found->group_id);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->group_tbl, &found->entry, found->group_id);
- spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
- return rocker_cmd_exec(rocker, rocker_port,
- rocker_cmd_group_tbl_add,
- found, NULL, NULL, nowait);
+ return rocker_cmd_exec(rocker_port, trans, flags,
+ rocker_cmd_group_tbl_add, found, NULL, NULL);
}
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
- struct rocker_group_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans, int flags,
+ struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
- unsigned long flags;
+ unsigned long lock_flags;
int err = 0;
- spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+ spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
found = rocker_group_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
}
- spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
- rocker_group_tbl_entry_free(match);
+ rocker_group_tbl_entry_free(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker, rocker_port,
+ err = rocker_cmd_exec(rocker_port, trans, flags,
rocker_cmd_group_tbl_del,
- found, NULL, NULL, nowait);
- rocker_group_tbl_entry_free(found);
+ found, NULL, NULL);
+ rocker_group_tbl_entry_free(trans, found);
}
return err;
}
static int rocker_group_tbl_do(struct rocker_port *rocker_port,
- int flags, struct rocker_group_tbl_entry *entry)
+ enum switchdev_trans trans, int flags,
+ struct rocker_group_tbl_entry *entry)
{
- bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_group_tbl_del(rocker_port, entry, nowait);
+ return rocker_group_tbl_del(rocker_port, trans, flags, entry);
else
- return rocker_group_tbl_add(rocker_port, entry, nowait);
+ return rocker_group_tbl_add(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_interface(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- u32 out_pport, int pop_vlan)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, u32 out_pport,
+ int pop_vlan)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
entry->l2_interface.pop_vlan = pop_vlan;
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
int flags, u8 group_count,
- u32 *group_ids, u32 group_id)
+ const u32 *group_ids, u32 group_id)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
entry->group_id = group_id;
entry->group_count = group_count;
- entry->group_ids = kcalloc(group_count, sizeof(u32),
- rocker_op_flags_gfp(flags));
+ entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
+ group_count, sizeof(u32));
if (!entry->group_ids) {
- kfree(entry);
+ rocker_port_kfree(trans, entry);
return -ENOMEM;
}
memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_flood(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- u8 group_count, u32 *group_ids,
- u32 group_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, u8 group_count,
+ const u32 *group_ids, u32 group_id)
{
- return rocker_group_l2_fan_out(rocker_port, flags,
+ return rocker_group_l2_fan_out(rocker_port, trans, flags,
group_count, group_ids,
group_id);
}
static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
- int flags, u32 index, u8 *src_mac,
- u8 *dst_mac, __be16 vlan_id,
- bool ttl_check, u32 pport)
+ enum switchdev_trans trans, int flags,
+ u32 index, const u8 *src_mac, const u8 *dst_mac,
+ __be16 vlan_id, bool ttl_check, u32 pport)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2777,11 +2865,11 @@ static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
entry->l3_unicast.ttl_check = ttl_check;
entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static struct rocker_neigh_tbl_entry *
- rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
struct rocker_neigh_tbl_entry *found;
@@ -2794,37 +2882,44 @@ static struct rocker_neigh_tbl_entry *
}
static void _rocker_neigh_add(struct rocker *rocker,
+ enum switchdev_trans trans,
struct rocker_neigh_tbl_entry *entry)
{
- entry->index = rocker->neigh_tbl_next_index++;
+ if (trans != SWITCHDEV_TRANS_COMMIT)
+ entry->index = rocker->neigh_tbl_next_index++;
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
entry->ref_count++;
hash_add(rocker->neigh_tbl, &entry->entry,
be32_to_cpu(entry->ip_addr));
}
-static void _rocker_neigh_del(struct rocker *rocker,
+static void _rocker_neigh_del(enum switchdev_trans trans,
struct rocker_neigh_tbl_entry *entry)
{
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
if (--entry->ref_count == 0) {
hash_del(&entry->entry);
- kfree(entry);
+ rocker_port_kfree(trans, entry);
}
}
-static void _rocker_neigh_update(struct rocker *rocker,
- struct rocker_neigh_tbl_entry *entry,
- u8 *eth_dst, bool ttl_check)
+static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
+ enum switchdev_trans trans,
+ const u8 *eth_dst, bool ttl_check)
{
if (eth_dst) {
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = ttl_check;
- } else {
+ } else if (trans != SWITCHDEV_TRANS_PREPARE) {
entry->ref_count++;
}
}
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
- int flags, __be32 ip_addr, u8 *eth_dst)
+ enum switchdev_trans trans,
+ int flags, __be32 ip_addr, const u8 *eth_dst)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_neigh_tbl_entry *entry;
@@ -2840,7 +2935,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2857,12 +2952,12 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
entry->dev = rocker_port->dev;
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = true;
- _rocker_neigh_add(rocker, entry);
+ _rocker_neigh_add(rocker, trans, entry);
} else if (removing) {
memcpy(entry, found, sizeof(*entry));
- _rocker_neigh_del(rocker, found);
+ _rocker_neigh_del(trans, found);
} else if (updating) {
- _rocker_neigh_update(rocker, found, eth_dst, true);
+ _rocker_neigh_update(found, trans, eth_dst, true);
memcpy(entry, found, sizeof(*entry));
} else {
err = -ENOENT;
@@ -2879,7 +2974,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
* other routes' nexthops.
*/
- err = rocker_group_l3_unicast(rocker_port, flags,
+ err = rocker_group_l3_unicast(rocker_port, trans, flags,
entry->index,
rocker_port->dev->dev_addr,
entry->eth_dst,
@@ -2895,7 +2990,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
if (adding || removing) {
group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
- err = rocker_flow_tbl_ucast4_routing(rocker_port,
+ err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
eth_type, ip_addr,
inet_make_mask(32),
priority, goto_tbl,
@@ -2909,13 +3004,13 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
err_out:
if (!adding)
- kfree(entry);
+ rocker_port_kfree(trans, entry);
return err;
}
static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
- __be32 ip_addr)
+ enum switchdev_trans trans, __be32 ip_addr)
{
struct net_device *dev = rocker_port->dev;
struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
@@ -2933,7 +3028,8 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
*/
if (n->nud_state & NUD_VALID)
- err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+ err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
+ ip_addr, n->ha);
else
neigh_event_send(n, NULL);
@@ -2941,7 +3037,8 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
__be32 ip_addr, u32 *index)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2954,7 +3051,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
bool resolved = true;
int err = 0;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2971,13 +3068,13 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
if (adding) {
entry->ip_addr = ip_addr;
entry->dev = rocker_port->dev;
- _rocker_neigh_add(rocker, entry);
+ _rocker_neigh_add(rocker, trans, entry);
*index = entry->index;
resolved = false;
} else if (removing) {
- _rocker_neigh_del(rocker, found);
+ _rocker_neigh_del(trans, found);
} else if (updating) {
- _rocker_neigh_update(rocker, found, NULL, false);
+ _rocker_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
} else {
err = -ENOENT;
@@ -2986,7 +3083,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
if (!adding)
- kfree(entry);
+ rocker_port_kfree(trans, entry);
if (err)
return err;
@@ -2994,24 +3091,25 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
/* Resolved means neigh ip_addr is resolved to neigh mac. */
if (!resolved)
- err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+ err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
return err;
}
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
int flags, __be16 vlan_id)
{
struct rocker_port *p;
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
u32 *group_ids;
u8 group_count = 0;
int err = 0;
int i;
- group_ids = kcalloc(rocker->port_count, sizeof(u32),
- rocker_op_flags_gfp(flags));
+ group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
+ rocker->port_count, sizeof(u32));
if (!group_ids)
return -ENOMEM;
@@ -3022,6 +3120,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
for (i = 0; i < rocker->port_count; i++) {
p = rocker->ports[i];
+ if (!p)
+ continue;
if (!rocker_port_is_bridged(p))
continue;
if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
@@ -3034,23 +3134,22 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
if (group_count == 0)
goto no_ports_in_vlan;
- err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
- group_count, group_ids,
- group_id);
+ err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
+ group_count, group_ids, group_id);
if (err)
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 flood group\n", err);
no_ports_in_vlan:
- kfree(group_ids);
+ rocker_port_kfree(trans, group_ids);
return err;
}
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- bool pop_vlan)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, bool pop_vlan)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct rocker_port *p;
bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
u32 out_pport;
@@ -3065,9 +3164,8 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
if (rocker_port->stp_state == BR_STATE_LEARNING ||
rocker_port->stp_state == BR_STATE_FORWARDING) {
out_pport = rocker_port->pport;
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for pport %d\n",
@@ -3083,7 +3181,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
for (i = 0; i < rocker->port_count; i++) {
p = rocker->ports[i];
- if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+ if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
ref++;
}
@@ -3091,9 +3189,8 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
return 0;
out_pport = 0;
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for CPU port\n", err);
@@ -3149,14 +3246,14 @@ static struct rocker_ctrl {
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
- __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport = rocker_port->pport;
u32 in_pport_mask = 0xffffffff;
u32 out_pport = 0;
- u8 *eth_src = NULL;
- u8 *eth_src_mask = NULL;
+ const u8 *eth_src = NULL;
+ const u8 *eth_src_mask = NULL;
__be16 vlan_id_mask = htons(0xffff);
u8 ip_proto = 0;
u8 ip_proto_mask = 0;
@@ -3165,7 +3262,7 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
int err;
- err = rocker_flow_tbl_acl(rocker_port, flags,
+ err = rocker_flow_tbl_acl(rocker_port, trans, flags,
in_pport, in_pport_mask,
eth_src, eth_src_mask,
ctrl->eth_dst, ctrl->eth_dst_mask,
@@ -3182,7 +3279,8 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl,
__be16 vlan_id)
{
enum rocker_of_dpa_table_id goto_tbl =
@@ -3194,7 +3292,7 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
if (!rocker_port_is_bridged(rocker_port))
return 0;
- err = rocker_flow_tbl_bridge(rocker_port, flags,
+ err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
ctrl->eth_dst, ctrl->eth_dst_mask,
vlan_id, tunnel_id,
goto_tbl, group_id, ctrl->copy_to_cpu);
@@ -3206,8 +3304,8 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
- __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 vlan_id_mask = htons(0xffff);
@@ -3216,7 +3314,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
if (ntohs(vlan_id) == 0)
vlan_id = rocker_port->internal_vlan_id;
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
ctrl->eth_type, ctrl->eth_dst,
ctrl->eth_dst_mask, vlan_id,
@@ -3229,32 +3327,34 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
- struct rocker_ctrl *ctrl, __be16 vlan_id)
+static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
if (ctrl->acl)
- return rocker_port_ctrl_vlan_acl(rocker_port, flags,
+ return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
ctrl, vlan_id);
if (ctrl->bridge)
- return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
+ return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
ctrl, vlan_id);
if (ctrl->term)
- return rocker_port_ctrl_vlan_term(rocker_port, flags,
+ return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
ctrl, vlan_id);
return -EOPNOTSUPP;
}
static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id)
{
int err = 0;
int i;
for (i = 0; i < ROCKER_CTRL_MAX; i++) {
if (rocker_port->ctrls[i]) {
- err = rocker_port_ctrl_vlan(rocker_port, flags,
+ err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
&rocker_ctrls[i], vlan_id);
if (err)
return err;
@@ -3264,8 +3364,9 @@ static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
- struct rocker_ctrl *ctrl)
+static int rocker_port_ctrl(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl)
{
u16 vid;
int err = 0;
@@ -3273,7 +3374,7 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
for (vid = 1; vid < VLAN_N_VID; vid++) {
if (!test_bit(vid, rocker_port->vlan_bitmap))
continue;
- err = rocker_port_ctrl_vlan(rocker_port, flags,
+ err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
ctrl, htons(vid));
if (err)
break;
@@ -3282,8 +3383,8 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
return err;
}
-static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
- u16 vid)
+static int rocker_port_vlan(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags, u16 vid)
{
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -3297,50 +3398,57 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
- if (adding && test_and_set_bit(ntohs(internal_vlan_id),
- rocker_port->vlan_bitmap))
+ if (adding && test_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
return 0; /* already added */
- else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
- rocker_port->vlan_bitmap))
+ else if (!adding && !test_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
return 0; /* already removed */
+ change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
if (adding) {
- err = rocker_port_ctrl_vlan_add(rocker_port, flags,
+ err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
internal_vlan_id);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port ctrl vlan add\n", err);
- return err;
+ goto err_out;
}
}
- err = rocker_port_vlan_l2_groups(rocker_port, flags,
+ err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
internal_vlan_id, untagged);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 groups\n", err);
- return err;
+ goto err_out;
}
- err = rocker_port_vlan_flood_group(rocker_port, flags,
+ err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
internal_vlan_id);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 flood group\n", err);
- return err;
+ goto err_out;
}
- err = rocker_flow_tbl_vlan(rocker_port, flags,
+ err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
in_pport, vlan_id, vlan_id_mask,
goto_tbl, untagged, internal_vlan_id);
if (err)
netdev_err(rocker_port->dev,
"Error (%d) port VLAN table\n", err);
+err_out:
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
return err;
}
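Note the change in rocker_port_vlan() above: test_and_set_bit()/test_and_clear_bit() become a plain test_bit() followed by change_bit(), and the new err_out label flips the bit back whenever the transaction is still in the prepare phase. The idiom is to make the speculative software change so the rest of the function sees the post-change view, then undo it before returning from a dry run. A minimal, generic sketch of that idiom (hypothetical helper, not part of the patch) looks like this:

/* Hypothetical illustration of the prepare-safe toggle used above. */
static int example_prepare_safe_vlan_bit(struct rocker_port *rocker_port,
					 enum switchdev_trans trans, u16 vid,
					 int (*program)(struct rocker_port *,
							enum switchdev_trans))
{
	int err;

	if (test_bit(vid, rocker_port->vlan_bitmap))
		return 0;				/* already added */

	change_bit(vid, rocker_port->vlan_bitmap);	/* speculative change */
	err = program(rocker_port, trans);		/* allocations land on trans_mem */

	if (trans == SWITCHDEV_TRANS_PREPARE)
		change_bit(vid, rocker_port->vlan_bitmap);	/* dry run: undo */

	return err;
}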
-static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
+static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags)
{
enum rocker_of_dpa_table_id goto_tbl;
u32 in_pport;
@@ -3355,7 +3463,7 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
in_pport_mask = 0xffff0000;
goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
- err = rocker_flow_tbl_ig_port(rocker_port, flags,
+ err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
in_pport, in_pport_mask,
goto_tbl);
if (err)
@@ -3367,7 +3475,8 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
struct rocker_fdb_learn_work {
struct work_struct work;
- struct net_device *dev;
+ struct rocker_port *rocker_port;
+ enum switchdev_trans trans;
int flags;
u8 addr[ETH_ALEN];
u16 vid;
@@ -3375,27 +3484,28 @@ struct rocker_fdb_learn_work {
static void rocker_port_fdb_learn_work(struct work_struct *work)
{
- struct rocker_fdb_learn_work *lw =
+ const struct rocker_fdb_learn_work *lw =
container_of(work, struct rocker_fdb_learn_work, work);
bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
- struct netdev_switch_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info;
info.addr = lw->addr;
info.vid = lw->vid;
if (learned && removing)
- call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
- lw->dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ lw->rocker_port->dev, &info.info);
else if (learned && !removing)
- call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
- lw->dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ lw->rocker_port->dev, &info.info);
- kfree(work);
+ rocker_port_kfree(lw->trans, work);
}
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
- int flags, const u8 *addr, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const u8 *addr, __be16 vlan_id)
{
struct rocker_fdb_learn_work *lw;
enum rocker_of_dpa_table_id goto_tbl =
@@ -3411,8 +3521,8 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
- err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
- vlan_id, tunnel_id, goto_tbl,
+ err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
+ NULL, vlan_id, tunnel_id, goto_tbl,
group_id, copy_to_cpu);
if (err)
return err;
@@ -3424,24 +3534,29 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
if (!rocker_port_is_bridged(rocker_port))
return 0;
- lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
+ lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
if (!lw)
return -ENOMEM;
INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
- lw->dev = rocker_port->dev;
+ lw->rocker_port = rocker_port;
+ lw->trans = trans;
lw->flags = flags;
ether_addr_copy(lw->addr, addr);
lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
- schedule_work(&lw->work);
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ rocker_port_kfree(trans, lw);
+ else
+ schedule_work(&lw->work);
return 0;
}
static struct rocker_fdb_tbl_entry *
-rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+rocker_fdb_tbl_find(const struct rocker *rocker,
+ const struct rocker_fdb_tbl_entry *match)
{
struct rocker_fdb_tbl_entry *found;
@@ -3453,6 +3568,7 @@ rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
const unsigned char *addr,
__be16 vlan_id, int flags)
{
@@ -3462,7 +3578,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
unsigned long lock_flags;
- fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
+ fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
if (!fdb)
return -ENOMEM;
@@ -3477,32 +3593,34 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
found = rocker_fdb_tbl_find(rocker, fdb);
if (removing && found) {
- kfree(fdb);
- hash_del(&found->entry);
+ rocker_port_kfree(trans, fdb);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
} else if (!removing && !found) {
- hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
}
spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
if (!found != !removing) {
- kfree(fdb);
+ rocker_port_kfree(trans, fdb);
if (!found && removing)
return 0;
/* Refreshing existing to update aging timers */
flags |= ROCKER_OP_FLAG_REFRESH;
}
- return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
+ return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}
-static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
+static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_fdb_tbl_entry *found;
unsigned long lock_flags;
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
struct hlist_node *tmp;
int bkt;
int err = 0;
@@ -3511,6 +3629,8 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
rocker_port->stp_state == BR_STATE_FORWARDING)
return 0;
+ flags |= ROCKER_OP_FLAG_REMOVE;
+
spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
@@ -3518,12 +3638,13 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
continue;
if (!found->learned)
continue;
- err = rocker_port_fdb_learn(rocker_port, flags,
+ err = rocker_port_fdb_learn(rocker_port, trans, flags,
found->key.addr,
found->key.vlan_id);
if (err)
goto err_out;
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
}
err_out:
@@ -3533,7 +3654,8 @@ err_out:
}
static int rocker_port_router_mac(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 eth_type;
@@ -3546,7 +3668,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
vlan_id = rocker_port->internal_vlan_id;
eth_type = htons(ETH_P_IP);
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
eth_type, rocker_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3555,7 +3677,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
return err;
eth_type = htons(ETH_P_IPV6);
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
eth_type, rocker_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3564,13 +3686,13 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_fwding(struct rocker_port *rocker_port)
+static int rocker_port_fwding(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags)
{
bool pop_vlan;
u32 out_pport;
__be16 vlan_id;
u16 vid;
- int flags = ROCKER_OP_FLAG_NOWAIT;
int err;
/* Port will be forwarding-enabled if its STP state is LEARNING
@@ -3590,9 +3712,8 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
continue;
vlan_id = htons(vid);
pop_vlan = rocker_vlan_id_is_internal(vlan_id);
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for pport %d\n",
@@ -3604,13 +3725,21 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
return 0;
}
-static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
+static int rocker_port_stp_update(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ u8 state)
{
bool want[ROCKER_CTRL_MAX] = { 0, };
- int flags;
+ bool prev_ctrls[ROCKER_CTRL_MAX];
+ u8 prev_state;
int err;
int i;
+ if (trans == SWITCHDEV_TRANS_PREPARE) {
+ memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
+ prev_state = rocker_port->stp_state;
+ }
+
if (rocker_port->stp_state == state)
return 0;
@@ -3638,45 +3767,57 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
for (i = 0; i < ROCKER_CTRL_MAX; i++) {
if (want[i] != rocker_port->ctrls[i]) {
- flags = ROCKER_OP_FLAG_NOWAIT |
- (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
- err = rocker_port_ctrl(rocker_port, flags,
+ int ctrl_flags = flags |
+ (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+ err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
&rocker_ctrls[i]);
if (err)
- return err;
+ goto err_out;
rocker_port->ctrls[i] = want[i];
}
}
- err = rocker_port_fdb_flush(rocker_port);
+ err = rocker_port_fdb_flush(rocker_port, trans, flags);
if (err)
- return err;
+ goto err_out;
+
+ err = rocker_port_fwding(rocker_port, trans, flags);
+
+err_out:
+ if (trans == SWITCHDEV_TRANS_PREPARE) {
+ memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+ rocker_port->stp_state = prev_state;
+ }
- return rocker_port_fwding(rocker_port);
+ return err;
}
-static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will enable port */
return 0;
/* port is not bridged, so simulate going to FORWARDING state */
- return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
+ return rocker_port_stp_update(rocker_port, trans, flags,
+ BR_STATE_FORWARDING);
}
-static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will disable port */
return 0;
/* port is not bridged, so simulate going to DISABLED state */
- return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+ return rocker_port_stp_update(rocker_port, trans, flags,
+ BR_STATE_DISABLED);
}
static struct rocker_internal_vlan_tbl_entry *
-rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
struct rocker_internal_vlan_tbl_entry *found;
@@ -3731,8 +3872,9 @@ found:
return found->vlan_id;
}
-static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
- int ifindex)
+static void
+rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
+ int ifindex)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_internal_vlan_tbl_entry *found;
@@ -3760,11 +3902,12 @@ not_found:
spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
-static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
- int dst_len, struct fib_info *fi, u32 tb_id,
- int flags)
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, __be32 dst,
+ int dst_len, const struct fib_info *fi,
+ u32 tb_id, int flags)
{
- struct fib_nh *nh;
+ const struct fib_nh *nh;
__be16 eth_type = htons(ETH_P_IP);
__be32 dst_mask = inet_make_mask(dst_len);
__be16 internal_vlan_id = rocker_port->internal_vlan_id;
@@ -3784,7 +3927,7 @@ static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
has_gw = !!nh->nh_gw;
if (has_gw && nh_on_port) {
- err = rocker_port_ipv4_nh(rocker_port, flags,
+ err = rocker_port_ipv4_nh(rocker_port, trans, flags,
nh->nh_gw, &index);
if (err)
return err;
@@ -3795,7 +3938,7 @@ static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
}
- err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+ err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
dst_mask, priority, goto_tbl,
group_id, flags);
if (err)
@@ -3834,7 +3977,7 @@ static int rocker_port_open(struct net_device *dev)
goto err_request_rx_irq;
}
- err = rocker_port_fwd_enable(rocker_port);
+ err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
if (err)
goto err_fwd_enable;
@@ -3861,7 +4004,8 @@ static int rocker_port_stop(struct net_device *dev)
rocker_port_set_enable(rocker_port, false);
napi_disable(&rocker_port->napi_rx);
napi_disable(&rocker_port->napi_tx);
- rocker_port_fwd_disable(rocker_port);
+ rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_NOWAIT);
free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
rocker_port_dma_rings_fini(rocker_port);
@@ -3869,12 +4013,12 @@ static int rocker_port_stop(struct net_device *dev)
return 0;
}
-static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info)
+static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
- struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
struct rocker_tlv *attr;
int rem;
@@ -3882,7 +4026,7 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
if (!attrs[ROCKER_TLV_TX_FRAGS])
return;
rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
- struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+ const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
dma_addr_t dma_handle;
size_t len;
@@ -3899,11 +4043,11 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
}
}
-static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
char *buf, size_t buf_len)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
struct rocker_tlv *frag;
@@ -4008,269 +4152,359 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
- __be16 proto, u16 vid)
+static int rocker_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
{
struct rocker_port *rocker_port = netdev_priv(dev);
+ struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_port_vlan(rocker_port, 0, vid);
- if (err)
- return err;
+ err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_phys_name_proc,
+ &name);
- return rocker_port_router_mac(rocker_port, 0, htons(vid));
+ return err ? -EOPNOTSUPP : 0;
}
-static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
- __be16 proto, u16 vid)
+static const struct net_device_ops rocker_port_netdev_ops = {
+ .ndo_open = rocker_port_open,
+ .ndo_stop = rocker_port_stop,
+ .ndo_start_xmit = rocker_port_xmit,
+ .ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
+ .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
+};
+
+/********************
+ * swdev interface
+ ********************/
+
+static int rocker_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int err;
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct rocker *rocker = rocker_port->rocker;
- err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
- htons(vid));
- if (err)
- return err;
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(rocker->hw.id);
+ memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+ attr->u.brport_flags = rocker_port->brport_flags;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
+ return 0;
}
-static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid,
- u16 nlm_flags)
+static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
- int flags = 0;
-
- if (!rocker_port_is_bridged(rocker_port))
- return -EINVAL;
+ struct list_head *mem, *tmp;
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
+ list_del(mem);
+ kfree(mem);
+ }
}
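rocker_port_trans_abort() can get away with simply walking rocker_port->trans_mem and freeing everything because, in this switchdev model, the core invokes the driver twice per operation: first with SWITCHDEV_TRANS_PREPARE, which must reserve resources but leave shared driver state untouched, and then with either SWITCHDEV_TRANS_COMMIT or SWITCHDEV_TRANS_ABORT, as rocker_port_attr_set() and rocker_port_obj_add() below dispatch on attr->trans/obj->trans. The condensed fragment below sketches the assumed caller-side sequence; the real sequencing lives in net/switchdev, not in this patch.

/* Assumed shape of the two-phase caller, condensed for illustration only;
 * not part of this patch.
 */
static int example_two_phase_attr_set(struct net_device *dev,
				      struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	int err;

	attr->trans = SWITCHDEV_TRANS_PREPARE;	/* dry run: reserve only */
	err = ops->switchdev_port_attr_set(dev, attr);
	if (err) {
		attr->trans = SWITCHDEV_TRANS_ABORT;	/* reclaim trans_mem */
		ops->switchdev_port_attr_set(dev, attr);
		return err;
	}

	attr->trans = SWITCHDEV_TRANS_COMMIT;	/* must not fail now */
	return ops->switchdev_port_attr_set(dev, attr);
}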
-static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ unsigned long brport_flags)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
- int flags = ROCKER_OP_FLAG_REMOVE;
+ unsigned long orig_flags;
+ int err = 0;
- if (!rocker_port_is_bridged(rocker_port))
- return -EINVAL;
+ orig_flags = rocker_port->brport_flags;
+ rocker_port->brport_flags = brport_flags;
+ if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
+ err = rocker_port_set_learning(rocker_port, trans);
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ rocker_port->brport_flags = orig_flags;
+
+ return err;
}
-static int rocker_fdb_fill_info(struct sk_buff *skb,
- struct rocker_port *rocker_port,
- const unsigned char *addr, u16 vid,
- u32 portid, u32 seq, int type,
- unsigned int flags)
+static int rocker_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr)
{
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
- nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
- if (!nlh)
- return -EMSGSIZE;
+ switch (attr->trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ BUG_ON(!list_empty(&rocker_port->trans_mem));
+ break;
+ case SWITCHDEV_TRANS_ABORT:
+ rocker_port_trans_abort(rocker_port);
+ return 0;
+ default:
+ break;
+ }
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_SELF;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = rocker_port->dev->ifindex;
- ndm->ndm_state = NUD_REACHABLE;
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_STP_STATE:
+ err = rocker_port_stp_update(rocker_port, attr->trans,
+ ROCKER_OP_FLAG_NOWAIT,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+ err = rocker_port_brport_flags_set(rocker_port, attr->trans,
+ attr->u.brport_flags);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
- goto nla_put_failure;
+ return err;
+}
- if (vid && nla_put_u16(skb, NDA_VLAN, vid))
- goto nla_put_failure;
+static int rocker_port_vlan_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, u16 vid, u16 flags)
+{
+ int err;
- nlmsg_end(skb, nlh);
- return 0;
+ /* XXX deal with flags for PVID and untagged */
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+ err = rocker_port_vlan(rocker_port, trans, 0, vid);
+ if (err)
+ return err;
+
+ err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
+ if (err)
+ rocker_port_vlan(rocker_port, trans,
+ ROCKER_OP_FLAG_REMOVE, vid);
+
+ return err;
}
-static int rocker_port_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev,
- int idx)
+static int rocker_port_vlans_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_vlan *vlan)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_fdb_tbl_entry *found;
- struct hlist_node *tmp;
- int bkt;
- unsigned long lock_flags;
- const unsigned char *addr;
u16 vid;
int err;
- spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
- hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.pport != rocker_port->pport)
- continue;
- if (idx < cb->args[0])
- goto skip;
- addr = found->key.addr;
- vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
- err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH, NLM_F_MULTI);
- if (err < 0)
- break;
-skip:
- ++idx;
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = rocker_port_vlan_add(rocker_port, trans,
+ vid, vlan->flags);
+ if (err)
+ return err;
}
- spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
- return idx;
+
+ return 0;
}
-static int rocker_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+static int rocker_port_fdb_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_fdb *fdb)
+{
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+ int flags = 0;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
+
+ return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
+}
+
+static int rocker_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- struct nlattr *protinfo;
- struct nlattr *attr;
- int err;
+ const struct switchdev_obj_ipv4_fib *fib4;
+ int err = 0;
- protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
- IFLA_PROTINFO);
- if (protinfo) {
- attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
- if (attr) {
- if (nla_len(attr) < sizeof(u8))
- return -EINVAL;
-
- if (nla_get_u8(attr))
- rocker_port->brport_flags |= BR_LEARNING;
- else
- rocker_port->brport_flags &= ~BR_LEARNING;
- err = rocker_port_set_learning(rocker_port);
- if (err)
- return err;
- }
- attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
- if (attr) {
- if (nla_len(attr) < sizeof(u8))
- return -EINVAL;
-
- if (nla_get_u8(attr))
- rocker_port->brport_flags |= BR_LEARNING_SYNC;
- else
- rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
- }
+ switch (obj->trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ BUG_ON(!list_empty(&rocker_port->trans_mem));
+ break;
+ case SWITCHDEV_TRANS_ABORT:
+ rocker_port_trans_abort(rocker_port);
+ return 0;
+ default:
+ break;
}
- return 0;
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = rocker_port_vlans_add(rocker_port, obj->trans,
+ &obj->u.vlan);
+ break;
+ case SWITCHDEV_OBJ_IPV4_FIB:
+ fib4 = &obj->u.ipv4_fib;
+ err = rocker_port_fib_ipv4(rocker_port, obj->trans,
+ htonl(fib4->dst), fib4->dst_len,
+ fib4->fi, fib4->tb_id, 0);
+ break;
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
}
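
Both rocker_port_attr_set() and rocker_port_obj_add() above are written for the switchdev two-phase model: the handler is first called with trans == SWITCHDEV_TRANS_PREPARE (resources may be queued on rocker_port->trans_mem but nothing is committed), then called again to commit, or with SWITCHDEV_TRANS_ABORT to drop whatever prepare queued. A simplified sketch of how a caller could drive that sequence (assuming the SWITCHDEV_TRANS_COMMIT value from the same enum; this is not the actual switchdev core code):

static int example_two_phase_obj_add(struct net_device *dev,
				     struct switchdev_obj *obj)
{
	int err;

	obj->trans = SWITCHDEV_TRANS_PREPARE;
	err = rocker_port_obj_add(dev, obj);	/* allocate and validate only */
	if (err) {
		obj->trans = SWITCHDEV_TRANS_ABORT;
		rocker_port_obj_add(dev, obj);	/* free anything queued */
		return err;
	}

	obj->trans = SWITCHDEV_TRANS_COMMIT;
	return rocker_port_obj_add(dev, obj);	/* commit; expected not to fail */
}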
-static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev,
- u32 filter_mask, int nlflags)
+static int rocker_port_vlan_del(struct rocker_port *rocker_port,
+ u16 vid, u16 flags)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- u16 mode = BRIDGE_MODE_UNDEF;
- u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+ int err;
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
- rocker_port->brport_flags, mask,
- nlflags);
+ err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE, htons(vid));
+ if (err)
+ return err;
+
+ return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE, vid);
}
-static int rocker_port_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
+static int rocker_port_vlans_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_vlan *vlan)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct port_name name = { .buf = buf, .len = len };
+ u16 vid;
int err;
- err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
- rocker_cmd_get_port_settings_prep, NULL,
- rocker_cmd_get_port_settings_phys_name_proc,
- &name, false);
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
+ if (err)
+ return err;
+ }
- return err ? -EOPNOTSUPP : 0;
+ return 0;
}
-static const struct net_device_ops rocker_port_netdev_ops = {
- .ndo_open = rocker_port_open,
- .ndo_stop = rocker_port_stop,
- .ndo_start_xmit = rocker_port_xmit,
- .ndo_set_mac_address = rocker_port_set_mac_address,
- .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
- .ndo_fdb_add = rocker_port_fdb_add,
- .ndo_fdb_del = rocker_port_fdb_del,
- .ndo_fdb_dump = rocker_port_fdb_dump,
- .ndo_bridge_setlink = rocker_port_bridge_setlink,
- .ndo_bridge_getlink = rocker_port_bridge_getlink,
- .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
-};
+static int rocker_port_fdb_del(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_fdb *fdb)
+{
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
-/********************
- * swdev interface
- ********************/
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
+
+ return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
+}
-static int rocker_port_swdev_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+static int rocker_port_obj_del(struct net_device *dev,
+ struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
+ const struct switchdev_obj_ipv4_fib *fib4;
+ int err = 0;
- psid->id_len = sizeof(rocker->hw.id);
- memcpy(&psid->id, &rocker->hw.id, psid->id_len);
- return 0;
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+ break;
+ case SWITCHDEV_OBJ_IPV4_FIB:
+ fib4 = &obj->u.ipv4_fib;
+ err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
+ htonl(fib4->dst), fib4->dst_len,
+ fib4->fi, fib4->tb_id,
+ ROCKER_OP_FLAG_REMOVE);
+ break;
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
}
-static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
+static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
+ struct switchdev_obj *obj)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+ struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+ struct rocker_fdb_tbl_entry *found;
+ struct hlist_node *tmp;
+ unsigned long lock_flags;
+ int bkt;
+ int err = 0;
- return rocker_port_stp_update(rocker_port, state);
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.pport != rocker_port->pport)
+ continue;
+ fdb->addr = found->key.addr;
+ fdb->vid = rocker_port_vlan_to_vid(rocker_port,
+ found->key.vlan_id);
+ err = obj->cb(rocker_port->dev, obj);
+ if (err)
+ break;
+ }
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+ return err;
}
-static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
- __be32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type,
- u32 nlflags, u32 tb_id)
+static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
+ struct switchdev_obj *obj)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = 0;
+ struct switchdev_obj_vlan *vlan = &obj->u.vlan;
+ u16 vid;
+ int err = 0;
- return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
- fi, tb_id, flags);
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ if (!test_bit(vid, rocker_port->vlan_bitmap))
+ continue;
+ vlan->flags = 0;
+ if (rocker_vlan_id_is_internal(htons(vid)))
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+ vlan->vid_begin = vlan->vid_end = vid;
+ err = obj->cb(rocker_port->dev, obj);
+ if (err)
+ break;
+ }
+
+ return err;
}
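
The new dump helpers no longer build netlink messages themselves; they walk the driver state, fill obj->u.fdb (or obj->u.vlan) for each entry belonging to the port, and hand it to the caller through obj->cb, stopping when the callback returns non-zero. A minimal consumer, assuming only what is visible here (the cb signature taking the netdev and the object):

static int example_fdb_dump_cb(struct net_device *dev,
			       struct switchdev_obj *obj)
{
	const struct switchdev_obj_fdb *fdb = &obj->u.fdb;

	netdev_dbg(dev, "fdb entry %pM vid %u\n", fdb->addr, fdb->vid);
	return 0;	/* keep walking */
}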
-static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
- __be32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
+static int rocker_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = ROCKER_OP_FLAG_REMOVE;
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
- return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
- fi, tb_id, flags);
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_dump(rocker_port, obj);
+ break;
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = rocker_port_vlan_dump(rocker_port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
}
-static const struct swdev_ops rocker_port_swdev_ops = {
- .swdev_parent_id_get = rocker_port_swdev_parent_id_get,
- .swdev_port_stp_update = rocker_port_swdev_port_stp_update,
- .swdev_fib_ipv4_add = rocker_port_swdev_fib_ipv4_add,
- .swdev_fib_ipv4_del = rocker_port_swdev_fib_ipv4_del,
+static const struct switchdev_ops rocker_port_switchdev_ops = {
+ .switchdev_port_attr_get = rocker_port_attr_get,
+ .switchdev_port_attr_set = rocker_port_attr_set,
+ .switchdev_port_obj_add = rocker_port_obj_add,
+ .switchdev_port_obj_del = rocker_port_obj_del,
+ .switchdev_port_obj_dump = rocker_port_obj_dump,
};
/********************
@@ -4334,8 +4568,7 @@ static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
}
static int
-rocker_cmd_get_port_stats_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -4359,14 +4592,13 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
- struct rocker_tlv *pattr;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+ const struct rocker_tlv *pattr;
u32 pport;
u64 *data = priv;
int i;
@@ -4400,10 +4632,10 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
void *priv)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
rocker_cmd_get_port_stats_prep, NULL,
rocker_cmd_get_port_stats_ethtool_proc,
- priv, false);
+ priv);
}
static void rocker_port_get_stats(struct net_device *dev,
@@ -4417,8 +4649,6 @@ static void rocker_port_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
data[i] = 0;
}
-
- return;
}
static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
@@ -4453,8 +4683,8 @@ static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_desc_info *desc_info;
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -4472,8 +4702,9 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
if (err == 0) {
rocker_port->dev->stats.tx_packets++;
rocker_port->dev->stats.tx_bytes += skb->len;
- } else
+ } else {
rocker_port->dev->stats.tx_errors++;
+ }
dev_kfree_skb_any(skb);
credits++;
@@ -4488,11 +4719,11 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
-static int rocker_port_rx_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_port_rx_proc(const struct rocker *rocker,
+ const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
size_t rx_len;
@@ -4514,7 +4745,7 @@ static int rocker_port_rx_proc(struct rocker *rocker,
netif_receive_skb(skb);
- return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+ return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
@@ -4525,7 +4756,7 @@ static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -4565,9 +4796,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
* PCI driver ops
*****************/
-static void rocker_carrier_init(struct rocker_port *rocker_port)
+static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
@@ -4578,23 +4809,27 @@ static void rocker_carrier_init(struct rocker_port *rocker_port)
netif_carrier_off(rocker_port->dev);
}
-static void rocker_remove_ports(struct rocker *rocker)
+static void rocker_remove_ports(const struct rocker *rocker)
{
struct rocker_port *rocker_port;
int i;
for (i = 0; i < rocker->port_count; i++) {
rocker_port = rocker->ports[i];
- rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+ if (!rocker_port)
+ continue;
+ rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
+ free_netdev(rocker_port->dev);
}
kfree(rocker->ports);
}
-static void rocker_port_dev_addr_init(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct pci_dev *pdev = rocker->pdev;
int err;
err = rocker_cmd_get_port_settings_macaddr(rocker_port,
@@ -4607,9 +4842,10 @@ static void rocker_port_dev_addr_init(struct rocker *rocker,
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
struct rocker_port *rocker_port;
struct net_device *dev;
+ u16 untagged_vid = 0;
int err;
dev = alloc_etherdev(sizeof(struct rocker_port));
@@ -4621,20 +4857,19 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port->port_number = port_number;
rocker_port->pport = port_number + 1;
rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+ INIT_LIST_HEAD(&rocker_port->trans_mem);
- rocker_port_dev_addr_init(rocker, rocker_port);
+ rocker_port_dev_addr_init(rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
- dev->swdev_ops = &rocker_port_swdev_ops;
+ dev->switchdev_ops = &rocker_port_switchdev_ops;
netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
NAPI_POLL_WEIGHT);
netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_SWITCH_OFFLOAD;
+ dev->features |= NETIF_F_NETNS_LOCAL;
err = register_netdev(dev);
if (err) {
@@ -4643,18 +4878,29 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
}
rocker->ports[port_number] = rocker_port;
- rocker_port_set_learning(rocker_port);
+ rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
- err = rocker_port_ig_tbl(rocker_port, 0);
+ err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
if (err) {
dev_err(&pdev->dev, "install ig port table failed\n");
goto err_port_ig_tbl;
}
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+
+ err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
+ if (err) {
+ netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
+ goto err_untagged_vlan;
+ }
+
return 0;
+err_untagged_vlan:
+ rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
unregister_netdev(dev);
err_register_netdev:
@@ -4669,7 +4915,7 @@ static int rocker_probe_ports(struct rocker *rocker)
int err;
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
- rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
if (!rocker->ports)
return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
@@ -4718,7 +4964,7 @@ err_enable_msix:
return err;
}
-static void rocker_msix_fini(struct rocker *rocker)
+static void rocker_msix_fini(const struct rocker *rocker)
{
pci_disable_msix(rocker->pdev);
kfree(rocker->msix_entries);
@@ -4884,7 +5130,7 @@ static struct pci_driver rocker_pci_driver = {
* Net device notifier event handler
************************************/
-static bool rocker_port_dev_check(struct net_device *dev)
+static bool rocker_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &rocker_port_netdev_ops;
}
@@ -4892,45 +5138,55 @@ static bool rocker_port_dev_check(struct net_device *dev)
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
struct net_device *bridge)
{
+ u16 untagged_vid = 0;
int err;
+ /* Port is joining bridge, so the internal VLAN for the
+ * port is going to change to the bridge internal VLAN.
+ * Let's remove untagged VLAN (vid=0) from port and
+ * re-add once internal VLAN has changed.
+ */
+
+ err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
+ if (err)
+ return err;
+
rocker_port_internal_vlan_id_put(rocker_port,
rocker_port->dev->ifindex);
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
rocker_port->bridge_dev = bridge;
- /* Use bridge internal VLAN ID for untagged pkts */
- err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
- if (err)
- return err;
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port,
- bridge->ifindex);
- return rocker_port_vlan(rocker_port, 0, 0);
+ return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
+ u16 untagged_vid = 0;
int err;
- rocker_port_internal_vlan_id_put(rocker_port,
- rocker_port->bridge_dev->ifindex);
-
- rocker_port->bridge_dev = NULL;
-
- /* Use port internal VLAN ID for untagged pkts */
- err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+ err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
if (err)
return err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->bridge_dev->ifindex);
rocker_port->internal_vlan_id =
rocker_port_internal_vlan_id_get(rocker_port,
rocker_port->dev->ifindex);
- err = rocker_port_vlan(rocker_port, 0, 0);
+
+ rocker_port->bridge_dev = NULL;
+
+ err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
if (err)
return err;
if (rocker_port->dev->flags & IFF_UP)
- err = rocker_port_fwd_enable(rocker_port);
+ err = rocker_port_fwd_enable(rocker_port,
+ SWITCHDEV_TRANS_NONE, 0);
return err;
}
@@ -4989,10 +5245,12 @@ static struct notifier_block rocker_netdevice_nb __read_mostly = {
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
+ int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
+ ROCKER_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *)n->primary_key;
- return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+ return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+ flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
@@ -5040,7 +5298,7 @@ static int __init rocker_module_init(void)
return 0;
err_pci_register_driver:
- unregister_netdevice_notifier(&rocker_netevent_nb);
+ unregister_netevent_notifier(&rocker_netevent_nb);
unregister_netdevice_notifier(&rocker_netdevice_nb);
return err;
}
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index a4e9591d7..c61fbf968 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -65,9 +65,9 @@ enum {
#define ROCKER_TEST_DMA_CTRL 0x0034
/* Rocker test register ctrl */
-#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0)
-#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1)
-#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2)
+#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0)
+#define ROCKER_TEST_DMA_CTRL_FILL BIT(1)
+#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2)
/* Rocker DMA ring register offsets */
#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */
@@ -79,7 +79,7 @@ enum {
#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32)
/* Rocker dma ctrl register bits */
-#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0)
+#define ROCKER_DMA_DESC_CTRL_RESET BIT(0)
/* Rocker DMA ring types */
enum rocker_dma_type {
@@ -111,7 +111,7 @@ struct rocker_desc {
u16 comp_err;
};
-#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15)
+#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15)
/* Rocker DMA TLV struct */
struct rocker_tlv {
@@ -237,14 +237,14 @@ enum {
ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
};
-#define ROCKER_RX_FLAGS_IPV4 (1 << 0)
-#define ROCKER_RX_FLAGS_IPV6 (1 << 1)
-#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2)
-#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3)
-#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4)
-#define ROCKER_RX_FLAGS_TCP (1 << 5)
-#define ROCKER_RX_FLAGS_UDP (1 << 6)
-#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7)
+#define ROCKER_RX_FLAGS_IPV4 BIT(0)
+#define ROCKER_RX_FLAGS_IPV6 BIT(1)
+#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3)
+#define ROCKER_RX_FLAGS_IP_FRAG BIT(4)
+#define ROCKER_RX_FLAGS_TCP BIT(5)
+#define ROCKER_RX_FLAGS_UDP BIT(6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7)
enum {
ROCKER_TLV_TX_UNSPEC,
@@ -460,6 +460,6 @@ enum rocker_of_dpa_overlay_type {
#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */
/* Rocker control bits */
-#define ROCKER_CONTROL_RESET (1 << 0)
+#define ROCKER_CONTROL_RESET BIT(0)
#endif
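
The register-bit defines above are converted from open-coded shifts to the kernel's BIT() helper; the two forms are equivalent apart from BIT() yielding an unsigned long, roughly:

/* from include/linux/bitops.h */
#define BIT(nr)		(1UL << (nr))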
diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig
index 11f168e46..69c62d892 100644
--- a/drivers/net/ethernet/seeq/Kconfig
+++ b/drivers/net/ethernet/seeq/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_SEEQ
default y
depends on HAS_IOMEM
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 088921294..4dd92b7b8 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -36,3 +36,12 @@ config SFC_SRIOV
This enables support for the SFC9000 I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
+config SFC_MCDI_LOGGING
+ bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+ depends on SFC
+ default y
+ ---help---
+ This enables support for tracing of MCDI (Management-Controller-to-
+ Driver-Interface) commands and responses, allowing debugging of
+ driver/firmware interaction. The tracing is actually enabled by
+ a sysfs file 'mcdi_logging' under the PCI device.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3a83c0dca..ce8470fe7 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -3,6 +3,6 @@ sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index fbb6cfa0f..b1a4ea21c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -15,6 +15,7 @@
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
+#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -30,6 +31,9 @@ enum {
/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
/* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
@@ -77,7 +81,6 @@ struct efx_ef10_filter_table {
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -92,9 +95,55 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
- return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+ int bar;
+
+ bar = efx->type->mem_bar;
+ return resource_size(&efx->pci_dev->resource[bar]);
+}
+
+static bool efx_ef10_is_vf(struct efx_nic *efx)
+{
+ return efx->type->is_vf;
}
+static int efx_ef10_get_pf_index(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+ return 0;
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_get_vf_index(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
+ return 0;
+}
+#endif
+
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
@@ -117,6 +166,13 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ /* record the DPCPU firmware IDs to determine VEB vswitching support.
+ */
+ nic_data->rx_dpcpu_fw_id =
+ MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+ nic_data->tx_dpcpu_fw_id =
+ MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
if (!(nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
netif_err(efx, drv, efx->net_dev,
@@ -147,7 +203,7 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
return rc > 0 ? rc : -ERANGE;
}
-static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
size_t outlen;
@@ -167,9 +223,66 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
return 0;
}
+static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+ size_t outlen;
+ int num_addrs, rc;
+
+ MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
+ return -EIO;
+
+ num_addrs = MCDI_DWORD(outbuf,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+
+ WARN_ON(num_addrs != 1);
+
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
+
+ return 0;
+}
+
+static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ ? 1 : 0);
+}
+
+static ssize_t efx_ef10_show_primary_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ ? 1 : 0);
+}
+
+static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
+ NULL);
+static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
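
DEVICE_ATTR(name, mode, show, store) declares a struct device_attribute named dev_attr_<name>; the sysfs file only appears once the probe path registers it, which a later hunk does with device_create_file(). A minimal sketch of that registration step, reusing the attribute declared here:

static int example_register_flags(struct pci_dev *pci_dev)
{
	/* exposes primary_flag under the PCI device's sysfs directory */
	return device_create_file(&pci_dev->dev, &dev_attr_primary_flag);
}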
+
static int efx_ef10_probe(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data;
+ struct net_device *net_dev = efx->net_dev;
int i, rc;
/* We can have one VI for each 8K region. However, until we
@@ -178,7 +291,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->max_channels =
min_t(unsigned int,
EFX_MAX_CHANNELS,
- resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ efx_ef10_mem_map_size(efx) /
(EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
if (WARN_ON(efx->max_channels == 0))
return -EIO;
@@ -188,6 +301,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
+ /* we assume later that we can copy from this buffer in dwords */
+ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
if (rc)
@@ -209,6 +325,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
* function. We send a special message using the least
@@ -230,45 +348,85 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_link_control_flag);
+ if (rc)
+ goto fail3;
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ if (rc)
+ goto fail4;
+
+ rc = efx_ef10_get_pf_index(efx);
+ if (rc)
+ goto fail5;
+
rc = efx_ef10_init_datapath_caps(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->port_num = rc;
+ net_dev->dev_port = rc;
- rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
if (rc)
- goto fail3;
+ goto fail5;
rc = efx_ef10_get_sysclk_freq(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
- /* Check whether firmware supports bug 35388 workaround */
+ /* Check whether firmware supports bug 35388 workaround.
+ * First try to enable it, then if we get EPERM, just
+ * ask if it's already enabled
+ */
rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
- if (rc == 0)
+ if (rc == 0) {
nic_data->workaround_35388 = true;
- else if (rc != -ENOSYS && rc != -ENOENT)
- goto fail3;
+ } else if (rc == -EPERM) {
+ unsigned int enabled;
+
+ rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
+ if (rc)
+ goto fail3;
+ nic_data->workaround_35388 = enabled &
+ MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
+ } else if (rc != -ENOSYS && rc != -ENOENT) {
+ goto fail5;
+ }
netif_dbg(efx, probe, efx->net_dev,
"workaround for bug 35388 is %sabled\n",
nic_data->workaround_35388 ? "en" : "dis");
rc = efx_mcdi_mon_probe(efx);
- if (rc)
- goto fail3;
+ if (rc && rc != -EPERM)
+ goto fail5;
efx_ptp_probe(efx, NULL);
+#ifdef CONFIG_SFC_SRIOV
+ if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
+ } else
+#endif
+ ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+
return 0;
+fail5:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+fail4:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
efx_mcdi_fini(efx);
fail2:
@@ -281,7 +439,7 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
size_t outlen;
int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
@@ -352,9 +510,9 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- MCDI_DECLARE_BUF(inbuf,
- max(MC_CMD_LINK_PIOBUF_IN_LEN,
- MC_CMD_UNLINK_PIOBUF_IN_LEN));
+ _MCDI_DECLARE_BUF(inbuf,
+ max(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_IN_LEN));
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
unsigned int offset, index;
@@ -363,6 +521,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+ memset(inbuf, 0, sizeof(inbuf));
+
/* Link a buffer to each VI in the write-combining mapping */
for (index = 0; index < nic_data->n_piobufs; ++index) {
MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
@@ -475,6 +635,25 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_ef10_nic_data *nic_data_pf;
+ struct pci_dev *pci_dev_pf;
+ struct efx_nic *efx_pf;
+ struct ef10_vf *vf;
+
+ if (efx->pci_dev->is_virtfn) {
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ efx_pf = pci_get_drvdata(pci_dev_pf);
+ nic_data_pf = efx_pf->nic_data;
+ vf = nic_data_pf->vf + nic_data->vf_index;
+ vf->efx = NULL;
+ } else
+ netif_info(efx, drv, efx->net_dev,
+ "Could not get the PF id from VF\n");
+ }
+#endif
+
efx_ptp_remove(efx);
efx_mcdi_mon_remove(efx);
@@ -490,11 +669,120 @@ static void efx_ef10_remove(struct efx_nic *efx)
if (!nic_data->must_restore_piobufs)
efx_ef10_free_piobufs(efx);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
}
+static int efx_ef10_probe_pf(struct efx_nic *efx)
+{
+ return efx_ef10_probe(efx);
+}
+
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_probe_vf(struct efx_nic *efx)
+{
+ int rc;
+ struct pci_dev *pci_dev_pf;
+
+ /* If the parent PF has no VF data structure, it doesn't know about this
+ * VF so fail probe. The VF needs to be re-created. This can happen
+ * if the PF driver is unloaded while the VF is assigned to a guest.
+ */
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
+
+ if (!nic_data_pf->vf) {
+ netif_info(efx, drv, efx->net_dev,
+ "The VF cannot link to its parent PF; "
+ "please destroy and re-create the VF\n");
+ return -EBUSY;
+ }
+ }
+
+ rc = efx_ef10_probe(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_get_vf_index(efx);
+ if (rc)
+ goto fail;
+
+ if (efx->pci_dev->is_virtfn) {
+ if (efx->pci_dev->physfn) {
+ struct efx_nic *efx_pf =
+ pci_get_drvdata(efx->pci_dev->physfn);
+ struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ nic_data_p->vf[nic_data->vf_index].efx = efx;
+ nic_data_p->vf[nic_data->vf_index].pci_dev =
+ efx->pci_dev;
+ } else
+ netif_info(efx, drv, efx->net_dev,
+ "Could not get the PF id from VF\n");
+ }
+
+ return 0;
+
+fail:
+ efx_ef10_remove(efx);
+ return rc;
+}
+#else
+static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
+{
+ return 0;
+}
+#endif
+
static int efx_ef10_alloc_vis(struct efx_nic *efx,
unsigned int min_vis, unsigned int max_vis)
{
@@ -687,7 +975,9 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- efx_ef10_rx_push_rss_config(efx);
+ /* don't fail init if RSS setup doesn't work */
+ efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+
return 0;
}
@@ -702,6 +992,14 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
+static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
+{
+ if (reason == RESET_TYPE_MC_FAILURE)
+ return RESET_TYPE_DATAPATH;
+
+ return efx_mcdi_map_reset_reason(reason);
+}
+
static int efx_ef10_map_reset_flags(u32 *flags)
{
enum {
@@ -760,93 +1058,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
- EF10_DMA_STAT(tx_bytes, TX_BYTES),
- EF10_DMA_STAT(tx_packets, TX_PKTS),
- EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
- EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
- EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
- EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
- EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
- EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
- EF10_DMA_STAT(tx_64, TX_64_PKTS),
- EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
- EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
- EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
- EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
- EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_bytes, RX_BYTES),
- EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
- EF10_OTHER_STAT(rx_good_bytes),
- EF10_OTHER_STAT(rx_bad_bytes),
- EF10_DMA_STAT(rx_packets, RX_PKTS),
- EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
- EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
- EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
- EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
- EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
- EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
- EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
- EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
- EF10_DMA_STAT(rx_64, RX_64_PKTS),
- EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
- EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
- EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
- EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
- EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
- EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
- EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
- EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
- EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
- EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(port_tx_packets, TX_PKTS),
+ EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(port_rx_good_bytes),
+ EF10_OTHER_STAT(port_rx_bad_bytes),
+ EF10_DMA_STAT(port_rx_packets, RX_PKTS),
+ EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
GENERIC_SW_STAT(rx_nodesc_trunc),
GENERIC_SW_STAT(rx_noskb_drops),
- EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
- EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
- EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
- EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
- EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
- EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
+ EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
+ EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
+ EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
+ EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
+ EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
+ EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
+ EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
+ EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
+ EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
+ EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
+ EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
+ EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
+ EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
+ EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};
-#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
- (1ULL << EF10_STAT_tx_packets) | \
- (1ULL << EF10_STAT_tx_pause) | \
- (1ULL << EF10_STAT_tx_unicast) | \
- (1ULL << EF10_STAT_tx_multicast) | \
- (1ULL << EF10_STAT_tx_broadcast) | \
- (1ULL << EF10_STAT_rx_bytes) | \
- (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
- (1ULL << EF10_STAT_rx_good_bytes) | \
- (1ULL << EF10_STAT_rx_bad_bytes) | \
- (1ULL << EF10_STAT_rx_packets) | \
- (1ULL << EF10_STAT_rx_good) | \
- (1ULL << EF10_STAT_rx_bad) | \
- (1ULL << EF10_STAT_rx_pause) | \
- (1ULL << EF10_STAT_rx_control) | \
- (1ULL << EF10_STAT_rx_unicast) | \
- (1ULL << EF10_STAT_rx_multicast) | \
- (1ULL << EF10_STAT_rx_broadcast) | \
- (1ULL << EF10_STAT_rx_lt64) | \
- (1ULL << EF10_STAT_rx_64) | \
- (1ULL << EF10_STAT_rx_65_to_127) | \
- (1ULL << EF10_STAT_rx_128_to_255) | \
- (1ULL << EF10_STAT_rx_256_to_511) | \
- (1ULL << EF10_STAT_rx_512_to_1023) | \
- (1ULL << EF10_STAT_rx_1024_to_15xx) | \
- (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
- (1ULL << EF10_STAT_rx_gtjumbo) | \
- (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
- (1ULL << EF10_STAT_rx_overflow) | \
- (1ULL << EF10_STAT_rx_nodesc_drops) | \
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
+ (1ULL << EF10_STAT_port_tx_packets) | \
+ (1ULL << EF10_STAT_port_tx_pause) | \
+ (1ULL << EF10_STAT_port_tx_unicast) | \
+ (1ULL << EF10_STAT_port_tx_multicast) | \
+ (1ULL << EF10_STAT_port_tx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_bytes) | \
+ (1ULL << \
+ EF10_STAT_port_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_port_rx_packets) | \
+ (1ULL << EF10_STAT_port_rx_good) | \
+ (1ULL << EF10_STAT_port_rx_bad) | \
+ (1ULL << EF10_STAT_port_rx_pause) | \
+ (1ULL << EF10_STAT_port_rx_control) | \
+ (1ULL << EF10_STAT_port_rx_unicast) | \
+ (1ULL << EF10_STAT_port_rx_multicast) | \
+ (1ULL << EF10_STAT_port_rx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_lt64) | \
+ (1ULL << EF10_STAT_port_rx_64) | \
+ (1ULL << EF10_STAT_port_rx_65_to_127) | \
+ (1ULL << EF10_STAT_port_rx_128_to_255) | \
+ (1ULL << EF10_STAT_port_rx_256_to_511) | \
+ (1ULL << EF10_STAT_port_rx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
+ (1ULL << EF10_STAT_port_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
+ (1ULL << EF10_STAT_port_rx_overflow) | \
+ (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
(1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
(1ULL << GENERIC_STAT_rx_noskb_drops))
@@ -854,39 +1171,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
* switchable port we do not expose these because they might not
* include all the packets they should.
*/
-#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
- (1ULL << EF10_STAT_tx_lt64) | \
- (1ULL << EF10_STAT_tx_64) | \
- (1ULL << EF10_STAT_tx_65_to_127) | \
- (1ULL << EF10_STAT_tx_128_to_255) | \
- (1ULL << EF10_STAT_tx_256_to_511) | \
- (1ULL << EF10_STAT_tx_512_to_1023) | \
- (1ULL << EF10_STAT_tx_1024_to_15xx) | \
- (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
+ (1ULL << EF10_STAT_port_tx_lt64) | \
+ (1ULL << EF10_STAT_port_tx_64) | \
+ (1ULL << EF10_STAT_port_tx_65_to_127) |\
+ (1ULL << EF10_STAT_port_tx_128_to_255) |\
+ (1ULL << EF10_STAT_port_tx_256_to_511) |\
+ (1ULL << EF10_STAT_port_tx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
/* These statistics are only provided by the 40G MAC. For a 10G/40G
* switchable port we do expose these because the errors will otherwise
* be silent.
*/
-#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
- (1ULL << EF10_STAT_rx_length_error))
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
+ (1ULL << EF10_STAT_port_rx_length_error))
/* These statistics are only provided if the firmware supports the
* capability PM_AND_RXDP_COUNTERS.
*/
#define HUNT_PM_AND_RXDP_STAT_MASK ( \
- (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
- (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
- (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
- (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
- (1ULL << EF10_STAT_rx_dp_hlb_wait))
+ (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -894,6 +1211,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
u32 port_caps = efx_mcdi_phy_get_caps(efx);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ if (!(efx->mcdi->fn_flags &
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ return 0;
+
if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
else
@@ -908,13 +1229,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
- u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 raw_mask[2];
+
+ raw_mask[0] = efx_ef10_raw_stat_mask(efx);
+
+ /* Only show vadaptor stats when EVB capability is present */
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
+ raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
+ raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+ } else {
+ raw_mask[1] = 0;
+ }
#if BITS_PER_LONG == 64
- mask[0] = raw_mask;
+ mask[0] = raw_mask[0];
+ mask[1] = raw_mask[1];
#else
- mask[0] = raw_mask & 0xffffffff;
- mask[1] = raw_mask >> 32;
+ mask[0] = raw_mask[0] & 0xffffffff;
+ mask[1] = raw_mask[0] >> 32;
+ mask[2] = raw_mask[1] & 0xffffffff;
+ mask[3] = raw_mask[1] >> 32;
#endif
}
@@ -927,7 +1263,76 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
mask, names);
}
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+
+ efx_ef10_get_stat_mask(efx, mask);
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (!core_stats)
+ return stats_count;
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
+ /* Use vadaptor stats. */
+ core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
+ stats[EF10_STAT_rx_multicast] +
+ stats[EF10_STAT_rx_broadcast];
+ core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
+ stats[EF10_STAT_tx_multicast] +
+ stats[EF10_STAT_tx_broadcast];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
+ stats[EF10_STAT_rx_multicast_bytes] +
+ stats[EF10_STAT_rx_broadcast_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
+ stats[EF10_STAT_tx_multicast_bytes] +
+ stats[EF10_STAT_tx_broadcast_bytes];
+ core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = core_stats->rx_crc_errors;
+ core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+ } else {
+ /* Use port stats. */
+ core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_port_rx_gtjumbo] +
+ stats[EF10_STAT_port_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
+ core_stats->rx_frame_errors =
+ stats[EF10_STAT_port_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
+ }
+
+ return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -952,67 +1357,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
- efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
- stats[EF10_STAT_rx_good_bytes] =
- stats[EF10_STAT_rx_bytes] -
- stats[EF10_STAT_rx_bytes_minus_good_bytes];
- efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
- stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[EF10_STAT_port_rx_nodesc_drops]);
+ stats[EF10_STAT_port_rx_good_bytes] =
+ stats[EF10_STAT_port_rx_bytes] -
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
efx_update_sw_stats(efx, stats);
return 0;
}
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
- struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
- DECLARE_BITMAP(mask, EF10_STAT_COUNT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- u64 *stats = nic_data->stats;
- size_t stats_count = 0, index;
int retry;
- efx_ef10_get_stat_mask(efx, mask);
-
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us)
*/
for (retry = 0; retry < 100; ++retry) {
- if (efx_ef10_try_update_nic_stats(efx) == 0)
+ if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
break;
udelay(100);
}
- if (full_stats) {
- for_each_set_bit(index, mask, EF10_STAT_COUNT) {
- if (efx_ef10_stat_desc[index].name) {
- *full_stats++ = stats[index];
- ++stats_count;
- }
- }
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
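+/* Pull MAC statistics for a VF: DMA a single snapshot of the MC MAC stats
+ * into a temporary buffer and copy it out only if the generation counters
+ * show the snapshot was not torn by a concurrent update.
+ */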
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+ struct efx_buffer stats_buf;
+ __le64 *dma_stats;
+ int rc;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ if (in_interrupt()) {
+ /* If in atomic context, cannot update stats. Just update the
+ * software stats and return so the caller can continue.
+ */
+ spin_lock_bh(&efx->stats_lock);
+ efx_update_sw_stats(efx, stats);
+ return 0;
}
- if (core_stats) {
- core_stats->rx_packets = stats[EF10_STAT_rx_packets];
- core_stats->tx_packets = stats[EF10_STAT_tx_packets];
- core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
- core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
- core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
- stats[GENERIC_STAT_rx_nodesc_trunc] +
- stats[GENERIC_STAT_rx_noskb_drops];
- core_stats->multicast = stats[EF10_STAT_rx_multicast];
- core_stats->rx_length_errors =
- stats[EF10_STAT_rx_gtjumbo] +
- stats[EF10_STAT_rx_length_error];
- core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
- core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
- core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
- core_stats->rx_errors = (core_stats->rx_length_errors +
- core_stats->rx_crc_errors +
- core_stats->rx_frame_errors);
+ efx_ef10_get_stat_mask(efx, mask);
+
+ rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+ if (rc) {
+ spin_lock_bh(&efx->stats_lock);
+ return rc;
}
- return stats_count;
+ dma_stats = stats_buf.addr;
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+ MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, 1);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->stats_lock);
+ if (rc) {
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc != -ENOENT || atomic_read(&efx->active_queues))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
+ sizeof(inbuf), NULL, 0, rc);
+ goto out;
+ }
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+ stats, stats_buf.addr, false);
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ efx_update_sw_stats(efx, stats);
+out:
+ efx_nic_free_buffer(efx, &stats_buf);
+ return rc;
+}
+
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ if (efx_ef10_try_update_nic_stats_vf(efx))
+ return 0;
+
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
@@ -1044,6 +1496,14 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
}
}
+static void efx_ef10_get_wol_vf(struct efx_nic *efx,
+ struct ethtool_wolinfo *wol) {}
+
+static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
+{
+ return -EOPNOTSUPP;
+}
+
static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
wol->supported = 0;
@@ -1123,13 +1583,17 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
efx_ef10_reset_mc_allocations(efx);
+ /* Driver-created vswitches and vports must be re-created */
+ nic_data->must_probe_vswitching = true;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
/* MAC statistics have been cleared on the NIC; clear the local
* statistic that we update with efx_update_diff_stat().
*/
- nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+ nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
return -EIO;
}
@@ -1232,16 +1696,17 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- size_t inlen, outlen;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t inlen;
dma_addr_t dma_addr;
efx_qword_t *txd;
int rc;
int i;
+ BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
@@ -1251,7 +1716,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
@@ -1266,7 +1731,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
+ NULL, 0, NULL);
if (rc)
goto fail;
@@ -1299,7 +1764,7 @@ fail:
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = tx_queue->efx;
size_t outlen;
int rc;
@@ -1378,19 +1843,33 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
+ bool exclusive, unsigned *context_size)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
+ u32 alloc_type = exclusive ?
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ unsigned rss_spread = exclusive ?
+ efx->rss_spread :
+ min(rounddown_pow_of_two(efx->rss_spread),
+ EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
+ if (!exclusive && rss_spread == 1) {
+ *context = EFX_EF10_RSS_CONTEXT_INVALID;
+ if (context_size)
+ *context_size = 1;
+ return 0;
+ }
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- EVB_PORT_ID_ASSIGNED);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
- EFX_MAX_CHANNELS);
+ nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -1402,6 +1881,9 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (context_size)
+ *context_size = rss_spread;
+
return 0;
}
@@ -1418,7 +1900,8 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
WARN_ON(rc != 0);
}
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
+ const u32 *rx_indir_table)
{
MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -1432,7 +1915,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
MCDI_PTR(tablebuf,
RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
- (u8) efx->rx_indir_table[i];
+ (u8) rx_indir_table[i];
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
sizeof(tablebuf), NULL, 0, NULL);
@@ -1460,27 +1943,119 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
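+/* Fall-back RSS configuration: use a shared RSS context.  A custom
+ * indirection table cannot be applied to it, so the local table is reset
+ * to the default spread.
+ */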
+static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
+ unsigned *context_size)
{
+ u32 new_rx_rss_context;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc;
+ int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+ false, context_size);
- netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
+ if (rc != 0)
+ return rc;
- if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
- if (rc != 0)
- goto fail;
+ nic_data->rx_rss_context = new_rx_rss_context;
+ nic_data->rx_rss_context_exclusive = false;
+ efx_set_default_rx_indir_table(efx);
+ return 0;
+}
+
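+/* Preferred RSS configuration: allocate (or reuse) an exclusive RSS
+ * context and program the supplied indirection table into it.
+ */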
+static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
+ const u32 *rx_indir_table)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+ u32 new_rx_rss_context;
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
+ !nic_data->rx_rss_context_exclusive) {
+ rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+ true, NULL);
+ if (rc == -EOPNOTSUPP)
+ return rc;
+ else if (rc != 0)
+ goto fail1;
+ } else {
+ new_rx_rss_context = nic_data->rx_rss_context;
}
- rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
+ rx_indir_table);
if (rc != 0)
- goto fail;
+ goto fail2;
- return;
+ if (nic_data->rx_rss_context != new_rx_rss_context)
+ efx_ef10_rx_free_indir_table(efx);
+ nic_data->rx_rss_context = new_rx_rss_context;
+ nic_data->rx_rss_context_exclusive = true;
+ if (rx_indir_table != efx->rx_indir_table)
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
+ return 0;
-fail:
+fail2:
+ if (new_rx_rss_context != nic_data->rx_rss_context)
+ efx_ef10_free_rss_context(efx, new_rx_rss_context);
+fail1:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
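+/* PF path: prefer an exclusive RSS context; if the firmware cannot provide
+ * one (-ENOBUFS) and the table was not explicitly requested by the user,
+ * fall back to a shared context and warn if the result differs from what
+ * was asked for.
+ */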
+static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
+{
+ int rc;
+
+ if (efx->rss_spread == 1)
+ return 0;
+
+ rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+
+ if (rc == -ENOBUFS && !user) {
+ unsigned context_size;
+ bool mismatch = false;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
+ i++)
+ mismatch = rx_indir_table[i] !=
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+ rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
+ if (rc == 0) {
+ if (context_size != efx->rss_spread)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one of"
+ " different size."
+ " Wanted %u, got %u.\n",
+ efx->rss_spread, context_size);
+ else if (mismatch)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one but"
+ " could not apply custom"
+ " indirection.\n");
+ else
+ netif_info(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one.\n");
+ }
+ }
+ return rc;
+}
+
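+/* VF path: only a shared RSS context is available, so user-requested
+ * indirection tables are rejected.
+ */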
+static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)))
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (user)
+ return -EOPNOTSUPP;
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ return 0;
+ return efx_ef10_rx_push_shared_rss_config(efx, NULL);
}
static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
@@ -1496,14 +2071,15 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_DECLARE_BUF(inbuf,
MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
- size_t inlen, outlen;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t inlen;
dma_addr_t dma_addr;
int rc;
int i;
+ BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
rx_queue->scatter_n = 0;
rx_queue->scatter_len = 0;
@@ -1517,7 +2093,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
@@ -1532,7 +2108,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
+ NULL, 0, NULL);
if (rc)
netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
efx_rx_queue_index(rx_queue));
@@ -1541,7 +2117,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = rx_queue->efx;
size_t outlen;
int rc;
@@ -1703,7 +2279,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
static void efx_ef10_ev_fini(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = channel->efx;
size_t outlen;
int rc;
@@ -2286,11 +2862,12 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
match_fields);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
@@ -3055,6 +3632,9 @@ fail:
return rc;
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3064,9 +3644,14 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
bool failed = false;
int rc;
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
if (!nic_data->must_restore_filters)
return;
+ if (!table)
+ return;
+
spin_lock_bh(&efx->filter_lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
@@ -3102,6 +3687,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
nic_data->must_restore_filters = false;
}
+/* Caller must hold efx->filter_sem for write */
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3110,6 +3696,10 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
unsigned int filter_idx;
int rc;
+ efx->filter_state = NULL;
+ if (!table)
+ return;
+
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec)
@@ -3135,6 +3725,9 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3149,6 +3742,9 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
if (!efx_dev_registered(efx))
return;
+ if (!table)
+ return;
+
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
@@ -3280,6 +3876,149 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
WARN_ON(remove_failed);
}
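+/* Reconfigure the MAC address on a PF-created vport.  The vadaptor and
+ * filter table are torn down first, the old MAC is removed from the vport
+ * and the new one added, and everything is restored afterwards; a datapath
+ * reset is scheduled if restoration fails.
+ */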
+static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 mac_old[ETH_ALEN];
+ int rc, rc2;
+
+ /* Only reconfigure a PF-created vport */
+ if (is_zero_ether_addr(nic_data->vport_mac))
+ return 0;
+
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+
+ rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ if (rc)
+ goto restore_filters;
+
+ ether_addr_copy(mac_old, nic_data->vport_mac);
+ rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ nic_data->vport_mac);
+ if (rc)
+ goto restore_vadaptor;
+
+ rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+ efx->net_dev->dev_addr);
+ if (!rc) {
+ ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
+ } else {
+ rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+ if (rc2) {
+ /* Failed to add original MAC, so clear vport_mac */
+ eth_zero_addr(nic_data->vport_mac);
+ goto reset_nic;
+ }
+ }
+
+restore_vadaptor:
+ rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc2)
+ goto reset_nic;
+restore_filters:
+ down_write(&efx->filter_sem);
+ rc2 = efx_ef10_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc2)
+ goto reset_nic;
+
+ rc2 = efx_net_open(efx->net_dev);
+ if (rc2)
+ goto reset_nic;
+
+ netif_device_attach(efx->net_dev);
+
+ return rc;
+
+reset_nic:
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore when changing MAC address - scheduling reset\n");
+ efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
+
+ return rc ? rc : rc2;
+}
+
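+/* Set the MAC address via MC_CMD_VADAPTOR_SET_MAC.  On a VF that is denied
+ * permission, ask the parent PF to change the address on the VF's vport;
+ * on a PF whose firmware lacks the command, fall back to
+ * efx_ef10_vport_set_mac_address().
+ */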
+static int efx_ef10_set_mac_address(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ bool was_enabled = efx->port_enabled;
+ int rc;
+
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
+
+ ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
+ efx->net_dev->dev_addr);
+ MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ nic_data->vport_id);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+
+ efx_ef10_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (was_enabled)
+ efx_net_open(efx->net_dev);
+ netif_device_attach(efx->net_dev);
+
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+ if (rc == -EPERM) {
+ struct efx_nic *efx_pf;
+
+ /* Switch to PF and change MAC address on vport */
+ efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ rc = efx_ef10_sriov_set_vf_mac(efx_pf,
+ nic_data->vf_index,
+ efx->net_dev->dev_addr);
+ } else if (!rc) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
+ unsigned int i;
+
+ /* MAC address successfully changed by VF (with MAC
+ * spoofing) so update the parent PF if possible.
+ */
+ for (i = 0; i < efx_pf->vf_count; ++i) {
+ struct ef10_vf *vf = nic_data->vf + i;
+
+ if (vf->efx == efx) {
+ ether_addr_copy(vf->mac,
+ efx->net_dev->dev_addr);
+ return 0;
+ }
+ }
+ }
+ } else
+#endif
+ if (rc == -EPERM) {
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable"
+ " mac-spoofing on this interface\n");
+ } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
+		/* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC,
+		 * fall back to the method of changing the MAC address on the
+ * vport. This only applies to PFs because such versions of
+ * MCFW do not support VFs.
+ */
+ rc = efx_ef10_vport_set_mac_address(efx);
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
+ sizeof(inbuf), NULL, 0, rc);
+ }
+
+ return rc;
+}
+
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
efx_ef10_filter_sync_rx_mode(efx);
@@ -3287,6 +4026,13 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return 0;
+}
+
static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
@@ -3494,6 +4240,9 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
+ u32 host_time) {}
+
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
bool temp)
{
@@ -3571,6 +4320,12 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
return 0;
}
+static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ return -EOPNOTSUPP;
+}
+
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
struct hwtstamp_config *init)
{
@@ -3607,14 +4362,118 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
+const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
+ .is_vf = true,
+ .mem_bar = EFX_MEM_VF_BAR,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_vf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_vf,
+ .start_stats = efx_port_dummy_op_void,
+ .pull_stats = efx_port_dummy_op_void,
+ .stop_stats = efx_port_dummy_op_void,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol_vf,
+ .set_wol = efx_ef10_set_wol_vf,
+ .resume_wol = efx_port_dummy_op_void,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_port_dummy_op_int,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+ .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_vf,
+ .set_mac_address = efx_ef10_set_mac_address,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+};
+
const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = efx_ef10_mem_map_size,
- .probe = efx_ef10_probe,
+ .probe = efx_ef10_probe_pf,
.remove = efx_ef10_remove,
.dimension_resources = efx_ef10_dimension_resources,
.init = efx_ef10_init_nic,
.fini = efx_port_dummy_op_void,
- .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_reason = efx_ef10_map_reset_reason,
.map_reset_flags = efx_ef10_map_reset_flags,
.reset = efx_ef10_reset,
.probe_port = efx_mcdi_port_probe,
@@ -3623,7 +4482,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.prepare_flr = efx_ef10_prepare_flr,
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
- .update_stats = efx_ef10_update_stats,
+ .update_stats = efx_ef10_update_stats_pf,
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
@@ -3650,7 +4509,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
- .rx_push_rss_config = efx_ef10_rx_push_rss_config,
+ .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -3689,11 +4548,24 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
.sriov_fini = efx_ef10_sriov_fini,
- .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
.sriov_wanted = efx_ef10_sriov_wanted,
.sriov_reset = efx_ef10_sriov_reset,
+ .sriov_flr = efx_ef10_sriov_flr,
+ .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
+ .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
+ .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
+ .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
+ .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
+ .vswitching_probe = efx_ef10_vswitching_probe_pf,
+ .vswitching_restore = efx_ef10_vswitching_restore_pf,
+ .vswitching_remove = efx_ef10_vswitching_remove_pf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
new file mode 100644
index 000000000..3c17f274e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -0,0 +1,746 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "ef10_sriov.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_pcol.h"
+
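+/* Assign an EVB port to a VF function; passing EVB_PORT_ID_NULL detaches
+ * the VF from its current port.
+ */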
+static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
+ unsigned int vf_fn)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+ MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+ EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index,
+ EVB_PORT_ASSIGN_IN_VF, vf_fn);
+
+ return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
+ unsigned int vswitch_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type);
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2);
+ MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
+ VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0);
+
+ /* Quietly try to allocate 2 VLAN tags */
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+	/* If 2 VLAN tags are too many, revert to trying with 1 VLAN tag */
+ if (rc == -EPROTO) {
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ } else if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_VSWITCH_ALLOC,
+ MC_CMD_VSWITCH_ALLOC_IN_LEN,
+ NULL, 0, rc);
+ }
+ return rc;
+}
+
+static int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_alloc(struct efx_nic *efx,
+ unsigned int port_id_in,
+ unsigned int vport_type,
+ u16 vlan,
+ unsigned int *port_id_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(!port_id_out);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id_in);
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, vport_type);
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS,
+ (vlan != EFX_EF10_NO_VLAN));
+ MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_FLAGS,
+ VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0);
+ if (vlan != EFX_EF10_NO_VLAN)
+ MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS,
+ VPORT_ALLOC_IN_VLAN_TAG_0, vlan);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+ return 0;
+}
+
+static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
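+/* Release the vswitching resources of every VF that is not currently
+ * assigned to a guest: unassign its EVB port, remove its MAC address and
+ * free its vport.
+ */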
+static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int i;
+
+ if (!nic_data->vf)
+ return;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ struct ef10_vf *vf = nic_data->vf + i;
+
+ /* If VF is assigned, do not free the vport */
+ if (vf->pci_dev &&
+ vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ continue;
+
+ if (vf->vport_assigned) {
+ efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i);
+ vf->vport_assigned = 0;
+ }
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+ eth_zero_addr(vf->mac);
+ }
+
+ if (vf->vport_id) {
+ efx_ef10_vport_free(efx, vf->vport_id);
+ vf->vport_id = 0;
+ }
+
+ vf->efx = NULL;
+ }
+}
+
+static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ efx_ef10_sriov_free_vf_vports(efx);
+ kfree(nic_data->vf);
+ nic_data->vf = NULL;
+}
+
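+/* Create a vport for one VF, add its MAC address and bind the VF function
+ * to the new vport's EVB port.
+ */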
+static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx,
+ unsigned int vf_i)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf = nic_data->vf + vf_i;
+ int rc;
+
+ if (WARN_ON_ONCE(!nic_data->vf))
+ return -EOPNOTSUPP;
+
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ vf->vlan, &vf->vport_id);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+ if (rc) {
+ eth_zero_addr(vf->mac);
+ return rc;
+ }
+
+ rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc)
+ return rc;
+
+ vf->vport_assigned = 1;
+ return 0;
+}
+
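+/* Allocate the PF's per-VF state, giving each VF a random MAC address and
+ * its own vport.
+ */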
+static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
+ int rc;
+
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(struct ef10_vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
+ return -ENOMEM;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ random_ether_addr(nic_data->vf[i].mac);
+ nic_data->vf[i].efx = NULL;
+ nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
+
+ rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ efx_ef10_sriov_free_vf_vports(efx);
+ kfree(nic_data->vf);
+ nic_data->vf = NULL;
+ return rc;
+}
+
+static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx)
+{
+ unsigned int i;
+ int rc;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ efx_ef10_sriov_free_vf_vswitching(efx);
+ return rc;
+}
+
+/* On top of the default firmware vswitch setup, create a VEB vswitch and
+ * expansion vport for use by this function.
+ */
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ int rc;
+
+ if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
+ /* vswitch not needed as we have no VFs */
+ efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ return 0;
+ }
+
+ rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+ if (rc)
+ goto fail1;
+
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ EFX_EF10_NO_VLAN, &nic_data->vport_id);
+ if (rc)
+ goto fail2;
+
+ rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+ if (rc)
+ goto fail3;
+ ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
+
+ rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc)
+ goto fail4;
+
+ return 0;
+fail4:
+ efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+ eth_zero_addr(nic_data->vport_mac);
+fail3:
+ efx_ef10_vport_free(efx, nic_data->vport_id);
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+fail2:
+ efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
+fail1:
+ return rc;
+}
+
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+}
+
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (!nic_data->must_probe_vswitching)
+ return 0;
+
+ rc = efx_ef10_vswitching_probe_pf(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_ef10_sriov_restore_vf_vswitching(efx);
+ if (rc)
+ goto fail;
+
+ nic_data->must_probe_vswitching = false;
+fail:
+ return rc;
+}
+
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (!nic_data->must_probe_vswitching)
+ return 0;
+
+ rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+ if (rc)
+ return rc;
+
+ nic_data->must_probe_vswitching = false;
+ return 0;
+}
+
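+/* Undo efx_ef10_vswitching_probe_pf(): free the VF vswitching state and the
+ * PF's vadaptor, then, if an expansion vport was created, remove its MAC,
+ * free it and (provided no VFs are assigned) free the vswitch.
+ */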
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ efx_ef10_sriov_free_vf_vswitching(efx);
+
+ efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+
+ if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+ return; /* No vswitch was ever created */
+
+ if (!is_zero_ether_addr(nic_data->vport_mac)) {
+ efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ efx->net_dev->dev_addr);
+ eth_zero_addr(nic_data->vport_mac);
+ }
+ efx_ef10_vport_free(efx, nic_data->vport_id);
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
+ /* Only free the vswitch if no VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ efx_ef10_vswitch_free(efx, nic_data->vport_id);
+}
+
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
+{
+ efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+}
+
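+/* Set up vswitching for the requested number of VFs, then enable them at
+ * the PCI level.
+ */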
+static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
+{
+ int rc = 0;
+ struct pci_dev *dev = efx->pci_dev;
+
+ efx->vf_count = num_vfs;
+
+ rc = efx_ef10_sriov_alloc_vf_vswitching(efx);
+ if (rc)
+ goto fail1;
+
+ rc = pci_enable_sriov(dev, num_vfs);
+ if (rc)
+ goto fail2;
+
+ return 0;
+fail2:
+ efx_ef10_sriov_free_vf_vswitching(efx);
+fail1:
+ efx->vf_count = 0;
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to enable SRIOV VFs\n");
+ return rc;
+}
+
+static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ unsigned int vfs_assigned = 0;
+
+ vfs_assigned = pci_vfs_assigned(dev);
+
+ if (vfs_assigned && !force) {
+ netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
+ "please detach them before disabling SR-IOV\n");
+ return -EBUSY;
+ }
+
+ if (!vfs_assigned)
+ pci_disable_sriov(dev);
+
+ efx_ef10_sriov_free_vf_vswitching(efx);
+ efx->vf_count = 0;
+ return 0;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ if (num_vfs == 0)
+ return efx_ef10_pci_sriov_disable(efx, false);
+ else
+ return efx_ef10_pci_sriov_enable(efx, num_vfs);
+}
+
+int efx_ef10_sriov_init(struct efx_nic *efx)
+{
+ return 0;
+}
+
+void efx_ef10_sriov_fini(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
+ int rc;
+
+ if (!nic_data->vf) {
+ /* Remove any un-assigned orphaned VFs */
+ if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
+ pci_disable_sriov(efx->pci_dev);
+ return;
+ }
+
+ /* Remove any VFs in the host */
+ for (i = 0; i < efx->vf_count; ++i) {
+ struct efx_nic *vf_efx = nic_data->vf[i].efx;
+
+ if (vf_efx)
+ vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
+ }
+
+ rc = efx_ef10_pci_sriov_disable(efx, true);
+ if (rc)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Disabling SRIOV was not successful rc=%d\n", rc);
+ else
+ netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n");
+}
+
+static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
+ u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ return rc;
+}
+
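+/* Change a VF's MAC address from the PF.  If a VF driver is bound, its
+ * datapath, filter table and vadaptor are quiesced first; the MAC is then
+ * replaced on the VF's vport and the EVB port reassigned before the VF is
+ * restored.
+ */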
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ int rc;
+
+ if (!nic_data->vf)
+ return -EOPNOTSUPP;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+ vf = nic_data->vf + vf_i;
+
+ if (vf->efx) {
+ efx_device_detach_sync(vf->efx);
+ efx_net_stop(vf->efx->net_dev);
+
+ down_write(&vf->efx->filter_sem);
+ vf->efx->type->filter_table_remove(vf->efx);
+
+ rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc) {
+ up_write(&vf->efx->filter_sem);
+ return rc;
+ }
+ }
+
+ rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+ if (rc)
+ return rc;
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc = efx_ef10_vport_del_vf_mac(efx, vf->vport_id, vf->mac);
+ if (rc)
+ return rc;
+ }
+
+ if (!is_zero_ether_addr(mac)) {
+ rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
+ if (rc) {
+ eth_zero_addr(vf->mac);
+ goto fail;
+ }
+ if (vf->efx)
+ ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+ }
+
+ ether_addr_copy(vf->mac, mac);
+
+ rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc)
+ goto fail;
+
+ if (vf->efx) {
+ /* VF cannot use the vport_id that the PF created */
+ rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc) {
+ up_write(&vf->efx->filter_sem);
+ return rc;
+ }
+ vf->efx->type->filter_table_probe(vf->efx);
+ up_write(&vf->efx->filter_sem);
+ efx_net_open(vf->efx->net_dev);
+ netif_device_attach(vf->efx->net_dev);
+ }
+
+ return 0;
+
+fail:
+ memset(vf->mac, 0, ETH_ALEN);
+ return rc;
+}
+
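+/* Change a VF's default VLAN.  The VLAN tag belongs to the vport, so the
+ * VF's vport is torn down and re-created with the new tag, then the MAC,
+ * EVB port, vadaptor and filter table are restored in reverse order.
+ */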
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ u16 old_vlan, new_vlan;
+ int rc = 0, rc2 = 0;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+ if (qos != 0)
+ return -EINVAL;
+
+ vf = nic_data->vf + vf_i;
+
+ new_vlan = (vlan == 0) ? EFX_EF10_NO_VLAN : vlan;
+ if (new_vlan == vf->vlan)
+ return 0;
+
+ if (vf->efx) {
+ efx_device_detach_sync(vf->efx);
+ efx_net_stop(vf->efx->net_dev);
+
+ down_write(&vf->efx->filter_sem);
+ vf->efx->type->filter_table_remove(vf->efx);
+
+ rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc)
+ goto restore_filters;
+ }
+
+ if (vf->vport_assigned) {
+ rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+ if (rc) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to change vlan on VF %d.\n", vf_i);
+ netif_warn(efx, drv, efx->net_dev,
+ "This is likely because the VF is bound to a driver in a VM.\n");
+ netif_warn(efx, drv, efx->net_dev,
+ "Please unload the driver in the VM.\n");
+ goto restore_vadaptor;
+ }
+ vf->vport_assigned = 0;
+ }
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+ if (rc)
+ goto restore_evb_port;
+ }
+
+ if (vf->vport_id) {
+ rc = efx_ef10_vport_free(efx, vf->vport_id);
+ if (rc)
+ goto restore_mac;
+ vf->vport_id = 0;
+ }
+
+ /* Do the actual vlan change */
+ old_vlan = vf->vlan;
+ vf->vlan = new_vlan;
+
+ /* Restore everything in reverse order */
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ vf->vlan, &vf->vport_id);
+ if (rc)
+ goto reset_nic_up_write;
+
+restore_mac:
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+ if (rc2) {
+ eth_zero_addr(vf->mac);
+ goto reset_nic_up_write;
+ }
+ }
+
+restore_evb_port:
+ rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc2)
+ goto reset_nic_up_write;
+ else
+ vf->vport_assigned = 1;
+
+restore_vadaptor:
+ if (vf->efx) {
+ rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc2)
+ goto reset_nic_up_write;
+ }
+
+restore_filters:
+ if (vf->efx) {
+ rc2 = vf->efx->type->filter_table_probe(vf->efx);
+ if (rc2)
+ goto reset_nic_up_write;
+
+		up_write(&vf->efx->filter_sem);
+
+ rc2 = efx_net_open(vf->efx->net_dev);
+ if (rc2)
+ goto reset_nic;
+
+ netif_device_attach(vf->efx->net_dev);
+ }
+ return rc;
+
+reset_nic_up_write:
+ if (vf->efx)
+ up_write(&vf->efx->filter_sem);
+
+reset_nic:
+ if (vf->efx) {
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore VF - scheduling reset.\n");
+ efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore the VF and cannot reset the VF "
+ "- VF is not functional.\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Please reload the driver attached to the VF.\n");
+ }
+
+ return rc ? rc : rc2;
+}
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
+ bool spoofchk)
+{
+ return spoofchk ? -EOPNOTSUPP : 0;
+}
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+ int link_state)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_AUTO !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO);
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_ENABLE !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP);
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_DISABLE !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN);
+ MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ LINK_STATE_MODE_IN_FUNCTION_PF,
+ nic_data->pf_index,
+ LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+ MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, link_state);
+ return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL); /* don't care what old mode was */
+}
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivf)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
+
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ size_t outlen;
+ int rc;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+
+ if (!nic_data->vf)
+ return -EOPNOTSUPP;
+
+ vf = nic_data->vf + vf_i;
+
+ ivf->vf = vf_i;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = 0;
+ ether_addr_copy(ivf->mac, vf->mac);
+ ivf->vlan = (vf->vlan == EFX_EF10_NO_VLAN) ? 0 : vf->vlan;
+ ivf->qos = 0;
+
+ MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ LINK_STATE_MODE_IN_FUNCTION_PF,
+ nic_data->pf_index,
+ LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+ MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
+ MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
+ return -EIO;
+ ivf->linkstate = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
+
+ return 0;
+}
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
new file mode 100644
index 000000000..6d25b92cb
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -0,0 +1,75 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF10_SRIOV_H
+#define EF10_SRIOV_H
+
+#include "net_driver.h"
+
+/**
+ * struct ef10_vf - PF's store of VF data
+ * @efx: efx_nic struct for the current VF
+ * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned
+ * @vport_id: vport ID for the VF
+ * @vport_assigned: record whether the vport is currently assigned to the VF
+ * @mac: MAC address for the VF, zero when address is removed from the vport
+ * @vlan: Default VLAN for the VF or #EFX_EF10_NO_VLAN
+ */
+struct ef10_vf {
+ struct efx_nic *efx;
+ struct pci_dev *pci_dev;
+ unsigned int vport_id;
+ unsigned int vport_assigned;
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+#define EFX_EF10_NO_VLAN 0
+};
+
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx)
+{
+ return false;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_ef10_sriov_init(struct efx_nic *efx);
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+void efx_ef10_sriov_fini(struct efx_nic *efx);
+static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
+ u16 vlan, u8 qos);
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+ bool spoofchk);
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivf);
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+ int link_state);
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac);
+int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac);
+int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
+int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
+
+#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4b00545a3..03bc03b67 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -26,6 +26,7 @@
#include "efx.h"
#include "nic.h"
#include "selftest.h"
+#include "sriov.h"
#include "mcdi.h"
#include "workarounds.h"
@@ -76,6 +77,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
[RESET_TYPE_MC_BIST] = "MC_BIST",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
@@ -243,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx)
*/
static int efx_process_channel(struct efx_channel *channel, int budget)
{
+ struct efx_tx_queue *tx_queue;
int spent;
if (unlikely(!channel->enabled))
return 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
spent = efx_nic_process_eventq(channel, budget);
if (spent && efx_channel_has_rx_queue(channel)) {
struct efx_rx_queue *rx_queue =
@@ -257,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_fast_push_rx_descriptors(rx_queue, true);
}
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl, tx_queue->bytes_compl);
+ }
+ }
+
return spent;
}
@@ -948,6 +964,16 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
static void efx_fini_port(struct efx_nic *efx);
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+}
+
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
* the MAC appropriately. All other PHY configuration changes are pushed
* through phy_op->set_settings(), and pushed asynchronously to the MAC
@@ -1001,7 +1027,7 @@ static void efx_mac_work(struct work_struct *data)
mutex_lock(&efx->mac_lock);
if (efx->port_enabled)
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1041,11 +1067,11 @@ static int efx_init_port(struct efx_nic *efx)
/* Reconfigure the MAC before creating dma queues (required for
* Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
/* Ensure the PHY advertises the correct flow control settings */
rc = efx->phy_op->reconfigure(efx);
- if (rc)
+ if (rc && rc != -EPERM)
goto fail2;
mutex_unlock(&efx->mac_lock);
@@ -1067,7 +1093,7 @@ static void efx_start_port(struct efx_nic *efx)
efx->port_enabled = true;
/* Ensure MAC ingress/egress is enabled */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1200,10 +1226,12 @@ static int efx_init_io(struct efx_nic *efx)
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
unsigned int mem_map_size = efx->type->mem_map_size(efx);
- int rc;
+ int rc, bar;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+ bar = efx->type->mem_bar;
+
rc = pci_enable_device(pci_dev);
if (rc) {
netif_err(efx, probe, efx->net_dev,
@@ -1234,8 +1262,8 @@ static int efx_init_io(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
- rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
"request for memory BAR failed\n");
@@ -1258,7 +1286,7 @@ static int efx_init_io(struct efx_nic *efx)
return 0;
fail4:
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ pci_release_region(efx->pci_dev, bar);
fail3:
efx->membase_phys = 0;
fail2:
@@ -1269,6 +1297,8 @@ static int efx_init_io(struct efx_nic *efx)
static void efx_fini_io(struct efx_nic *efx)
{
+ int bar;
+
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
@@ -1277,11 +1307,23 @@ static void efx_fini_io(struct efx_nic *efx)
}
if (efx->membase_phys) {
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ bar = efx->type->mem_bar;
+ pci_release_region(efx->pci_dev, bar);
efx->membase_phys = 0;
}
- pci_disable_device(efx->pci_dev);
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
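+/* Reset the RX indirection table to the default spread across rss_spread
+ * channels.
+ */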
+void efx_set_default_rx_indir_table(struct efx_nic *efx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
}
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
@@ -1304,7 +1346,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
if (!cpumask_test_cpu(cpu, thread_mask)) {
++count;
cpumask_or(thread_mask, thread_mask,
- topology_thread_cpumask(cpu));
+ topology_sibling_cpumask(cpu));
}
}
@@ -1314,15 +1356,19 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
/* If RSS is requested for the PF *and* VFs then we can't write RSS
* table entries that are inaccessible to VFs
*/
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
}
+#endif
return count;
}
@@ -1426,10 +1472,15 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* RSS might be usable on VFs even if it is disabled on the PF */
-
- efx->rss_spread = ((efx->n_rx_channels > 1 ||
- !efx->type->sriov_wanted(efx)) ?
- efx->n_rx_channels : efx_vf_size(efx));
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((efx->n_rx_channels > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ efx->n_rx_channels : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = efx->n_rx_channels;
return 0;
}
@@ -1593,7 +1644,6 @@ static void efx_set_channels(struct efx_nic *efx)
static int efx_probe_nic(struct efx_nic *efx)
{
- size_t i;
int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
@@ -1616,10 +1666,9 @@ static int efx_probe_nic(struct efx_nic *efx)
goto fail2;
if (efx->n_channels > 1)
- netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
+ netdev_rss_key_fill(&efx->rx_hash_key,
+ sizeof(efx->rx_hash_key));
+ efx_set_default_rx_indir_table(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
@@ -1650,10 +1699,11 @@ static int efx_probe_filters(struct efx_nic *efx)
int rc;
spin_lock_init(&efx->filter_lock);
-
+ init_rwsem(&efx->filter_sem);
+ down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
- return rc;
+ goto out_unlock;
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
@@ -1662,12 +1712,14 @@ static int efx_probe_filters(struct efx_nic *efx)
GFP_KERNEL);
if (!efx->rps_flow_id) {
efx->type->filter_table_remove(efx);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_unlock;
}
}
#endif
-
- return 0;
+out_unlock:
+ up_write(&efx->filter_sem);
+ return rc;
}
static void efx_remove_filters(struct efx_nic *efx)
@@ -1675,12 +1727,16 @@ static void efx_remove_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_flow_id);
#endif
+ down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
}
static void efx_restore_filters(struct efx_nic *efx)
{
+ down_read(&efx->filter_sem);
efx->type->filter_table_restore(efx);
+ up_read(&efx->filter_sem);
}
/**************************************************************************
@@ -1712,21 +1768,33 @@ static int efx_probe_all(struct efx_nic *efx)
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_probe(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to setup vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
- goto fail3;
+ goto fail4;
}
rc = efx_probe_channels(efx);
if (rc)
- goto fail4;
+ goto fail5;
return 0;
- fail4:
+ fail5:
efx_remove_filters(efx);
+ fail4:
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
fail3:
efx_remove_port(efx);
fail2:
@@ -1816,6 +1884,9 @@ static void efx_remove_all(struct efx_nic *efx)
{
efx_remove_channels(efx);
efx_remove_filters(efx);
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
efx_remove_port(efx);
efx_remove_nic(efx);
}
@@ -2059,7 +2130,7 @@ static int efx_busy_poll(struct napi_struct *napi)
*************************************************************************/
/* Context: process, rtnl_lock() held. */
-static int efx_net_open(struct net_device *net_dev)
+int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
@@ -2088,7 +2159,7 @@ static int efx_net_open(struct net_device *net_dev)
* Note that the kernel will ignore our return code; this method
* should really be a void.
*/
-static int efx_net_stop(struct net_device *net_dev)
+int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2146,7 +2217,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
@@ -2159,6 +2230,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
u8 *new_addr = addr->sa_data;
+ u8 old_addr[6];
+ int rc;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
@@ -2167,12 +2240,20 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
return -EADDRNOTAVAIL;
}
+ /* save old address */
+ ether_addr_copy(old_addr, net_dev->dev_addr);
ether_addr_copy(net_dev->dev_addr, new_addr);
- efx->type->sriov_mac_address_changed(efx);
+ if (efx->type->set_mac_address) {
+ rc = efx->type->set_mac_address(efx);
+ if (rc) {
+ ether_addr_copy(net_dev->dev_addr, old_addr);
+ return rc;
+ }
+ }
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
return 0;
@@ -2199,7 +2280,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-static const struct net_device_ops efx_farch_netdev_ops = {
+static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
@@ -2212,10 +2293,12 @@ static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_SFC_SRIOV
- .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac,
- .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
- .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
- .ndo_get_vf_config = efx_siena_sriov_get_vf_config,
+ .ndo_set_vf_mac = efx_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_sriov_get_vf_config,
+ .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
+ .ndo_get_phys_port_id = efx_sriov_get_phys_port_id,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
@@ -2229,29 +2312,6 @@ static const struct net_device_ops efx_farch_netdev_ops = {
#endif
};
-static const struct net_device_ops efx_ef10_netdev_ops = {
- .ndo_open = efx_net_open,
- .ndo_stop = efx_net_stop,
- .ndo_get_stats64 = efx_net_stats,
- .ndo_tx_timeout = efx_watchdog,
- .ndo_start_xmit = efx_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = efx_ioctl,
- .ndo_change_mtu = efx_change_mtu,
- .ndo_set_mac_address = efx_set_mac_address,
- .ndo_set_rx_mode = efx_set_rx_mode,
- .ndo_set_features = efx_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = efx_netpoll,
-#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = efx_busy_poll,
-#endif
-#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = efx_filter_rfs,
-#endif
-};
-
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
@@ -2264,8 +2324,7 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
- net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
+ if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
@@ -2284,6 +2343,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+#endif
+
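
For reference: once this attribute exists, MCDI tracing can be toggled per device from user space; set_mcdi_log() above treats any first character other than '0' as enable, and the initial state comes from the mcdi_logging_default module parameter added in mcdi.c further down. A minimal sketch, with the device path as an assumption for illustration:

/* Editor's sketch: enable MCDI tracing through the new sysfs attribute.
 * The PCI address below is only an example.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/mcdi_logging", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* any value not starting with '0' enables logging */
	fclose(f);
	return 0;
}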
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -2292,12 +2373,9 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
- } else {
- net_dev->netdev_ops = &efx_farch_netdev_ops;
- }
net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
@@ -2344,9 +2422,21 @@ static int efx_register_netdev(struct efx_nic *efx)
"failed to init net dev attributes\n");
goto fail_registered;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_attr_mcdi_logging;
+ }
+#endif
return 0;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+fail_attr_mcdi_logging:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+#endif
fail_registered:
rtnl_lock();
efx_dissociate(efx);
@@ -2365,13 +2455,14 @@ static void efx_unregister_netdev(struct efx_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-
- rtnl_lock();
- unregister_netdevice(efx->net_dev);
- efx->state = STATE_UNINIT;
- rtnl_unlock();
+ if (efx_dev_registered(efx)) {
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+#endif
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
}
/**************************************************************************
@@ -2393,7 +2484,8 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
efx->phy_op->fini(efx);
efx->type->fini(efx);
}
@@ -2422,11 +2514,13 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (!ok)
goto fail;
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
rc = efx->phy_op->init(efx);
if (rc)
goto fail;
- if (efx->phy_op->reconfigure(efx))
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
}
@@ -2434,8 +2528,20 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ down_read(&efx->filter_sem);
efx_restore_filters(efx);
- efx->type->sriov_reset(efx);
+ up_read(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
@@ -2605,6 +2711,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
case RESET_TYPE_MC_BIST:
case RESET_TYPE_MCDI_TIMEOUT:
method = type;
@@ -2655,6 +2762,8 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
@@ -2809,7 +2918,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
}
/* Final NIC shutdown
- * This is called only at module unload (or hotplug removal).
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
@@ -2824,9 +2934,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
+ efx->state = STATE_UNINIT;
rtnl_unlock();
- efx->type->sriov_fini(efx);
+ if (efx->type->sriov_fini)
+ efx->type->sriov_fini(efx);
+
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
@@ -3008,7 +3121,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_probe_vpd_strings(efx);
+ if (!efx->type->is_vf)
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -3023,10 +3137,12 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail4;
- rc = efx->type->sriov_init(efx);
- if (rc)
- netif_err(efx, probe, efx->net_dev,
- "SR-IOV can't be enabled rc %d\n", rc);
+ if (efx->type->sriov_init) {
+ rc = efx->type->sriov_init(efx);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "SR-IOV can't be enabled rc %d\n", rc);
+ }
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
@@ -3058,6 +3174,26 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return rc;
}
+/* efx_pci_sriov_configure returns the actual number of Virtual Functions
+ * enabled on success
+ */
+#ifdef CONFIG_SFC_SRIOV
+static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ int rc;
+ struct efx_nic *efx = pci_get_drvdata(dev);
+
+ if (efx->type->sriov_configure) {
+ rc = efx->type->sriov_configure(efx, num_vfs);
+ if (rc)
+ return rc;
+ else
+ return num_vfs;
+ } else
+ return -EOPNOTSUPP;
+}
+#endif
+
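
For context: the PCI core calls .sriov_configure when the administrator writes to the device's sriov_numvfs sysfs attribute, and expects back the number of VFs actually enabled, 0 on disable, or a negative errno; that is the convention the wrapper above follows. A hedged sketch of the usual driver-side shape, using the generic pci_enable_sriov()/pci_disable_sriov() helpers rather than the type-specific sfc callback:

/* Editor's sketch only - the generic pattern behind a .sriov_configure
 * handler; the sfc callback resolves to a NIC-type-specific implementation.
 */
#include <linux/pci.h>

static int example_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	int rc;

	if (num_vfs == 0) {
		pci_disable_sriov(dev);		/* tear down all VFs */
		return 0;
	}

	rc = pci_enable_sriov(dev, num_vfs);
	return rc ? rc : num_vfs;		/* report how many VFs came up */
}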
static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
@@ -3280,6 +3416,9 @@ static struct pci_driver efx_pci_driver = {
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
.err_handler = &efx_err_handlers,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_pci_sriov_configure,
+#endif
};
/**************************************************************************
@@ -3302,9 +3441,11 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
+#ifdef CONFIG_SFC_SRIOV
rc = efx_init_sriov();
if (rc)
goto err_sriov;
+#endif
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
@@ -3321,8 +3462,10 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
+#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
err_sriov:
+#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -3334,7 +3477,9 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
+#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
+#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2587c582a..acb1e0718 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,7 +15,12 @@
#include "filter.h"
/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All VFs use BAR 0/1 for memory */
#define EFX_MEM_BAR 2
+#define EFX_MEM_VF_BAR 0
+
+int efx_net_open(struct net_device *net_dev);
+int efx_net_stop(struct net_device *net_dev);
/* TX */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
@@ -32,6 +37,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
/* RX */
+void efx_set_default_rx_indir_table(struct efx_nic *efx);
void efx_rx_config_page_split(struct efx_nic *efx);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
@@ -71,6 +77,8 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
/* Filters */
+void efx_mac_reconfigure(struct efx_nic *efx);
+
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -220,6 +228,13 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif
+#ifdef CONFIG_SFC_SRIOV
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+ return 1 << efx->vi_scale;
+}
+#endif
+
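
For reference: efx_vf_size() is simply 1 << vi_scale, the number of virtual interfaces granted to each VF, which is the quantity efx_wanted_parallelism() and the farch buffer-table sizing in this patch are bounded by. A trivial numeric check:

/* Editor's sketch: efx_vf_size() values for small vi_scale settings. */
#include <stdio.h>

int main(void)
{
	unsigned int vi_scale;

	for (vi_scale = 0; vi_scale <= 3; vi_scale++)
		printf("vi_scale=%u -> %u VIs per VF\n",
		       vi_scale, 1u << vi_scale);
	return 0;	/* prints 1, 2, 4, 8 */
}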
static inline void efx_schedule_channel(struct efx_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d1dbb5fb3..c94f56271 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
* @RESET_TYPE_MC_BIST: MC entering BIST mode.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
@@ -159,6 +160,7 @@ enum reset_type {
RESET_TYPE_ALL,
RESET_TYPE_WORLD,
RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_DATAPATH,
RESET_TYPE_MC_BIST,
RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 4835bc0d0..034797661 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -734,7 +734,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
/* Reconfigure the MAC. The PHY *may* generate a link state change event
* if the user just changed the advertised capabilities, but there's no
* harm doing this twice */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
out:
mutex_unlock(&efx->mac_lock);
@@ -1109,9 +1109,8 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
return -EOPNOTSUPP;
if (!indir)
return 0;
- memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
- efx->type->rx_push_rss_config(efx);
- return 0;
+
+ return efx->type->rx_push_rss_config(efx, true, indir);
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index f166c8ef3..80e69af21 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -477,16 +477,29 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
*
**************************************************************************
*/
+static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
+{
+ (void) efx;
+ (void) user;
+ (void) rx_indir_table;
+ return -ENOSYS;
+}
-static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
{
efx_oword_t temp;
+ (void) user;
/* Set hash key for IPv4 */
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
+ return 0;
}
/**************************************************************************
@@ -2507,7 +2520,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- falcon_b0_rx_push_rss_config(efx);
+ falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2687,6 +2700,8 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2729,7 +2744,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_rss_config = efx_port_dummy_op_void,
+ .rx_push_rss_config = dummy_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -2766,11 +2781,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
- .sriov_init = efx_falcon_sriov_init,
- .sriov_fini = efx_falcon_sriov_fini,
- .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
- .sriov_wanted = efx_falcon_sriov_wanted,
- .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2788,6 +2798,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
};
const struct efx_nic_type falcon_b0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2867,11 +2879,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
- .sriov_init = efx_falcon_sriov_init,
- .sriov_fini = efx_falcon_sriov_fini,
- .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
- .sriov_wanted = efx_falcon_sriov_wanted,
- .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index bb89e96a1..f08266f0e 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -20,6 +20,8 @@
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
+#include "sriov.h"
+#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"
@@ -1198,13 +1200,17 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_tx_flush_done(efx, event);
+#endif
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_rx_flush_done(efx, event);
+#endif
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
@@ -1242,8 +1248,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else
+ }
+#ifdef CONFIG_SFC_SRIOV
+ else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@@ -1252,8 +1261,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else
+ }
+#ifdef CONFIG_SFC_SRIOV
+ else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
@@ -1317,9 +1329,11 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_farch_handle_driver_event(channel, &event);
break;
+#ifdef CONFIG_SFC_SRIOV
case FSE_CZ_EV_CODE_USER_EV:
efx_siena_sriov_event(channel, &event);
break;
+#endif
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
@@ -1685,28 +1699,32 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted(efx)) {
- unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
- nic_data->vf_buftbl_base = buftbl_min;
-
- vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
- vi_count = max(vi_count, EFX_VI_BASE);
- buftbl_free = (sram_lim_qw - buftbl_min -
- vi_count * vi_dc_entries);
-
- entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
- efx_vf_size(efx));
- vf_limit = min(buftbl_free / entries_per_vf,
- (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
- if (efx->vf_count > vf_limit) {
- netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
- efx->vf_count, vf_limit);
- efx->vf_count = vf_limit;
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free;
+ unsigned entries_per_vf, vf_limit;
+
+ nic_data->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries +
+ EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
}
- vi_count += efx->vf_count * efx_vf_size(efx);
}
#endif
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d37928f01..81640f8bb 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -8,6 +8,7 @@
*/
#include <linux/delay.h>
+#include <linux/moduleparam.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
@@ -54,18 +55,32 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+ "Enable MCDI logging on newly-probed functions");
+#endif
+
int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
bool already_attached;
- int rc;
+ int rc = -ENOMEM;
efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
if (!efx->mcdi)
- return -ENOMEM;
+ goto fail;
mcdi = efx_mcdi(efx);
mcdi->efx = efx;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ /* consuming code assumes buffer is page-sized */
+ mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!mcdi->logging_buffer)
+ goto fail1;
+ mcdi->logging_enabled = mcdi_logging_default;
+#endif
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
mcdi->state = MCDI_STATE_QUIESCENT;
@@ -81,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx)
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- return rc;
+ goto fail2;
/* Let the MC (and BMC, if this is a LOM) know that the driver
* is loaded. We should do this before we reset the NIC.
@@ -90,7 +105,7 @@ int efx_mcdi_init(struct efx_nic *efx)
if (rc) {
netif_err(efx, probe, efx->net_dev,
"Unable to register driver with MCPU\n");
- return rc;
+ goto fail2;
}
if (already_attached)
/* Not a fatal error */
@@ -102,6 +117,15 @@ int efx_mcdi_init(struct efx_nic *efx)
efx->primary = efx;
return 0;
+fail2:
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+ kfree(efx->mcdi);
+ efx->mcdi = NULL;
+fail:
+ return rc;
}
void efx_mcdi_fini(struct efx_nic *efx)
@@ -114,6 +138,10 @@ void efx_mcdi_fini(struct efx_nic *efx)
/* Relinquish the device (back to the BMC, if this is a LOM) */
efx_mcdi_drv_attach(efx, false, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
kfree(efx->mcdi);
}
@@ -121,6 +149,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr[2];
size_t hdr_len;
u32 xflags, seqno;
@@ -165,6 +196,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
hdr_len = 8;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ int bytes = 0;
+ int i;
+ /* Lengths should always be a whole number of dwords, so scream
+ * if they're not.
+ */
+ WARN_ON_ONCE(hdr_len % 4);
+ WARN_ON_ONCE(inlen % 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr[i].u32[0]));
+
+ for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(inbuf[i].u32[0]));
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+ }
+#endif
+
efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
mcdi->new_epoch = false;
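
The request-tracing loop above emits each 32-bit header and payload word as " %08x" into the page-sized buffer before logging a single "MCDI RPC REQ:" line; the response path below mirrors it. A small userspace rendition of the same formatting (the word values here are invented for the example):

/* Editor's sketch of the trace line format, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int words[] = { 0x013c0009, 0x00000120, 0x00000001 };
	char buf[128];
	int bytes = 0;
	unsigned int i;

	for (i = 0; i < 3 && bytes < (int)sizeof(buf); i++)
		bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
				  " %08x", words[i]);
	printf("MCDI RPC REQ:%s\n", buf);
	return 0;
}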
@@ -206,6 +262,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr;
efx->type->mcdi_read_response(efx, &hdr, 0, 4);
@@ -223,6 +282,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ size_t hdr_len, data_len;
+ int bytes = 0;
+ int i;
+
+ WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+ hdr_len = mcdi->resp_hdr_len / 4;
+ /* MCDI_DECLARE_BUF ensures that underlying buffer is padded
+ * to dword size, and the MCDI buffer is always dword size
+ */
+ data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr,
+ mcdi->resp_hdr_len + (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+ }
+#endif
+
if (error && mcdi->resp_data_len == 0) {
netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
mcdi->resprc = -EIO;
@@ -406,7 +498,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
struct efx_mcdi_async_param *async;
size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
- MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ MCDI_DECLARE_BUF_ERR(errbuf);
int rc;
if (cmpxchg(&mcdi->state,
@@ -534,7 +626,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
size_t *outlen_actual, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ MCDI_DECLARE_BUF_ERR(errbuf);
int rc;
if (mcdi->mode == MCDI_MODE_POLL)
@@ -1035,7 +1127,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
/* MAC stats are gathered lazily. We can ignore this. */
break;
case MCDI_EVENT_CODE_FLR:
- efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+ if (efx->type->sriov_flr)
+ efx->type->sriov_flr(efx,
+ MCDI_EVENT_FIELD(*event, FLR_VF));
break;
case MCDI_EVENT_CODE_PTP_RX:
case MCDI_EVENT_CODE_PTP_FAULT:
@@ -1081,9 +1175,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- MCDI_DECLARE_BUF(outbuf,
- max(MC_CMD_GET_VERSION_OUT_LEN,
- MC_CMD_GET_CAPABILITIES_OUT_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
size_t outlength;
const __le16 *ver_words;
size_t offset;
@@ -1108,19 +1200,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
* single version. Report which variants are running.
*/
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
- outbuf, sizeof(outbuf), &outlength);
- if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
- offset += snprintf(
- buf + offset, len - offset, " rx? tx?");
- else
- offset += snprintf(
- buf + offset, len - offset, " rx%x tx%x",
- MCDI_WORD(outbuf,
- GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
- MCDI_WORD(outbuf,
- GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
+ nic_data->rx_dpcpu_fw_id,
+ nic_data->tx_dpcpu_fw_id);
/* It's theoretically possible for the string to exceed 31
* characters, though in practice the first three version
@@ -1150,10 +1234,26 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
- rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
+ * specified will fail with EPERM, and we have to tell the MC we don't
+ * care what firmware we get.
+ */
+ if (rc == -EPERM) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
+ MC_CMD_FW_DONT_CARE);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ }
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
+ outbuf, outlen, rc);
goto fail;
+ }
if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
rc = -EIO;
goto fail;
@@ -1178,16 +1278,6 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
- if (driver_operating &&
- (efx->mcdi->fn_flags &
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
- 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
- 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
- netif_err(efx, probe, efx->net_dev,
- "This driver version only supports one function per port\n");
- return -ENODEV;
- }
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -1385,10 +1475,13 @@ fail1:
return rc;
}
+/* Returns 1 if an assertion was read, 0 if no assertion had fired,
+ * negative on error.
+ */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1406,6 +1499,8 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
outbuf, sizeof(outbuf), &outlen);
+ if (rc == -EPERM)
+ return 0;
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc) {
@@ -1443,24 +1538,31 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
index));
- return 0;
+ return 1;
}
-static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ int rc;
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
* reboot. We set a flag that makes the command a no-op if it
- * has already done so. We don't know what return code to
- * expect (0 or -EIO), so ignore it.
+ * has already done so.
+ * The MCDI will thus return either 0 or -EIO.
*/
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+ if (rc == -EIO)
+ rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, rc);
+ return rc;
}
int efx_mcdi_handle_assertion(struct efx_nic *efx)
@@ -1468,12 +1570,10 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
int rc;
rc = efx_mcdi_read_assertion(efx);
- if (rc)
+ if (rc <= 0)
return rc;
- efx_mcdi_exit_assertion(efx);
-
- return 0;
+ return efx_mcdi_exit_assertion(efx);
}
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
@@ -1550,7 +1650,9 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
if (rc)
return rc;
- if (method == RESET_TYPE_WORLD)
+ if (method == RESET_TYPE_DATAPATH)
+ return 0;
+ else if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
return efx_mcdi_reset_func(efx);
@@ -1688,6 +1790,36 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
NULL, 0, NULL);
}
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+ unsigned int *enabled_out)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (impl_out)
+ *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+
+ if (enabled_out)
+ *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
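
A possible caller of the new helper (not part of this patch) would query which workarounds the MC has enabled and test one of the bits defined in mcdi_pcol.h below, for example bug 35388. A sketch, assuming the usual sfc driver headers are in scope:

/* Editor's sketch of a hypothetical caller of efx_mcdi_get_workarounds(). */
static bool example_bug35388_enabled(struct efx_nic *efx)
{
	unsigned int implemented, enabled;

	if (efx_mcdi_get_workarounds(efx, &implemented, &enabled))
		return false;	/* treat query failure as "not enabled" */

	return enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
}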
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 56465f746..1838afe2d 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -58,6 +58,8 @@ enum efx_mcdi_mode {
* enabled
* @async_list: Queue of asynchronous requests
* @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
*/
struct efx_mcdi_iface {
struct efx_nic *efx;
@@ -74,6 +76,10 @@ struct efx_mcdi_iface {
spinlock_t async_lock;
struct list_head async_list;
struct timer_list async_timer;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *logging_buffer;
+ bool logging_enabled;
+#endif
};
struct efx_mcdi_mon {
@@ -176,10 +182,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
* memory strictly 32 bits at a time, so add any necessary padding.
*/
-#define MCDI_DECLARE_BUF(_name, _len) \
+#define _MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
-#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
- MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
+#define MCDI_DECLARE_BUF(_name, _len) \
+ _MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
+#define MCDI_DECLARE_BUF_ERR(_name) \
+ MCDI_DECLARE_BUF(_name, 8)
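
After this change MCDI_DECLARE_BUF rounds the requested length up to whole 32-bit words and zero-initialises the buffer. A simplified userspace sketch of what MCDI_DECLARE_BUF(buf, 13) now declares:

/* Editor's sketch (userspace, simplified types): 13 bytes rounds up to
 * four zero-initialised dwords, i.e. 16 bytes of buffer.
 */
#include <stdio.h>

typedef union { unsigned int u32[1]; } efx_dword_t;

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define _MCDI_DECLARE_BUF(_name, _len) \
	efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
#define MCDI_DECLARE_BUF(_name, _len) \
	_MCDI_DECLARE_BUF(_name, _len) = {{{0}}}

int main(void)
{
	MCDI_DECLARE_BUF(buf, 13);

	printf("%zu bytes, first word %u\n", sizeof(buf), buf[0].u32[0]);
	return 0;	/* prints "16 bytes, first word 0" */
}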
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -339,6 +347,8 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+ unsigned int *enabled_out);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e028de10e..45fca9fc6 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -638,6 +638,8 @@
*/
#define MC_CMD_READ32 0x1
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
@@ -659,6 +661,8 @@
*/
#define MC_CMD_WRITE32 0x2
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
@@ -679,6 +683,8 @@
*/
#define MC_CMD_COPYCODE 0x3
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
@@ -717,6 +723,8 @@
*/
#define MC_CMD_SET_FUNC 0x4
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
@@ -732,6 +740,8 @@
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -758,6 +768,8 @@
*/
#define MC_CMD_GET_ASSERTS 0x6
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_ASSERTS_IN msgrequest */
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
@@ -794,6 +806,8 @@
*/
#define MC_CMD_LOG_CTRL 0x7
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LOG_CTRL_IN msgrequest */
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
@@ -814,6 +828,8 @@
*/
#define MC_CMD_GET_VERSION 0x8
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VERSION_IN msgrequest */
#define MC_CMD_GET_VERSION_IN_LEN 0
@@ -870,6 +886,8 @@
*/
#define MC_CMD_PTP 0xb
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_PTP_IN msgrequest */
#define MC_CMD_PTP_IN_LEN 1
/* PTP operation code */
@@ -1404,6 +1422,8 @@
*/
#define MC_CMD_CSR_READ32 0xc
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
@@ -1428,6 +1448,8 @@
*/
#define MC_CMD_CSR_WRITE32 0xd
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
@@ -1452,6 +1474,8 @@
*/
#define MC_CMD_HP 0x54
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_HP_IN msgrequest */
#define MC_CMD_HP_IN_LEN 16
/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
@@ -1493,6 +1517,8 @@
*/
#define MC_CMD_STACKINFO 0xf
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_STACKINFO_IN msgrequest */
#define MC_CMD_STACKINFO_IN_LEN 0
@@ -1513,6 +1539,8 @@
*/
#define MC_CMD_MDIO_READ 0x10
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MDIO_READ_IN msgrequest */
#define MC_CMD_MDIO_READ_IN_LEN 16
/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1552,6 +1580,8 @@
*/
#define MC_CMD_MDIO_WRITE 0x11
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_MDIO_WRITE_IN msgrequest */
#define MC_CMD_MDIO_WRITE_IN_LEN 20
/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1591,6 +1621,8 @@
*/
#define MC_CMD_DBI_WRITE 0x12
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
#define MC_CMD_DBI_WRITE_IN_LENMAX 252
@@ -1739,6 +1771,8 @@
*/
#define MC_CMD_GET_BOARD_CFG 0x18
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
@@ -1778,6 +1812,8 @@
*/
#define MC_CMD_DBI_READX 0x19
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
#define MC_CMD_DBI_READX_IN_LENMAX 248
@@ -1822,6 +1858,8 @@
*/
#define MC_CMD_SET_RAND_SEED 0x1a
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
/* Seed value. */
@@ -1863,6 +1901,8 @@
*/
#define MC_CMD_DRV_ATTACH 0x1c
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DRV_ATTACH_IN msgrequest */
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state (0=detached, 1=attached) to set if UPDATE=1 */
@@ -1875,6 +1915,8 @@
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
@@ -1920,6 +1962,8 @@
*/
#define MC_CMD_PORT_RESET 0x20
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_PORT_RESET_IN msgrequest */
#define MC_CMD_PORT_RESET_IN_LEN 0
@@ -1934,6 +1978,7 @@
* extended version of the deprecated MC_CMD_PORT_RESET with added fields.
*/
#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
/* MC_CMD_ENTITY_RESET_IN msgrequest */
#define MC_CMD_ENTITY_RESET_IN_LEN 4
@@ -2023,6 +2068,8 @@
*/
#define MC_CMD_PUTS 0x23
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
@@ -2050,6 +2097,8 @@
*/
#define MC_CMD_GET_PHY_CFG 0x24
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
@@ -2149,6 +2198,8 @@
*/
#define MC_CMD_START_BIST 0x25
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
@@ -2185,6 +2236,8 @@
*/
#define MC_CMD_POLL_BIST 0x26
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -2344,6 +2397,8 @@
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
@@ -2463,6 +2518,8 @@
*/
#define MC_CMD_GET_LINK 0x29
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
@@ -2519,6 +2576,8 @@
*/
#define MC_CMD_SET_LINK 0x2a
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
/* ??? */
@@ -2550,6 +2609,8 @@
*/
#define MC_CMD_SET_ID_LED 0x2b
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_ID_LED_IN msgrequest */
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
@@ -2568,6 +2629,8 @@
*/
#define MC_CMD_SET_MAC 0x2c
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 24
/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
@@ -2609,6 +2672,8 @@
*/
#define MC_CMD_PHY_STATS 0x2d
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_PHY_STATS_IN msgrequest */
#define MC_CMD_PHY_STATS_IN_LEN 8
/* ??? */
@@ -2687,8 +2752,10 @@
*/
#define MC_CMD_MAC_STATS 0x2e
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MAC_STATS_IN msgrequest */
-#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_LEN 20
/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
@@ -2710,6 +2777,8 @@
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -2824,11 +2893,31 @@
/* enum: RXDP counter: Number of times an emergency descriptor fetch was
* performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
* descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
/* enum: Start of GMAC stats buffer space, for Siena only. */
#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
@@ -2926,6 +3015,8 @@
*/
#define MC_CMD_WOL_FILTER_SET 0x32
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
@@ -3020,6 +3111,8 @@
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
@@ -3035,6 +3128,8 @@
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
@@ -3069,6 +3164,8 @@
*/
#define MC_CMD_NVRAM_TYPES 0x36
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_TYPES_IN msgrequest */
#define MC_CMD_NVRAM_TYPES_IN_LEN 0
@@ -3125,6 +3222,8 @@
*/
#define MC_CMD_NVRAM_INFO 0x37
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
@@ -3157,6 +3256,8 @@
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
@@ -3175,6 +3276,8 @@
*/
#define MC_CMD_NVRAM_READ 0x39
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
@@ -3202,6 +3305,8 @@
*/
#define MC_CMD_NVRAM_WRITE 0x3a
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_WRITE_IN msgrequest */
#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
@@ -3228,6 +3333,8 @@
*/
#define MC_CMD_NVRAM_ERASE 0x3b
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
@@ -3248,6 +3355,8 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
@@ -3279,6 +3388,8 @@
*/
#define MC_CMD_REBOOT 0x3d
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
@@ -3316,6 +3427,8 @@
*/
#define MC_CMD_REBOOT_MODE 0x3f
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
@@ -3368,6 +3481,8 @@
*/
#define MC_CMD_SENSOR_INFO 0x41
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -3542,6 +3657,8 @@
*/
#define MC_CMD_READ_SENSORS 0x42
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
@@ -3602,6 +3719,8 @@
*/
#define MC_CMD_GET_PHY_STATE 0x43
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
@@ -3636,6 +3755,8 @@
*/
#define MC_CMD_WOL_FILTER_GET 0x45
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
@@ -3651,6 +3772,8 @@
*/
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
@@ -3692,6 +3815,8 @@
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
@@ -3722,6 +3847,8 @@
*/
#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_TESTASSERT_IN msgrequest */
#define MC_CMD_TESTASSERT_IN_LEN 0
@@ -3739,6 +3866,8 @@
*/
#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
@@ -3765,6 +3894,8 @@
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
@@ -3788,6 +3919,8 @@
*/
#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
@@ -3849,6 +3982,8 @@
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
@@ -3890,6 +4025,8 @@
*/
#define MC_CMD_NVRAM_PARTITIONS 0x51
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
@@ -3913,6 +4050,8 @@
*/
#define MC_CMD_NVRAM_METADATA 0x52
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_METADATA_IN msgrequest */
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
@@ -3958,6 +4097,8 @@
*/
#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
@@ -4087,11 +4228,66 @@
/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
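/* Illustrative sketch (hypothetical helper): reading a VF's current link
 * state mode without modifying it, by passing DO_NOT_CHANGE as the new mode.
 * The PF/VF pair is packed into FUNCTION as described above.
 */
static int example_get_vf_link_state(struct efx_nic *efx, unsigned int pf_index,
                                     unsigned int vf_i, u32 *mode)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
        size_t outlen;
        int rc;

        MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
                              LINK_STATE_MODE_IN_FUNCTION_PF, pf_index,
                              LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
        MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
                       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
        rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
                return -EIO;
        *mode = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
        return 0;
}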
+/***********************************/
/* MC_CMD_READ_REGS
* Get a dump of the MCPU registers
*/
#define MC_CMD_READ_REGS 0x50
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -4115,6 +4311,8 @@
*/
#define MC_CMD_INIT_EVQ 0x80
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_EVQ_IN msgrequest */
#define MC_CMD_INIT_EVQ_IN_LENMIN 44
#define MC_CMD_INIT_EVQ_IN_LENMAX 548
@@ -4213,6 +4411,8 @@
*/
#define MC_CMD_INIT_RXQ 0x81
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_RXQ_IN msgrequest */
#define MC_CMD_INIT_RXQ_IN_LENMIN 36
#define MC_CMD_INIT_RXQ_IN_LENMAX 252
@@ -4265,6 +4465,8 @@
*/
#define MC_CMD_INIT_TXQ 0x82
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_TXQ_IN msgrequest */
#define MC_CMD_INIT_TXQ_IN_LENMIN 36
#define MC_CMD_INIT_TXQ_IN_LENMAX 252
@@ -4322,6 +4524,8 @@
*/
#define MC_CMD_FINI_EVQ 0x83
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_EVQ_IN msgrequest */
#define MC_CMD_FINI_EVQ_IN_LEN 4
/* Instance of EVQ to destroy. Should be the same instance as that previously
@@ -4339,6 +4543,8 @@
*/
#define MC_CMD_FINI_RXQ 0x84
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_RXQ_IN msgrequest */
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
@@ -4354,6 +4560,8 @@
*/
#define MC_CMD_FINI_TXQ 0x85
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_TXQ_IN msgrequest */
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
@@ -4369,6 +4577,8 @@
*/
#define MC_CMD_DRIVER_EVENT 0x86
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DRIVER_EVENT_IN msgrequest */
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
@@ -4392,6 +4602,8 @@
*/
#define MC_CMD_PROXY_CMD 0x5b
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PROXY_CMD_IN msgrequest */
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
@@ -4414,6 +4626,8 @@
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
@@ -4437,6 +4651,8 @@
*/
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
@@ -4463,6 +4679,8 @@
*/
#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
@@ -4477,6 +4695,8 @@
*/
#define MC_CMD_FILTER_OP 0x8a
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FILTER_OP_IN msgrequest */
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
@@ -4637,6 +4857,8 @@
*/
#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
@@ -4669,6 +4891,8 @@
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
@@ -4719,6 +4943,8 @@
*/
#define MC_CMD_GET_PF_COUNT 0xb6
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PF_COUNT_IN msgrequest */
#define MC_CMD_GET_PF_COUNT_IN_LEN 0
@@ -4750,6 +4976,8 @@
*/
#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
@@ -4765,6 +4993,8 @@
*/
#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
@@ -4780,6 +5010,8 @@
*/
#define MC_CMD_ALLOC_VIS 0x8b
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_ALLOC_VIS_IN msgrequest */
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
@@ -4804,6 +5036,8 @@
*/
#define MC_CMD_FREE_VIS 0x8c
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FREE_VIS_IN msgrequest */
#define MC_CMD_FREE_VIS_IN_LEN 0
@@ -4817,6 +5051,8 @@
*/
#define MC_CMD_GET_SRIOV_CFG 0xba
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
@@ -4841,6 +5077,8 @@
*/
#define MC_CMD_SET_SRIOV_CFG 0xbb
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
@@ -4870,6 +5108,8 @@
*/
#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
@@ -4889,6 +5129,8 @@
*/
#define MC_CMD_DUMP_VI_STATE 0x8e
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
@@ -4998,6 +5240,8 @@
*/
#define MC_CMD_ALLOC_PIOBUF 0x8f
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
@@ -5013,6 +5257,8 @@
*/
#define MC_CMD_FREE_PIOBUF 0x90
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_FREE_PIOBUF_IN msgrequest */
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
@@ -5028,6 +5274,8 @@
*/
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
@@ -5062,6 +5310,8 @@
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
@@ -5096,6 +5346,8 @@
*/
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5157,6 +5409,8 @@
*/
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5203,6 +5457,8 @@
*/
#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
* are subtle, and so downloads must proceed in a number of phases.
*
@@ -5318,6 +5574,7 @@
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
@@ -5343,6 +5600,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
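/* Illustrative sketch: the new EVB flag can be tested the same way as the
 * other FLAGS1 bits, against the datapath_caps word cached in
 * struct efx_ef10_nic_data.
 */
static inline bool example_ef10_has_evb(const struct efx_ef10_nic_data *nic_data)
{
        return nic_data->datapath_caps &
               (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN);
}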
@@ -5433,6 +5692,8 @@
*/
#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
@@ -5448,6 +5709,8 @@
*/
#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
@@ -5463,6 +5726,8 @@
*/
#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
@@ -5480,6 +5745,8 @@
*/
#define MC_CMD_TCM_TXQ_INIT 0xb5
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
@@ -5511,6 +5778,8 @@
*/
#define MC_CMD_LINK_PIOBUF 0x92
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_LINK_PIOBUF_IN msgrequest */
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
@@ -5528,6 +5797,8 @@
*/
#define MC_CMD_UNLINK_PIOBUF 0x93
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
@@ -5543,6 +5814,8 @@
*/
#define MC_CMD_VSWITCH_ALLOC 0x94
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
@@ -5572,6 +5845,8 @@
*/
#define MC_CMD_VSWITCH_FREE 0x95
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VSWITCH_FREE_IN msgrequest */
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
@@ -5587,6 +5862,8 @@
*/
#define MC_CMD_VPORT_ALLOC 0x96
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_ALLOC_IN msgrequest */
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
@@ -5636,6 +5913,8 @@
*/
#define MC_CMD_VPORT_FREE 0x97
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_FREE_IN msgrequest */
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
@@ -5651,8 +5930,10 @@
*/
#define MC_CMD_VADAPTOR_ALLOC 0x98
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
-#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* Flags controlling v-adaptor creation */
@@ -5661,6 +5942,19 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
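/* Illustrative sketch: allocating a v-adaptor on an upstream port with the
 * request buffer left zero-filled beyond the port id, so NUM_VLANS is 0 and
 * the MAC is derived from the upstream port (AUTO_MAC above).
 */
static int example_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

        MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
        return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}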
@@ -5672,6 +5966,8 @@
*/
#define MC_CMD_VADAPTOR_FREE 0x99
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
@@ -5682,11 +5978,53 @@
/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
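/* Illustrative sketch (hypothetical helper): programming a new MAC address
 * on a v-adaptor; port_id would typically be the function's vport id.
 */
static int example_vadaptor_set_mac(struct efx_nic *efx, unsigned int port_id,
                                    const u8 *mac)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);

        MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, port_id);
        ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), mac);
        return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}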
+/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
* assign a port to a PCI function.
*/
#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
@@ -5708,6 +6046,8 @@
*/
#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
@@ -5736,6 +6076,8 @@
*/
#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
@@ -5753,6 +6095,8 @@
*/
#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
@@ -5768,6 +6112,8 @@
*/
#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
@@ -5800,6 +6146,8 @@
*/
#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
@@ -5815,6 +6163,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
@@ -5833,6 +6183,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
@@ -5851,6 +6203,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
@@ -5869,6 +6223,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
@@ -5887,6 +6243,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
@@ -5912,6 +6270,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
@@ -5937,6 +6297,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
@@ -5959,6 +6321,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
@@ -5974,6 +6338,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
@@ -5994,6 +6360,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
@@ -6014,6 +6382,8 @@
*/
#define MC_CMD_GET_VECTOR_CFG 0xbf
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
@@ -6033,6 +6403,8 @@
*/
#define MC_CMD_SET_VECTOR_CFG 0xc0
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
@@ -6423,6 +6795,8 @@
*/
#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
@@ -6441,6 +6815,8 @@
*/
#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
@@ -6459,6 +6835,8 @@
*/
#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
@@ -6486,6 +6864,8 @@
*/
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
@@ -6510,6 +6890,8 @@
*/
#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
@@ -6526,6 +6908,8 @@
*/
#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
@@ -6890,6 +7274,8 @@
*/
#define MC_CMD_GET_CLOCK 0xac
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_CLOCK_IN msgrequest */
#define MC_CMD_GET_CLOCK_IN_LEN 0
@@ -6907,6 +7293,8 @@
*/
#define MC_CMD_SET_CLOCK 0xad
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 12
/* Requested system frequency in MHz; 0 leaves unchanged. */
@@ -6932,6 +7320,8 @@
*/
#define MC_CMD_DPCPU_RPC 0xae
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
@@ -7016,6 +7406,8 @@
*/
#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
@@ -7031,6 +7423,8 @@
*/
#define MC_CMD_CAP_BLK_READ 0xe7
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
@@ -7055,6 +7449,8 @@
*/
#define MC_CMD_DUMP_DO 0xe8
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
@@ -7108,6 +7504,8 @@
*/
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
@@ -7151,6 +7549,8 @@
*/
#define MC_CMD_SET_PSU 0xea
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
@@ -7171,6 +7571,8 @@
*/
#define MC_CMD_GET_FUNCTION_INFO 0xec
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
@@ -7188,6 +7590,8 @@
*/
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -7203,6 +7607,8 @@
*/
#define MC_CMD_UART_SEND_DATA 0xee
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
@@ -7231,6 +7637,8 @@
*/
#define MC_CMD_UART_RECV_DATA 0xef
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
@@ -7266,6 +7674,8 @@
*/
#define MC_CMD_READ_FUSES 0xf0
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
@@ -7292,6 +7702,8 @@
*/
#define MC_CMD_KR_TUNE 0xf1
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
#define MC_CMD_KR_TUNE_IN_LENMAX 252
@@ -7550,6 +7962,8 @@
*/
#define MC_CMD_PCIE_TUNE 0xf2
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PCIE_TUNE_IN msgrequest */
#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
@@ -7711,6 +8125,8 @@
*/
#define MC_CMD_LICENSING 0xf3
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LICENSING_IN msgrequest */
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
@@ -7756,6 +8172,8 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MC2MC_PROXY_IN msgrequest */
#define MC_CMD_MC2MC_PROXY_IN_LEN 0
@@ -7771,6 +8189,8 @@
*/
#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
@@ -7792,6 +8212,8 @@
*/
#define MC_CMD_LICENSED_APP_OP 0xf6
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
@@ -7847,6 +8269,8 @@
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
@@ -7881,6 +8305,8 @@
*/
#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb19b70ea..7f295c4d7 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -865,6 +865,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+ /* This has no effect on EF10 */
ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
efx->net_dev->dev_addr);
@@ -923,6 +924,7 @@ enum efx_stats_action {
static int efx_mcdi_mac_stats(struct efx_nic *efx,
enum efx_stats_action action, int clear)
{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -944,9 +946,14 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+ NULL, 0, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 325dd94bc..47d1e3a96 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -25,6 +25,7 @@
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
@@ -240,6 +241,8 @@ struct efx_tx_queue {
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
unsigned int merge_events;
+ unsigned int bytes_compl;
+ unsigned int pkts_compl;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -793,7 +796,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
-struct efx_vf;
struct vfdi_status;
/**
@@ -897,7 +899,8 @@ struct vfdi_status;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @filter_lock: Filter table lock
+ * @filter_sem: Filter table rw_semaphore, for freeing the table
+ * @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
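/* Sketch of the intended split between the two locks (usage inferred from
 * the comments above): filter_sem is held for read around ordinary filter
 * calls and for write only when the table itself is freed or recreated;
 * filter_lock continues to protect individual entries.
 *
 *      down_read(&efx->filter_sem);
 *      rc = efx->type->filter_insert(efx, &spec, false);
 *      up_read(&efx->filter_sem);
 *
 *      down_write(&efx->filter_sem);
 *      efx->type->filter_table_remove(efx);
 *      up_write(&efx->filter_sem);
 */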
@@ -909,7 +912,6 @@ struct vfdi_status;
* completed (either success or failure). Not used when MCDI is used to
* flush receive queues.
* @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
- * @vf: Array of &struct efx_vf objects.
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
@@ -1040,6 +1042,7 @@ struct efx_nic {
void *loopback_selftest;
+ struct rw_semaphore filter_sem;
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
@@ -1053,7 +1056,6 @@ struct efx_nic {
wait_queue_head_t flush_wq;
#ifdef CONFIG_SFC_SRIOV
- struct efx_vf *vf;
unsigned vf_count;
unsigned vf_init_count;
unsigned vi_scale;
@@ -1092,6 +1094,7 @@ struct efx_mtd_partition {
/**
* struct efx_nic_type - Efx device type definition
+ * @mem_bar: Get the memory BAR
* @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
@@ -1204,6 +1207,7 @@ struct efx_mtd_partition {
* @ptp_set_ts_config: Set hardware timestamp configuration. The flags
* and tx_type will already have been validated but this operation
* must validate and update rx_filter.
+ * @set_mac_address: Set the MAC address of the device
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1226,6 +1230,8 @@ struct efx_mtd_partition {
* @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
+ bool is_vf;
+ unsigned int mem_bar;
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
@@ -1277,7 +1283,8 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
- void (*rx_push_rss_config)(struct efx_nic *efx);
+ int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1330,11 +1337,28 @@ struct efx_nic_type {
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
+ int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
- void (*sriov_mac_address_changed)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
void (*sriov_reset)(struct efx_nic *efx);
+ void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
+ int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+ int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos);
+ int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
+ bool spoofchk);
+ int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivi);
+ int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
+ int link_state);
+ int (*sriov_get_phys_port_id)(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+ int (*vswitching_probe)(struct efx_nic *efx);
+ int (*vswitching_restore)(struct efx_nic *efx);
+ void (*vswitching_remove)(struct efx_nic *efx);
+ int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
+ int (*set_mac_address)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
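/* Illustrative sketch: with the hooks above, the generic ndo callbacks can
 * dispatch through efx->type rather than calling Siena-specific functions;
 * the wrapper name here is hypothetical.
 */
static int example_ndo_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->type->sriov_set_vf_mac)
                return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
        return -EOPNOTSUPP;
}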
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 93d10cbbd..31ff9084d 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -381,6 +381,7 @@ enum {
* @efx: Pointer back to main interface structure
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
+ * @vf: Array of &struct siena_vf objects
* @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
* @vfdi_status: Common VFDI status page to be dmad to VF address space.
* @local_addr_list: List of local addresses. Protected by %local_lock.
@@ -394,6 +395,7 @@ struct siena_nic_data {
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SRIOV
+ struct siena_vf *vf;
struct efx_channel *vfdi_channel;
unsigned vf_buftbl_base;
struct efx_buffer vfdi_status;
@@ -405,59 +407,77 @@ struct siena_nic_data {
};
enum {
- EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
- EF10_STAT_tx_packets,
- EF10_STAT_tx_pause,
- EF10_STAT_tx_control,
- EF10_STAT_tx_unicast,
- EF10_STAT_tx_multicast,
- EF10_STAT_tx_broadcast,
- EF10_STAT_tx_lt64,
- EF10_STAT_tx_64,
- EF10_STAT_tx_65_to_127,
- EF10_STAT_tx_128_to_255,
- EF10_STAT_tx_256_to_511,
- EF10_STAT_tx_512_to_1023,
- EF10_STAT_tx_1024_to_15xx,
- EF10_STAT_tx_15xx_to_jumbo,
- EF10_STAT_rx_bytes,
- EF10_STAT_rx_bytes_minus_good_bytes,
- EF10_STAT_rx_good_bytes,
- EF10_STAT_rx_bad_bytes,
- EF10_STAT_rx_packets,
- EF10_STAT_rx_good,
- EF10_STAT_rx_bad,
- EF10_STAT_rx_pause,
- EF10_STAT_rx_control,
+ EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+ EF10_STAT_port_tx_packets,
+ EF10_STAT_port_tx_pause,
+ EF10_STAT_port_tx_control,
+ EF10_STAT_port_tx_unicast,
+ EF10_STAT_port_tx_multicast,
+ EF10_STAT_port_tx_broadcast,
+ EF10_STAT_port_tx_lt64,
+ EF10_STAT_port_tx_64,
+ EF10_STAT_port_tx_65_to_127,
+ EF10_STAT_port_tx_128_to_255,
+ EF10_STAT_port_tx_256_to_511,
+ EF10_STAT_port_tx_512_to_1023,
+ EF10_STAT_port_tx_1024_to_15xx,
+ EF10_STAT_port_tx_15xx_to_jumbo,
+ EF10_STAT_port_rx_bytes,
+ EF10_STAT_port_rx_bytes_minus_good_bytes,
+ EF10_STAT_port_rx_good_bytes,
+ EF10_STAT_port_rx_bad_bytes,
+ EF10_STAT_port_rx_packets,
+ EF10_STAT_port_rx_good,
+ EF10_STAT_port_rx_bad,
+ EF10_STAT_port_rx_pause,
+ EF10_STAT_port_rx_control,
+ EF10_STAT_port_rx_unicast,
+ EF10_STAT_port_rx_multicast,
+ EF10_STAT_port_rx_broadcast,
+ EF10_STAT_port_rx_lt64,
+ EF10_STAT_port_rx_64,
+ EF10_STAT_port_rx_65_to_127,
+ EF10_STAT_port_rx_128_to_255,
+ EF10_STAT_port_rx_256_to_511,
+ EF10_STAT_port_rx_512_to_1023,
+ EF10_STAT_port_rx_1024_to_15xx,
+ EF10_STAT_port_rx_15xx_to_jumbo,
+ EF10_STAT_port_rx_gtjumbo,
+ EF10_STAT_port_rx_bad_gtjumbo,
+ EF10_STAT_port_rx_overflow,
+ EF10_STAT_port_rx_align_error,
+ EF10_STAT_port_rx_length_error,
+ EF10_STAT_port_rx_nodesc_drops,
+ EF10_STAT_port_rx_pm_trunc_bb_overflow,
+ EF10_STAT_port_rx_pm_discard_bb_overflow,
+ EF10_STAT_port_rx_pm_trunc_vfifo_full,
+ EF10_STAT_port_rx_pm_discard_vfifo_full,
+ EF10_STAT_port_rx_pm_trunc_qbb,
+ EF10_STAT_port_rx_pm_discard_qbb,
+ EF10_STAT_port_rx_pm_discard_mapping,
+ EF10_STAT_port_rx_dp_q_disabled_packets,
+ EF10_STAT_port_rx_dp_di_dropped_packets,
+ EF10_STAT_port_rx_dp_streaming_packets,
+ EF10_STAT_port_rx_dp_hlb_fetch,
+ EF10_STAT_port_rx_dp_hlb_wait,
EF10_STAT_rx_unicast,
+ EF10_STAT_rx_unicast_bytes,
EF10_STAT_rx_multicast,
+ EF10_STAT_rx_multicast_bytes,
EF10_STAT_rx_broadcast,
- EF10_STAT_rx_lt64,
- EF10_STAT_rx_64,
- EF10_STAT_rx_65_to_127,
- EF10_STAT_rx_128_to_255,
- EF10_STAT_rx_256_to_511,
- EF10_STAT_rx_512_to_1023,
- EF10_STAT_rx_1024_to_15xx,
- EF10_STAT_rx_15xx_to_jumbo,
- EF10_STAT_rx_gtjumbo,
- EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_broadcast_bytes,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_bad_bytes,
EF10_STAT_rx_overflow,
- EF10_STAT_rx_align_error,
- EF10_STAT_rx_length_error,
- EF10_STAT_rx_nodesc_drops,
- EF10_STAT_rx_pm_trunc_bb_overflow,
- EF10_STAT_rx_pm_discard_bb_overflow,
- EF10_STAT_rx_pm_trunc_vfifo_full,
- EF10_STAT_rx_pm_discard_vfifo_full,
- EF10_STAT_rx_pm_trunc_qbb,
- EF10_STAT_rx_pm_discard_qbb,
- EF10_STAT_rx_pm_discard_mapping,
- EF10_STAT_rx_dp_q_disabled_packets,
- EF10_STAT_rx_dp_di_dropped_packets,
- EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_hlb_fetch,
- EF10_STAT_rx_dp_hlb_wait,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_unicast_bytes,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_multicast_bytes,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_broadcast_bytes,
+ EF10_STAT_tx_bad,
+ EF10_STAT_tx_bad_bytes,
+ EF10_STAT_tx_overflow,
EF10_STAT_COUNT
};
@@ -483,12 +503,21 @@ enum {
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @rx_rss_context: Firmware handle for our RSS context
+ * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
+ * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
+ * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
+ * @vport_id: The function's vport ID, only relevant for PFs
+ * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
+ * @pf_index: The number for this PF, or the parent PF if this is a VF
+#ifdef CONFIG_SFC_SRIOV
+ * @vf: Pointer to VF data structure
+#endif
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
@@ -503,126 +532,27 @@ struct efx_ef10_nic_data {
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
bool must_restore_piobufs;
u32 rx_rss_context;
+ bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool must_check_datapath_caps;
u32 datapath_caps;
-};
-
-/*
- * On the SFC9000 family each port is associated with 1 PCI physical
- * function (PF) handled by sfc and a configurable number of virtual
- * functions (VFs) that may be handled by some other driver, often in
- * a VM guest. The queue pointer registers are mapped in both PF and
- * VF BARs such that an 8K region provides access to a single RX, TX
- * and event queue (collectively a Virtual Interface, VI or VNIC).
- *
- * The PF has access to all 1024 VIs while VFs are mapped to VIs
- * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
- * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
- * The number of VIs and the VI_SCALE value are configurable but must
- * be established at boot time by firmware.
- */
-
-/* Maximum VI_SCALE parameter supported by Siena */
-#define EFX_VI_SCALE_MAX 6
-/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
- * so this is the smallest allowed value. */
-#define EFX_VI_BASE 128U
-/* Maximum number of VFs allowed */
-#define EFX_VF_COUNT_MAX 127
-/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
-#define EFX_MAX_VF_EVQ_SIZE 8192UL
-/* The number of buffer table entries reserved for each VI on a VF */
-#define EFX_VF_BUFTBL_PER_VI \
- ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
- sizeof(efx_qword_t) / EFX_BUF_SIZE)
-
+ unsigned int rx_dpcpu_fw_id;
+ unsigned int tx_dpcpu_fw_id;
+ unsigned int vport_id;
+ bool must_probe_vswitching;
+ unsigned int pf_index;
+ u8 port_id[ETH_ALEN];
#ifdef CONFIG_SFC_SRIOV
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
-{
- return efx->vf_count != 0;
-}
-
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
-{
- return efx->vf_init_count != 0;
-}
-
-static inline unsigned int efx_vf_size(struct efx_nic *efx)
-{
- return 1 << efx->vi_scale;
-}
+ unsigned int vf_index;
+ struct ef10_vf *vf;
+#endif
+ u8 vport_mac[ETH_ALEN];
+};
int efx_init_sriov(void);
-void efx_siena_sriov_probe(struct efx_nic *efx);
-int efx_siena_sriov_init(struct efx_nic *efx);
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_siena_sriov_reset(struct efx_nic *efx);
-void efx_siena_sriov_fini(struct efx_nic *efx);
void efx_fini_sriov(void);
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#else
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
-static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_event(struct efx_channel *channel,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
- unsigned dmaq) {}
-static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
-static inline void efx_fini_sriov(void) {}
-
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#endif
-
-/* FALCON */
-static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
-
-int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos);
-int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
-
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
@@ -654,6 +584,7 @@ extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
/**************************************************************************
*
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index a2e9aee05..ad62615a9 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -306,7 +306,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+ _MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
unsigned int good_syncs;
unsigned int fast_syncs;
@@ -389,11 +389,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ if (rc)
memset(outbuf, 0, sizeof(outbuf));
- }
efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
efx_ptp_stat_mask,
stats, _MCDI_PTR(outbuf, 0), false);
@@ -490,14 +487,20 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
*/
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &out_len);
- if (rc == 0)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0) {
fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
- else if (rc == -EINVAL)
+ } else if (rc == -EINVAL) {
fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
- else
+ } else if (rc == -EPERM) {
+ netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ return rc;
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+ outbuf, sizeof(outbuf), rc);
return rc;
+ }
if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
@@ -541,8 +544,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
if (rc == 0) {
efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
@@ -558,6 +561,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
efx->ptp_data->ts_corrections.pps_out = 0;
efx->ptp_data->ts_corrections.pps_in = 0;
} else {
+ efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
+ sizeof(outbuf), rc);
return rc;
}
@@ -568,7 +573,7 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
@@ -596,7 +601,7 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
@@ -604,7 +609,12 @@ static int efx_ptp_disable(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
rc = (rc == -EALREADY) ? 0 : rc;
- if (rc)
+ /* If we get ENOSYS, the NIC doesn't support PTP, and thus this function
+ * should only have been called during probe.
+ */
+ if (rc == -ENOSYS || rc == -EPERM)
+ netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ else if (rc)
efx_mcdi_display_error(efx, MC_CMD_PTP,
MC_CMD_PTP_IN_DISABLE_LEN,
outbuf, sizeof(outbuf), rc);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index f12c81193..b323b9167 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,6 +25,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "selftest.h"
+#include "siena_sriov.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
@@ -306,7 +307,9 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail5;
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_probe(efx);
+#endif
efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -321,7 +324,8 @@ fail1:
return rc;
}
-static void siena_rx_push_rss_config(struct efx_nic *efx)
+static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
{
efx_oword_t temp;
@@ -343,7 +347,11 @@ static void siena_rx_push_rss_config(struct efx_nic *efx)
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
+
+ return 0;
}
/* This call performs hardware-specific global initialisation, such as
@@ -386,7 +394,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- siena_rx_push_rss_config(efx);
+ siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -909,6 +917,8 @@ fail:
*/
const struct efx_nic_type siena_a0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
@@ -996,11 +1006,22 @@ const struct efx_nic_type siena_a0_nic_type = {
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.ptp_set_ts_config = siena_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_siena_sriov_configure,
.sriov_init = efx_siena_sriov_init,
.sriov_fini = efx_siena_sriov_fini,
- .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
.sriov_wanted = efx_siena_sriov_wanted,
.sriov_reset = efx_siena_sriov_reset,
+ .sriov_flr = efx_siena_sriov_flr,
+ .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac,
+ .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+ .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+ .sriov_get_vf_config = efx_siena_sriov_get_vf_config,
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+ .set_mac_address = efx_siena_sriov_mac_address_changed,
+#endif
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index fe8343079..da7b94f34 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -16,6 +16,7 @@
#include "filter.h"
#include "mcdi_pcol.h"
#include "farch_regs.h"
+#include "siena_sriov.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
@@ -38,7 +39,7 @@ enum efx_vf_tx_filter_mode {
};
/**
- * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
* @efx: The Efx NIC owning this VF
* @pci_rid: The PCI requester ID for this VF
* @pci_name: The PCI name (formatted address) of this VF
@@ -83,7 +84,7 @@ enum efx_vf_tx_filter_mode {
* @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
* @reset_work: Work item to schedule a VF reset.
*/
-struct efx_vf {
+struct siena_vf {
struct efx_nic *efx;
unsigned int pci_rid;
char pci_name[13]; /* dddd:bb:dd.f */
@@ -189,7 +190,7 @@ MODULE_PARM_DESC(max_vfs,
*/
static struct workqueue_struct *vfdi_workqueue;
-static unsigned abs_index(struct efx_vf *vf, unsigned index)
+static unsigned abs_index(struct siena_vf *vf, unsigned index)
{
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
@@ -207,8 +208,8 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
- rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
- outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
+ outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SRIOV_OUT_LEN)
@@ -299,7 +300,7 @@ out:
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
-static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -343,7 +344,7 @@ static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
}
/* The RX filter is managed here on behalf of the VF driver */
-static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -382,7 +383,7 @@ static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
}
}
-static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -397,7 +398,7 @@ static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
* local_page_list, either by acquiring local_lock or by running from
* efx_siena_sriov_peer_work()
*/
-static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -509,8 +510,9 @@ static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
* Optionally set VF index and VI index within the VF.
*/
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
- struct efx_vf **vf_out, unsigned *rel_index_out)
+ struct siena_vf **vf_out, unsigned *rel_index_out)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned vf_i;
if (abs_index < EFX_VI_BASE)
@@ -520,13 +522,13 @@ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
return true;
if (vf_out)
- *vf_out = efx->vf + vf_i;
+ *vf_out = nic_data->vf + vf_i;
if (rel_index_out)
*rel_index_out = abs_index % efx_vf_size(efx);
return false;
}
-static int efx_vfdi_init_evq(struct efx_vf *vf)
+static int efx_vfdi_init_evq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -567,7 +569,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_rxq(struct efx_vf *vf)
+static int efx_vfdi_init_rxq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -608,7 +610,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_txq(struct efx_vf *vf)
+static int efx_vfdi_init_txq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -655,7 +657,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf)
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
-static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+static bool efx_vfdi_flush_wake(struct siena_vf *vf)
{
/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
smp_mb();
@@ -664,7 +666,7 @@ static bool efx_vfdi_flush_wake(struct efx_vf *vf)
atomic_read(&vf->rxq_retry_count);
}
-static void efx_vfdi_flush_clear(struct efx_vf *vf)
+static void efx_vfdi_flush_clear(struct siena_vf *vf)
{
memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
vf->txq_count = 0;
@@ -674,7 +676,7 @@ static void efx_vfdi_flush_clear(struct efx_vf *vf)
atomic_set(&vf->rxq_retry_count, 0);
}
-static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
efx_oword_t reg;
@@ -757,7 +759,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
-static int efx_vfdi_insert_filter(struct efx_vf *vf)
+static int efx_vfdi_insert_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -789,7 +791,7 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -801,7 +803,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_set_status_page(struct efx_vf *vf)
+static int efx_vfdi_set_status_page(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -846,7 +848,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+static int efx_vfdi_clear_status_page(struct siena_vf *vf)
{
mutex_lock(&vf->status_lock);
vf->status_addr = 0;
@@ -855,7 +857,7 @@ static int efx_vfdi_clear_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
@@ -870,7 +872,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
static void efx_siena_sriov_vfdi(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
struct efx_memcpy_req copy[2];
@@ -936,7 +938,8 @@ static void efx_siena_sriov_vfdi(struct work_struct *work)
* event ring in guest memory with VFDI reset events, then (re-initialise) the
* event queue to raise an interrupt. The guest driver will then recover.
*/
-static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+
+static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
@@ -1006,7 +1009,7 @@ static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
@@ -1055,8 +1058,10 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
if (!max_vfs)
return;
- if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
+ if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
+ netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
return;
+ }
if (count > 0 && count > max_vfs)
count = max_vfs;
@@ -1077,7 +1082,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_work);
struct efx_nic *efx = nic_data->efx;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
- struct efx_vf *vf;
+ struct siena_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
struct efx_endpoint_page *epp;
@@ -1099,7 +1104,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
peer_count = 1;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
@@ -1155,7 +1160,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
/* Finally, push the pages */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->status_addr)
@@ -1190,14 +1195,16 @@ static void efx_siena_sriov_free_local(struct efx_nic *efx)
static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
- struct efx_vf *vf;
+ struct siena_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
- efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
- if (!efx->vf)
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
return -ENOMEM;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
vf->efx = efx;
vf->index = index;
@@ -1216,11 +1223,12 @@ static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
unsigned int pos;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
efx_nic_free_buffer(efx, &vf->buf);
kfree(vf->peer_page_addrs);
@@ -1237,7 +1245,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
struct siena_nic_data *nic_data = efx->nic_data;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
- struct efx_vf *vf;
+ struct siena_vf *vf;
int rc;
sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
@@ -1250,7 +1258,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
buftbl_base = nic_data->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
/* Reserve buffer entries */
vf->buftbl_base = buftbl_base;
@@ -1350,7 +1358,7 @@ fail_pci:
fail_vfs:
cancel_work_sync(&nic_data->peer_work);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
fail_alloc:
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
@@ -1361,7 +1369,7 @@ fail_cmd:
void efx_siena_sriov_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int pos;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -1377,7 +1385,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Flush all reconfiguration work */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
@@ -1388,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Tear down back-end state */
efx_siena_sriov_vfs_fini(efx);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
@@ -1396,7 +1404,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned qid, seq, type, data;
qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
@@ -1452,11 +1460,12 @@ error:
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i > efx->vf_init_count)
return;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
netif_info(efx, hw, efx->net_dev,
"FLR on VF %s\n", vf->pci_name);
@@ -1467,21 +1476,23 @@ void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
vf->evq0_count = 0;
}
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
if (!efx->vf_init_count)
- return;
+ return 0;
ether_addr_copy(vfdi_status->peers[0].mac_addr,
efx->net_dev->dev_addr);
queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+ return 0;
}
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
@@ -1500,7 +1511,7 @@ void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned ev_failed, queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
@@ -1525,7 +1536,7 @@ void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
/* Called from napi. Schedule the reset work item */
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int rel;
if (map_vi_index(efx, dmaq, &vf, &rel))
@@ -1541,9 +1552,10 @@ void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
/* Reset all VFs */
void efx_siena_sriov_reset(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned int vf_i;
struct efx_buffer buf;
- struct efx_vf *vf;
+ struct siena_vf *vf;
ASSERT_RTNL();
@@ -1557,7 +1569,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
efx_siena_sriov_reset_vf(vf, &buf);
}
@@ -1573,7 +1585,6 @@ int efx_init_sriov(void)
vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
if (!vfdi_workqueue)
return -ENOMEM;
-
return 0;
}
@@ -1582,14 +1593,14 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
ether_addr_copy(vf->addr.mac_addr, mac);
@@ -1599,16 +1610,16 @@ int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
return 0;
}
-int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
@@ -1619,16 +1630,16 @@ int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
return 0;
}
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
bool spoofchk)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
int rc;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->txq_lock);
if (vf->txq_count == 0) {
@@ -1643,16 +1654,16 @@ int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
return rc;
}
-int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);
@@ -1666,3 +1677,12 @@ int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
return 0;
}
+bool efx_siena_sriov_wanted(struct efx_nic *efx)
+{
+ return efx->vf_count != 0;
+}
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
new file mode 100644
index 000000000..d88d4dab1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena_sriov.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef SIENA_SRIOV_H
+#define SIENA_SRIOV_H
+
+#include "net_driver.h"
+
+/* On the SFC9000 family each port is associated with 1 PCI physical
+ * function (PF) handled by sfc and a configurable number of virtual
+ * functions (VFs) that may be handled by some other driver, often in
+ * a VM guest. The queue pointer registers are mapped in both PF and
+ * VF BARs such that an 8K region provides access to a single RX, TX
+ * and event queue (collectively a Virtual Interface, VI or VNIC).
+ *
+ * The PF has access to all 1024 VIs while VFs are mapped to VIs
+ * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
+ * in the range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
+ * The number of VIs and the VI_SCALE value are configurable but must
+ * be established at boot time by firmware.
+ */
+
+/* Maximum VI_SCALE parameter supported by Siena */
+#define EFX_VI_SCALE_MAX 6
+/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
+ * so this is the smallest allowed value.
+ */
+#define EFX_VI_BASE 128U
+/* Maximum number of VFs allowed */
+#define EFX_VF_COUNT_MAX 127
+/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
+#define EFX_MAX_VF_EVQ_SIZE 8192UL
+/* The number of buffer table entries reserved for each VI on a VF */
+#define EFX_VF_BUFTBL_PER_VI \
+ ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
+ sizeof(efx_qword_t) / EFX_BUF_SIZE)
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+bool efx_siena_sriov_wanted(struct efx_nic *efx);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
+ u16 vlan, u8 qos);
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+ bool spoofchk);
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
+ struct ifla_vf_info *ivf);
+
+#ifdef CONFIG_SFC_SRIOV
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return efx->vf_init_count != 0;
+}
+#else /* !CONFIG_SFC_SRIOV */
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return false;
+}
+#endif /* CONFIG_SFC_SRIOV */
+
+void efx_siena_sriov_probe(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+
+#endif /* SIENA_SRIOV_H */
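The VI_BASE/VI_SCALE mapping described in the new header's comment determines which block of VIs each VF owns. A minimal sketch of that arithmetic, assuming the vi_scale value read back by efx_siena_sriov_cmd(); the helper name is illustrative and not part of this patch:

static inline void example_siena_vf_vi_range(unsigned int vf_i,
					     unsigned int vi_scale,
					     unsigned int *vi_first,
					     unsigned int *vi_count)
{
	/* first VI owned by VF vf_i, per [VI_BASE + (i << VI_SCALE), ...) */
	*vi_first = EFX_VI_BASE + (vf_i << vi_scale);
	/* each VF owns a power-of-two block of VIs */
	*vi_count = 1U << vi_scale;
}

With vi_scale = 2 this gives VF 0 the VIs 128..131 and VF 1 the VIs 132..135, consistent with EFX_VI_BASE = 128U above.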
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
new file mode 100644
index 000000000..816c44689
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -0,0 +1,83 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "sriov.h"
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_mac)
+ return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_vlan) {
+ if ((vlan & ~VLAN_VID_MASK) ||
+ (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
+ return -EINVAL;
+
+ return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_spoofchk)
+ return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_vf_config)
+ return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_link_state)
+ return efx->type->sriov_set_vf_link_state(efx, vf_i,
+ link_state);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_phys_port_id)
+ return efx->type->sriov_get_phys_port_id(efx, ppid);
+ else
+ return -EOPNOTSUPP;
+}
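The wrappers above are the generic, type-agnostic entry points: each dispatches through an efx->type->sriov_* method and falls back to -EOPNOTSUPP when the NIC type does not implement it. A sketch of how they would typically be plugged into the netdev ops table; the structure name here is hypothetical and the real hookup lives elsewhere in the driver, outside this hunk:

static const struct net_device_ops efx_sriov_example_netdev_ops = {
	/* VF management callbacks dispatch through the per-type hooks */
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
	.ndo_set_vf_link_state	= efx_sriov_set_vf_link_state,
};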
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
new file mode 100644
index 000000000..400df5265
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -0,0 +1,31 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SRIOV_H
+#define EFX_SRIOV_H
+
+#include "net_driver.h"
+
+#ifdef CONFIG_SFC_SRIOV
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk);
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi);
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state);
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid);
+
+#endif /* CONFIG_SFC_SRIOV */
+
+#endif /* EFX_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index aaf298751..1833a0146 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
- netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
index e832f4666..fbbb21c13 100644
--- a/drivers/net/ethernet/sgi/Kconfig
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_SGI
default y
depends on (PCI && SGI_IP27) || SGI_IP32
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -24,9 +22,7 @@ config SGI_IOC3_ETH
select CRC32
select MII
---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card of this type, say Y here.
config SGI_O2MACE_ETH
tristate "SGI O2 MACE Fast Ethernet support"
diff --git a/drivers/net/ethernet/silan/Kconfig b/drivers/net/ethernet/silan/Kconfig
index 3409b3f97..ac982be38 100644
--- a/drivers/net/ethernet/silan/Kconfig
+++ b/drivers/net/ethernet/silan/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_SILAN
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig
index 68d052b09..22ec98ec9 100644
--- a/drivers/net/ethernet/sis/Kconfig
+++ b/drivers/net/ethernet/sis/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_SIS
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 1341f33e6..7d430d322 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -56,7 +56,7 @@ enum sis900_configuration_register_bits {
EDB_MASTER_EN = 0x00002000
};
-enum sis900_eeprom_access_reigster_bits {
+enum sis900_eeprom_access_register_bits {
MDC = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */
EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002,
EEDI = 0x00000001
@@ -73,7 +73,7 @@ enum sis900_interrupt_register_bits {
RxERR = 0x00000004, RxDESC = 0x00000002, RxOK = 0x00000001
};
-enum sis900_interrupt_enable_reigster_bits {
+enum sis900_interrupt_enable_register_bits {
IE = 0x00000001
};
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 3e97a8b43..eb9230e20 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -9,9 +9,7 @@ config NET_VENDOR_SMSC
ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \
PCMCIA || SUPERH || XTENSA
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -29,8 +27,7 @@ config SMC9194
option if you have a DELL laptop with the docking station, or
another SMC9192/9194 based chipset. Say Y if you want it compiled
into the kernel, and read the file
- <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
+ <file:Documentation/networking/smc9.txt>.
To compile this driver as a module, choose M here. The module
will be called smc9194.
@@ -46,8 +43,7 @@ config SMC91X
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
compiled into the kernel, and read the file
- <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
+ <file:Documentation/networking/smc9.txt>.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
@@ -85,9 +81,7 @@ config SMC911X
---help---
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
- Say Y if you want it compiled into the kernel,
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ Say Y here if you want it compiled into the kernel.
This driver is also available as a module. The module will be
called smc911x. If you want to compile it as a module, say M
@@ -122,9 +116,7 @@ config SMSC9420
select SMSC_PHY
---help---
This is a driver for SMSC's LAN9420 PCI ethernet adapter.
- Say Y if you want it compiled into the kernel,
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ Say Y here if you want it compiled into the kernel.
This driver is also available as a module. The module will be
called smsc9420. If you want to compile it as a module, say M
diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig
index f4a80da00..1c1157d2b 100644
--- a/drivers/net/ethernet/stmicro/Kconfig
+++ b/drivers/net/ethernet/stmicro/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_STMICRO
default y
depends on HAS_IOMEM
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 7d3af190b..cec147d1d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -16,6 +16,7 @@ if STMMAC_ETH
config STMMAC_PLATFORM
tristate "STMMAC Platform bus support"
depends on STMMAC_ETH
+ select MFD_SYSCON
default y
---help---
This selects the platform specific bus support for the stmmac driver.
@@ -26,6 +27,95 @@ config STMMAC_PLATFORM
If unsure, say N.
+if STMMAC_PLATFORM
+
+config DWMAC_GENERIC
+ tristate "Generic driver for DWMAC"
+ default STMMAC_PLATFORM
+ ---help---
+ Generic DWMAC driver for platforms that don't require any
+ platform-specific code to function or that use platform
+ data for setup.
+
+config DWMAC_IPQ806X
+ tristate "QCA IPQ806x DWMAC support"
+ default ARCH_QCOM
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for QCA IPQ806X DWMAC Ethernet.
+
+ This selects the IPQ806x SoC glue layer support for the stmmac
+ device driver. This driver does not use any of the hardware
+ acceleration features available on this SoC. Network devices
+ will behave like standard non-accelerated ethernet interfaces.
+
+config DWMAC_LPC18XX
+ tristate "NXP LPC18xx/43xx DWMAC support"
+ default ARCH_LPC18XX
+ depends on OF
+ select MFD_SYSCON
+ ---help---
+ Support for NXP LPC18xx/43xx DWMAC Ethernet.
+
+config DWMAC_MESON
+ tristate "Amlogic Meson dwmac support"
+ default ARCH_MESON
+ depends on OF
+ help
+ Support for Ethernet controller on Amlogic Meson SoCs.
+
+ This selects the Amlogic Meson SoC glue layer support for
+ the stmmac device driver. This driver is used for Meson6 and
+ Meson8 SoCs.
+
+config DWMAC_ROCKCHIP
+ tristate "Rockchip dwmac support"
+ default ARCH_ROCKCHIP
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for Ethernet controller on Rockchip RK3288 SoC.
+
+ This selects the Rockchip RK3288 SoC glue layer support for
+ the stmmac device driver.
+
+config DWMAC_SOCFPGA
+ tristate "SOCFPGA dwmac support"
+ default ARCH_SOCFPGA
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for ethernet controller on Altera SOCFPGA.
+
+ This selects the Altera SOCFPGA SoC glue layer support
+ for the stmmac device driver. This driver is used for
+ arria5 and cyclone5 FPGA SoCs.
+
+config DWMAC_STI
+ tristate "STi GMAC support"
+ default ARCH_STI
+ depends on OF
+ select MFD_SYSCON
+ ---help---
+ Support for ethernet controllers on STi SoCs.
+
+ This selects STi SoC glue layer support for the stmmac
+ device driver. This driver is used for the STi series
+ SoCs GMAC ethernet controller.
+
+config DWMAC_SUNXI
+ tristate "Allwinner GMAC support"
+ default ARCH_SUNXI
+ depends on OF
+ ---help---
+ Support for Allwinner A20/A31 GMAC ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for A20/A31
+ GMAC ethernet controllers.
+endif
+
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 73c2715a2..b3901616f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -4,9 +4,17 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
-obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
-stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
- dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
+# Ordering matters. Generic driver must be last.
+obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
+obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
+obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
+obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
+obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
+obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
+stmmac-platform-objs:= stmmac_platform.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index ad3996038..799c2929c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -158,6 +158,8 @@ struct dma_desc {
u32 buffer2_size:13;
u32 reserved4:3;
} etx; /* -- enhanced -- */
+
+ u64 all_flags;
} des01;
unsigned int des2;
unsigned int des3;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
new file mode 100644
index 000000000..e817a1a44
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -0,0 +1,41 @@
+/*
+ * Generic DWMAC platform driver
+ *
+ * Copyright (C) 2007-2011 STMicroelectronics Ltd
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+static const struct of_device_id dwmac_generic_match[] = {
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.610"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dwmac_generic_match);
+
+static struct platform_driver dwmac_generic_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(dwmac_generic_match),
+ },
+};
+module_platform_driver(dwmac_generic_driver);
+
+MODULE_DESCRIPTION("Generic dwmac driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
new file mode 100644
index 000000000..f0e4bb4e3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -0,0 +1,365 @@
+/*
+ * Qualcomm Atheros IPQ806x GMAC glue layer
+ *
+ * Copyright (C) 2015 The Linux Foundation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+
+#include "stmmac_platform.h"
+
+#define NSS_COMMON_CLK_GATE 0x8
+#define NSS_COMMON_CLK_GATE_PTP_EN(x) BIT(0x10 + x)
+#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x) BIT(0x9 + (x * 2))
+#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x) BIT(0x8 + (x * 2))
+#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x) BIT(0x4 + x)
+#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x) BIT(0x0 + x)
+
+#define NSS_COMMON_CLK_DIV0 0xC
+#define NSS_COMMON_CLK_DIV_OFFSET(x) (x * 8)
+#define NSS_COMMON_CLK_DIV_MASK 0x7f
+
+#define NSS_COMMON_CLK_SRC_CTRL 0x14
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
+/* Mode is coded on 1 bit but is different depending on the MAC ID:
+ * MAC0: QSGMII=0 RGMII=1
+ * MAC1: QSGMII=0 SGMII=0 RGMII=1
+ * MAC2 & MAC3: QSGMII=0 SGMII=1
+ */
+#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1
+#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 1 : 0)
+
+#define NSS_COMMON_MACSEC_CTL 0x28
+#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x)
+
+#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4))
+#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19)
+#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16)
+#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8
+#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0
+#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f
+
+#define NSS_COMMON_CLK_DIV_RGMII_1000 1
+#define NSS_COMMON_CLK_DIV_RGMII_100 9
+#define NSS_COMMON_CLK_DIV_RGMII_10 99
+#define NSS_COMMON_CLK_DIV_SGMII_1000 0
+#define NSS_COMMON_CLK_DIV_SGMII_100 4
+#define NSS_COMMON_CLK_DIV_SGMII_10 49
+
+#define QSGMII_PCS_MODE_CTL 0x68
+#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7)
+
+#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
+#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
+
+/* Only GMAC1/2/3 support SGMII and their CTL register are not contiguous */
+#define QSGMII_PHY_SGMII_CTL(x) ((x == 1) ? 0x134 : \
+ (0x13c + (4 * (x - 2))))
+#define QSGMII_PHY_CDR_EN BIT(0)
+#define QSGMII_PHY_RX_FRONT_EN BIT(1)
+#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2)
+#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
+#define QSGMII_PHY_QSGMII_EN BIT(7)
+#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7
+#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
+#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3
+#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
+#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3
+#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
+#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3
+#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
+#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf
+
+struct ipq806x_gmac {
+ struct platform_device *pdev;
+ struct regmap *nss_common;
+ struct regmap *qsgmii_csr;
+ uint32_t id;
+ struct clk *core_clk;
+ phy_interface_t phy_mode;
+};
+
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_SGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_SGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_SGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_RGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_RGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_RGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ uint32_t clk_bits, val;
+ int div;
+
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ div = get_clk_div_rgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ div = get_clk_div_sgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+
+ default:
+ dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return -EINVAL;
+ }
+
+ /* Disable the clocks */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val &= ~clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ /* Set the divider */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
+ val &= ~(NSS_COMMON_CLK_DIV_MASK
+ << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
+ val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);
+
+ /* Re-enable the clocks */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ return 0;
+}
+
+static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+{
+ struct device *dev = &gmac->pdev->dev;
+
+ gmac->phy_mode = of_get_phy_mode(dev->of_node);
+ if (gmac->phy_mode < 0) {
+ dev_err(dev, "missing phy mode property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
+ dev_err(dev, "missing qcom id property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The GMACs are called 1 to 4 in the documentation, but to simplify the
+ * code and keep it consistent with the Linux convention, we'll number
+ * them from 0 to 3 here.
+ */
+ if (gmac->id < 0 || gmac->id > 3) {
+ dev_err(dev, "invalid gmac id\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ gmac->core_clk = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(gmac->core_clk)) {
+ dev_err(dev, "missing stmmaceth clk property\n");
+ return gmac->core_clk;
+ }
+ clk_set_rate(gmac->core_clk, 266000000);
+
+ /* Setup the register map for the nss common registers */
+ gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,nss-common");
+ if (IS_ERR(gmac->nss_common)) {
+ dev_err(dev, "missing nss-common node\n");
+ return gmac->nss_common;
+ }
+
+ /* Setup the register map for the qsgmii csr registers */
+ gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,qsgmii-csr");
+ if (IS_ERR(gmac->qsgmii_csr)) {
+ dev_err(dev, "missing qsgmii-csr node\n");
+ return gmac->qsgmii_csr;
+ }
+
+ return NULL;
+}
+
+static void *ipq806x_gmac_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipq806x_gmac *gmac;
+ int val;
+ void *err;
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return ERR_PTR(-ENOMEM);
+
+ gmac->pdev = pdev;
+
+ err = ipq806x_gmac_of_parse(gmac);
+ if (err) {
+ dev_err(dev, "device tree parsing error\n");
+ return err;
+ }
+
+ regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
+ QSGMII_PCS_CAL_LCKDT_CTL_RST);
+
+ /* Inter frame gap is set to 12 */
+ val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
+ 12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
+ /* We also initiate an AXI low power exit request */
+ val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return NULL;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
+
+ /* Configure the clock src according to the mode */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
+ val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return NULL;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
+
+ /* Enable PTP clock */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+ QSGMII_PHY_CDR_EN |
+ QSGMII_PHY_RX_FRONT_EN |
+ QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+ QSGMII_PHY_TX_DRIVER_EN |
+ QSGMII_PHY_QSGMII_EN |
+ 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+ 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+ 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+ 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+ 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+ }
+
+ return gmac;
+}
+
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct ipq806x_gmac *gmac = priv;
+
+ ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static const struct stmmac_of_data ipq806x_gmac_data = {
+ .has_gmac = 1,
+ .setup = ipq806x_gmac_setup,
+ .fix_mac_speed = ipq806x_gmac_fix_mac_speed,
+};
+
+static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+ { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
+
+static struct platform_driver ipq806x_gmac_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "ipq806x-gmac-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = ipq806x_gmac_dwmac_match,
+ },
+};
+module_platform_driver(ipq806x_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Mathieu Olivari <mathieu@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
new file mode 100644
index 000000000..cb888d3eb
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -0,0 +1,99 @@
+/*
+ * DWMAC glue for NXP LPC18xx/LPC43xx Ethernet
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+/* Register defines for CREG syscon */
+#define LPC18XX_CREG_CREG6 0x12c
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK 0x7
+# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
+# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
+
+struct lpc18xx_dwmac_priv_data {
+ struct regmap *reg;
+ int interface;
+};
+
+static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
+{
+ struct lpc18xx_dwmac_priv_data *dwmac;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
+ if (dwmac->interface < 0)
+ return ERR_PTR(dwmac->interface);
+
+ dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(dwmac->reg)) {
+ dev_err(&pdev->dev, "Syscon lookup failed\n");
+ return dwmac->reg;
+ }
+
+ return dwmac;
+}
+
+static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct lpc18xx_dwmac_priv_data *dwmac = priv;
+ u8 ethmode;
+
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+ ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
+ } else {
+ dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+ LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+
+ return 0;
+}
+
+static const struct stmmac_of_data lpc18xx_dwmac_data = {
+ .has_gmac = 1,
+ .setup = lpc18xx_dwmac_setup,
+ .init = lpc18xx_dwmac_init,
+};
+
+static const struct of_device_id lpc18xx_dwmac_match[] = {
+ { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
+
+static struct platform_driver lpc18xx_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "lpc18xx-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = lpc18xx_dwmac_match,
+ },
+};
+module_platform_driver(lpc18xx_dwmac_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("DWMAC glue for LPC18xx/43xx Ethernet");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index cca028d63..61a324a87 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -15,6 +15,7 @@
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>
@@ -63,7 +64,28 @@ static void *meson6_dwmac_setup(struct platform_device *pdev)
return dwmac;
}
-const struct stmmac_of_data meson6_dwmac_data = {
+static const struct stmmac_of_data meson6_dwmac_data = {
.setup = meson6_dwmac_setup,
.fix_mac_speed = meson6_dwmac_fix_mac_speed,
};
+
+static const struct of_device_id meson6_dwmac_match[] = {
+ { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
+
+static struct platform_driver meson6_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "meson6-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = meson6_dwmac_match,
+ },
+};
+module_platform_driver(meson6_dwmac_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6249a4ec0..00a1e1e09 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -22,23 +22,36 @@
#include <linux/phy.h>
#include <linux/of_net.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "stmmac_platform.h"
+
+struct rk_priv_data;
+struct rk_gmac_ops {
+ void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay);
+ void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
+ void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+};
+
struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
struct regulator *regulator;
+ struct rk_gmac_ops *ops;
bool clk_enabled;
bool clock_input;
struct clk *clk_mac;
- struct clk *clk_mac_pll;
struct clk *gmac_clkin;
struct clk *mac_clk_rx;
struct clk *mac_clk_tx;
@@ -61,103 +74,228 @@ struct rk_priv_data {
#define RK3288_GRF_SOC_CON1 0x0248
#define RK3288_GRF_SOC_CON3 0x0250
-#define RK3288_GRF_GPIO3D_E 0x01ec
-#define RK3288_GRF_GPIO4A_E 0x01f0
-#define RK3288_GRF_GPIO4B_E 0x01f4
/*RK3288_GRF_SOC_CON1*/
-#define GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
-#define GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
-#define GMAC_FLOW_CTRL GRF_BIT(9)
-#define GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
-#define GMAC_SPEED_10M GRF_CLR_BIT(10)
-#define GMAC_SPEED_100M GRF_BIT(10)
-#define GMAC_RMII_CLK_25M GRF_BIT(11)
-#define GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
-#define GMAC_RMII_MODE GRF_BIT(14)
-#define GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
+#define RK3288_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | \
+ GRF_CLR_BIT(8))
+#define RK3288_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | \
+ GRF_BIT(8))
+#define RK3288_GMAC_FLOW_CTRL GRF_BIT(9)
+#define RK3288_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
+#define RK3288_GMAC_SPEED_10M GRF_CLR_BIT(10)
+#define RK3288_GMAC_SPEED_100M GRF_BIT(10)
+#define RK3288_GMAC_RMII_CLK_25M GRF_BIT(11)
+#define RK3288_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
+#define RK3288_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define RK3288_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_RMII_MODE GRF_BIT(14)
+#define RK3288_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
/*RK3288_GRF_SOC_CON3*/
-#define GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
-#define GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
-#define GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
-#define GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
-
-static void set_to_rgmii(struct rk_priv_data *bsp_priv,
- int tx_delay, int rx_delay)
+#define RK3288_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3288_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+#define RK3288_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3288_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3288_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3288_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
{
struct device *dev = &bsp_priv->pdev->dev;
if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ dev_err(dev, "Missing rockchip,grf property\n");
return;
}
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- GMAC_PHY_INTF_SEL_RGMII | GMAC_RMII_MODE_CLR);
+ RK3288_GMAC_PHY_INTF_SEL_RGMII |
+ RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
- GMAC_RXCLK_DLY_ENABLE | GMAC_TXCLK_DLY_ENABLE |
- GMAC_CLK_RX_DL_CFG(rx_delay) |
- GMAC_CLK_TX_DL_CFG(tx_delay));
+ RK3288_GMAC_RXCLK_DLY_ENABLE |
+ RK3288_GMAC_TXCLK_DLY_ENABLE |
+ RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
}
-static void set_to_rmii(struct rk_priv_data *bsp_priv)
+static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
struct device *dev = &bsp_priv->pdev->dev;
if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ dev_err(dev, "Missing rockchip,grf property\n");
return;
}
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- GMAC_PHY_INTF_SEL_RMII | GMAC_RMII_MODE);
+ RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
}
-static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ dev_err(dev, "Missing rockchip,grf property\n");
return;
}
if (speed == 10)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_2_5M);
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_CLK_2_5M);
else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_25M);
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_CLK_25M);
else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_125M);
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_CLK_125M);
else
dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
}
-static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ dev_err(dev, "Missing rockchip,grf property\n");
return;
}
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- GMAC_RMII_CLK_2_5M | GMAC_SPEED_10M);
+ RK3288_GMAC_RMII_CLK_2_5M |
+ RK3288_GMAC_SPEED_10M);
} else if (speed == 100) {
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- GMAC_RMII_CLK_25M | GMAC_SPEED_100M);
+ RK3288_GMAC_RMII_CLK_25M |
+ RK3288_GMAC_SPEED_100M);
} else {
dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
}
}
+struct rk_gmac_ops rk3288_ops = {
+ .set_to_rgmii = rk3288_set_to_rgmii,
+ .set_to_rmii = rk3288_set_to_rmii,
+ .set_rgmii_speed = rk3288_set_rgmii_speed,
+ .set_rmii_speed = rk3288_set_rmii_speed,
+};
+
+#define RK3368_GRF_SOC_CON15 0x043c
+#define RK3368_GRF_SOC_CON16 0x0440
+
+/* RK3368_GRF_SOC_CON15 */
+#define RK3368_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_CLR_BIT(11))
+#define RK3368_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_BIT(11))
+#define RK3368_GMAC_FLOW_CTRL GRF_BIT(8)
+#define RK3368_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
+#define RK3368_GMAC_SPEED_10M GRF_CLR_BIT(7)
+#define RK3368_GMAC_SPEED_100M GRF_BIT(7)
+#define RK3368_GMAC_RMII_CLK_25M GRF_BIT(3)
+#define RK3368_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
+#define RK3368_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3368_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_RMII_MODE GRF_BIT(6)
+#define RK3368_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
+
+/* RK3368_GRF_SOC_CON16 */
+#define RK3368_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3368_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+#define RK3368_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3368_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3368_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3368_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_PHY_INTF_SEL_RGMII |
+ RK3368_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
+ RK3368_GMAC_RXCLK_DLY_ENABLE |
+ RK3368_GMAC_TXCLK_DLY_ENABLE |
+ RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
+}
+
+static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_RMII_CLK_2_5M |
+ RK3368_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_RMII_CLK_25M |
+ RK3368_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+struct rk_gmac_ops rk3368_ops = {
+ .set_to_rgmii = rk3368_set_to_rgmii,
+ .set_to_rmii = rk3368_set_to_rmii,
+ .set_rgmii_speed = rk3368_set_rgmii_speed,
+ .set_rmii_speed = rk3368_set_rmii_speed,
+};
+
static int gmac_clk_init(struct rk_priv_data *bsp_priv)
{
struct device *dev = &bsp_priv->pdev->dev;
@@ -166,49 +304,49 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv)
bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
if (IS_ERR(bsp_priv->mac_clk_rx))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "mac_clk_rx");
+ dev_err(dev, "cannot get clock %s\n",
+ "mac_clk_rx");
bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
if (IS_ERR(bsp_priv->mac_clk_tx))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "mac_clk_tx");
+ dev_err(dev, "cannot get clock %s\n",
+ "mac_clk_tx");
bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
if (IS_ERR(bsp_priv->aclk_mac))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "aclk_mac");
+ dev_err(dev, "cannot get clock %s\n",
+ "aclk_mac");
bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
if (IS_ERR(bsp_priv->pclk_mac))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "pclk_mac");
+ dev_err(dev, "cannot get clock %s\n",
+ "pclk_mac");
bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
if (IS_ERR(bsp_priv->clk_mac))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "stmmaceth");
+ dev_err(dev, "cannot get clock %s\n",
+ "stmmaceth");
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
if (IS_ERR(bsp_priv->clk_mac_ref))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "clk_mac_ref");
+ dev_err(dev, "cannot get clock %s\n",
+ "clk_mac_ref");
if (!bsp_priv->clock_input) {
bsp_priv->clk_mac_refout =
devm_clk_get(dev, "clk_mac_refout");
if (IS_ERR(bsp_priv->clk_mac_refout))
- dev_err(dev, "%s: cannot get clock %s\n",
- __func__, "clk_mac_refout");
+ dev_err(dev, "cannot get clock %s\n",
+ "clk_mac_refout");
}
}
if (bsp_priv->clock_input) {
- dev_info(dev, "%s: clock input from PHY\n", __func__);
+ dev_info(dev, "clock input from PHY\n");
} else {
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
+ clk_set_rate(bsp_priv->clk_mac, 50000000);
}
return 0;
@@ -292,26 +430,25 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
struct device *dev = &bsp_priv->pdev->dev;
if (!ldo) {
- dev_err(dev, "%s: no regulator found\n", __func__);
+ dev_err(dev, "no regulator found\n");
return -1;
}
if (enable) {
ret = regulator_enable(ldo);
if (ret)
- dev_err(dev, "%s: fail to enable phy-supply\n",
- __func__);
+ dev_err(dev, "fail to enable phy-supply\n");
} else {
ret = regulator_disable(ldo);
if (ret)
- dev_err(dev, "%s: fail to disable phy-supply\n",
- __func__);
+ dev_err(dev, "fail to disable phy-supply\n");
}
return 0;
}
-static void *rk_gmac_setup(struct platform_device *pdev)
+static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
+ struct rk_gmac_ops *ops)
{
struct rk_priv_data *bsp_priv;
struct device *dev = &pdev->dev;
@@ -324,6 +461,7 @@ static void *rk_gmac_setup(struct platform_device *pdev)
return ERR_PTR(-ENOMEM);
bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+ bsp_priv->ops = ops;
bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(bsp_priv->regulator)) {
@@ -337,12 +475,11 @@ static void *rk_gmac_setup(struct platform_device *pdev)
ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
if (ret) {
- dev_err(dev, "%s: Can not read property: clock_in_out.\n",
- __func__);
+ dev_err(dev, "Can not read property: clock_in_out.\n");
bsp_priv->clock_input = true;
} else {
- dev_info(dev, "%s: clock input or output? (%s).\n",
- __func__, strings);
+ dev_info(dev, "clock input or output? (%s).\n",
+ strings);
if (!strcmp(strings, "input"))
bsp_priv->clock_input = true;
else
@@ -352,22 +489,22 @@ static void *rk_gmac_setup(struct platform_device *pdev)
ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
if (ret) {
bsp_priv->tx_delay = 0x30;
- dev_err(dev, "%s: Can not read property: tx_delay.", __func__);
- dev_err(dev, "%s: set tx_delay to 0x%x\n",
- __func__, bsp_priv->tx_delay);
+ dev_err(dev, "Can not read property: tx_delay.");
+ dev_err(dev, "set tx_delay to 0x%x\n",
+ bsp_priv->tx_delay);
} else {
- dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value);
+ dev_info(dev, "TX delay(0x%x).\n", value);
bsp_priv->tx_delay = value;
}
ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
if (ret) {
bsp_priv->rx_delay = 0x10;
- dev_err(dev, "%s: Can not read property: rx_delay.", __func__);
- dev_err(dev, "%s: set rx_delay to 0x%x\n",
- __func__, bsp_priv->rx_delay);
+ dev_err(dev, "Can not read property: rx_delay.");
+ dev_err(dev, "set rx_delay to 0x%x\n",
+ bsp_priv->rx_delay);
} else {
- dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value);
+ dev_info(dev, "RX delay(0x%x).\n", value);
bsp_priv->rx_delay = value;
}
@@ -377,13 +514,14 @@ static void *rk_gmac_setup(struct platform_device *pdev)
/*rmii or rgmii*/
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
- dev_info(dev, "%s: init for RGMII\n", __func__);
- set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay);
+ dev_info(dev, "init for RGMII\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
+ bsp_priv->rx_delay);
} else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
- dev_info(dev, "%s: init for RMII\n", __func__);
- set_to_rmii(bsp_priv);
+ dev_info(dev, "init for RMII\n");
+ bsp_priv->ops->set_to_rmii(bsp_priv);
} else {
- dev_err(dev, "%s: NO interface defined!\n", __func__);
+ dev_err(dev, "NO interface defined!\n");
}
gmac_clk_init(bsp_priv);
@@ -391,6 +529,16 @@ static void *rk_gmac_setup(struct platform_device *pdev)
return bsp_priv;
}
+static void *rk3288_gmac_setup(struct platform_device *pdev)
+{
+ return rk_gmac_setup(pdev, &rk3288_ops);
+}
+
+static void *rk3368_gmac_setup(struct platform_device *pdev)
+{
+ return rk_gmac_setup(pdev, &rk3368_ops);
+}
+
static int rk_gmac_init(struct platform_device *pdev, void *priv)
{
struct rk_priv_data *bsp_priv = priv;
@@ -421,17 +569,47 @@ static void rk_fix_speed(void *priv, unsigned int speed)
struct device *dev = &bsp_priv->pdev->dev;
if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
- set_rgmii_speed(bsp_priv, speed);
+ bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- set_rmii_speed(bsp_priv, speed);
+ bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
else
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
-const struct stmmac_of_data rk3288_gmac_data = {
+static const struct stmmac_of_data rk3288_gmac_data = {
.has_gmac = 1,
.fix_mac_speed = rk_fix_speed,
- .setup = rk_gmac_setup,
+ .setup = rk3288_gmac_setup,
.init = rk_gmac_init,
.exit = rk_gmac_exit,
};
+
+static const struct stmmac_of_data rk3368_gmac_data = {
+ .has_gmac = 1,
+ .fix_mac_speed = rk_fix_speed,
+ .setup = rk3368_gmac_setup,
+ .init = rk_gmac_init,
+ .exit = rk_gmac_exit,
+};
+
+static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
+ { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
+
+static struct platform_driver rk_gmac_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "rk_gmac-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = rk_gmac_dwmac_match,
+ },
+};
+module_platform_driver(rk_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip RK3288/RK3368 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
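The same conversion is repeated for the socfpga, sti and sunxi glue layers below: the of_data table becomes static and each file registers its own platform driver on top of the newly exported stmmac_pltfr_* helpers. A minimal sketch of that recurring pattern for a hypothetical SoC (all foo_* names and the "vendor,foo-gmac" compatible are placeholders, not part of this patch):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>

#include "stmmac_platform.h"

static void foo_fix_speed(void *priv, unsigned int speed);	/* hypothetical callback */
static void *foo_gmac_setup(struct platform_device *pdev);	/* hypothetical callback */

static const struct stmmac_of_data foo_gmac_data = {
	.has_gmac = 1,
	.fix_mac_speed = foo_fix_speed,
	.setup = foo_gmac_setup,
};

static const struct of_device_id foo_dwmac_match[] = {
	{ .compatible = "vendor,foo-gmac", .data = &foo_gmac_data },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_dwmac_match);

static struct platform_driver foo_dwmac_driver = {
	.probe = stmmac_pltfr_probe,	/* generic probe, exported by this series */
	.remove = stmmac_pltfr_remove,
	.driver = {
		.name = "foo-dwmac",
		.pm = &stmmac_pltfr_pm_ops,
		.of_match_table = foo_dwmac_match,
	},
};
module_platform_driver(foo_dwmac_driver);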
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 5a36bd2c7..8141c5b84 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -257,9 +257,28 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-const struct stmmac_of_data socfpga_gmac_data = {
+static const struct stmmac_of_data socfpga_gmac_data = {
.setup = socfpga_dwmac_probe,
.init = socfpga_dwmac_init,
.exit = socfpga_dwmac_exit,
.fix_mac_speed = socfpga_dwmac_fix_mac_speed,
};
+
+static const struct of_device_id socfpga_dwmac_match[] = {
+ { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
+
+static struct platform_driver socfpga_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "socfpga-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = socfpga_dwmac_match,
+ },
+};
+module_platform_driver(socfpga_dwmac_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index bb6e2dc61..a2e8111c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -17,6 +17,7 @@
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -351,16 +352,40 @@ static void *sti_dwmac_setup(struct platform_device *pdev)
return dwmac;
}
-const struct stmmac_of_data stih4xx_dwmac_data = {
+static const struct stmmac_of_data stih4xx_dwmac_data = {
.fix_mac_speed = stih4xx_fix_retime_src,
.setup = sti_dwmac_setup,
.init = stix4xx_init,
.exit = sti_dwmac_exit,
};
-const struct stmmac_of_data stid127_dwmac_data = {
+static const struct stmmac_of_data stid127_dwmac_data = {
.fix_mac_speed = stid127_fix_retime_src,
.setup = sti_dwmac_setup,
.init = stid127_init,
.exit = sti_dwmac_exit,
};
+
+static const struct of_device_id sti_dwmac_match[] = {
+ { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
+ { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
+ { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
+ { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sti_dwmac_match);
+
+static struct platform_driver sti_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sti-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sti_dwmac_match,
+ },
+};
+module_platform_driver(sti_dwmac_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index c5ea9ab75..15048ca39 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -18,7 +18,9 @@
#include <linux/stmmac.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
@@ -132,7 +134,7 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
/* of_data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers. */
-const struct stmmac_of_data sun7i_gmac_data = {
+static const struct stmmac_of_data sun7i_gmac_data = {
.has_gmac = 1,
.tx_coe = 1,
.fix_mac_speed = sun7i_fix_speed,
@@ -140,3 +142,24 @@ const struct stmmac_of_data sun7i_gmac_data = {
.init = sun7i_gmac_init,
.exit = sun7i_gmac_exit,
};
+
+static const struct of_device_id sun7i_dwmac_match[] = {
+ { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
+
+static struct platform_driver sun7i_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sun7i-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun7i_dwmac_match,
+ },
+};
+module_platform_driver(sun7i_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunxi DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 1e2bcf5f8..7d944449f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
+ p->des01.all_flags = 0;
p->des01.erx.own = 1;
p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
@@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des01.etx.own = 0;
+ p->des01.all_flags = 0;
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_tx_set_on_chain(p, end);
else
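Here (and in the matching norm_desc.c hunk below) clearing des01.all_flags instead of only the own bit works because the descriptor's first word is a union: all_flags aliases every control bitfield, so one store wipes any state left over from the descriptor's previous use. A standalone sketch of that layout (field names illustrative, not the full dma_desc definition):

#include <stdio.h>

struct demo_desc {
	union {
		struct {
			unsigned int buffer1_size:13;
			unsigned int reserved:18;
			unsigned int own:1;
		} etx;			/* per-field view of the word */
		unsigned int all_flags;	/* whole-word view of the same bits */
	} des01;
};

int main(void)
{
	struct demo_desc d = { .des01.etx = { .buffer1_size = 42, .own = 1 } };

	d.des01.all_flags = 0;		/* one store clears every field above */
	printf("own=%u size=%u\n", d.des01.etx.own, d.des01.etx.buffer1_size);
	return 0;
}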
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 08c483bd2..3f20bb1fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -73,7 +73,7 @@
#define MMC_RX_OCTETCOUNT_G 0x00000188
#define MMC_RX_BROADCASTFRAME_G 0x0000018c
#define MMC_RX_MULTICASTFRAME_G 0x00000190
-#define MMC_RX_CRC_ERRROR 0x00000194
+#define MMC_RX_CRC_ERROR 0x00000194
#define MMC_RX_ALIGN_ERROR 0x00000198
#define MMC_RX_RUN_ERROR 0x0000019C
#define MMC_RX_JABBER_ERROR 0x000001A0
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
- mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
+ mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR);
mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 35ad4f427..48c345644 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
int end)
{
+ p->des01.all_flags = 0;
p->des01.rx.own = 1;
p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
@@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des01.tx.own = 0;
+ p->des01.all_flags = 0;
if (mode == STMMAC_CHAIN_MODE)
ndesc_tx_set_on_chain(p, end);
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 73bab983e..1f3b33a6c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,14 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/reset.h>
+struct stmmac_resources {
+ void __iomem *addr;
+ const char *mac;
+ int wol_irq;
+ int lpi_irq;
+ int irq;
+};
+
struct stmmac_tx_info {
dma_addr_t buf;
bool map_as_page;
@@ -135,9 +143,9 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct net_device *ndev);
int stmmac_suspend(struct net_device *ndev);
int stmmac_dvr_remove(struct net_device *ndev);
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- void __iomem *addr);
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res);
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
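With stmmac_dvr_probe() now taking a struct stmmac_resources, each bus front end fills one structure up front instead of patching the private data after probe; the PCI and platform hunks below do exactly that. A minimal sketch of the new calling convention (demo_bus_probe and its arguments are placeholders):

#include <linux/string.h>

#include "stmmac.h"

static int demo_bus_probe(struct device *dev,
			  struct plat_stmmacenet_data *plat,
			  void __iomem *base, int irq)
{
	struct stmmac_resources res;

	memset(&res, 0, sizeof(res));
	res.addr = base;	/* ioremapped register window */
	res.irq = irq;		/* MAC interrupt ("macirq") */
	res.wol_irq = irq;	/* reuse the MAC IRQ when no dedicated WoL line exists */
	/* res.mac left NULL: the core then determines the address itself */

	return stmmac_dvr_probe(dev, plat, &res);	/* 0 on success, -errno on failure */
}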
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2c5ce2bac..864b476f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -52,6 +52,7 @@
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
+#include <linux/of_mdio.h>
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
@@ -816,18 +817,25 @@ static int stmmac_init_phy(struct net_device *dev)
priv->speed = 0;
priv->oldduplex = -1;
- if (priv->plat->phy_bus_name)
- snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
- priv->plat->phy_bus_name, priv->plat->bus_id);
- else
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ if (priv->plat->phy_node) {
+ phydev = of_phy_connect(dev, priv->plat->phy_node,
+ &stmmac_adjust_link, 0, interface);
+ } else {
+ if (priv->plat->phy_bus_name)
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->plat->phy_bus_name, priv->plat->bus_id);
+ else
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+ priv->plat->bus_id);
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
- priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->plat->phy_addr);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n",
+ phy_id_fmt);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
+ interface);
+ }
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -848,7 +856,7 @@ static int stmmac_init_phy(struct net_device *dev)
* device as well.
* Note: phydev->phy_id is the result of reading the UID PHY registers.
*/
- if (phydev->phy_id == 0) {
+ if (!priv->plat->phy_node && phydev->phy_id == 0) {
phy_disconnect(phydev);
return -ENODEV;
}
@@ -975,13 +983,11 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
{
struct sk_buff *skb;
- skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
- flags);
+ skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
}
- skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz,
@@ -1189,41 +1195,41 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
goto err_tx_skbuff;
if (priv->extend_desc) {
- priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
+ priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
if (!priv->dma_erx)
goto err_dma;
- priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
+ priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
if (!priv->dma_etx) {
dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
goto err_dma;
}
} else {
- priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
+ priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
if (!priv->dma_rx)
goto err_dma;
- priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
+ priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
if (!priv->dma_tx) {
dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
goto err_dma;
}
}
@@ -2800,16 +2806,15 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* stmmac_dvr_probe
* @device: device pointer
* @plat_dat: platform data pointer
- * @addr: iobase memory address
+ * @res: stmmac resource pointer
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
* Return:
- * on success the new private structure is returned, otherwise the error
- * pointer.
+ * returns 0 on success, otherwise errno.
*/
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- void __iomem *addr)
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
{
int ret = 0;
struct net_device *ndev = NULL;
@@ -2817,7 +2822,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
SET_NETDEV_DEV(ndev, device);
@@ -2828,8 +2833,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
priv->plat = plat_dat;
- priv->ioaddr = addr;
- priv->dev->base_addr = (unsigned long)addr;
+ priv->ioaddr = res->addr;
+ priv->dev->base_addr = (unsigned long)res->addr;
+
+ priv->dev->irq = res->irq;
+ priv->wol_irq = res->wol_irq;
+ priv->lpi_irq = res->lpi_irq;
+
+ if (res->mac)
+ memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
+
+ dev_set_drvdata(device, priv->dev);
/* Verify driver arguments */
stmmac_verify_args();
@@ -2944,7 +2958,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
}
}
- return priv;
+ return 0;
error_mdio_register:
unregister_netdev(ndev);
@@ -2957,7 +2971,7 @@ error_pclk_get:
error_clk_get:
free_netdev(ndev);
- return ERR_PTR(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
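The dma_alloc_coherent() → dma_zalloc_coherent() switch above only adds a guarantee that the freshly allocated descriptor rings start out zeroed; at this point in the tree the zalloc variant is essentially a thin wrapper, roughly equivalent to:

/* Rough sketch of what linux/dma-mapping.h provides, shown only to make
 * the zero-initialisation guarantee explicit; not part of this patch. */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);

	if (ret)
		memset(ret, 0, size);
	return ret;
}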
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 3bca90871..d71a721ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -163,7 +163,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_priv *priv;
+ struct stmmac_resources res;
int i;
int ret;
@@ -214,19 +214,12 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
pci_enable_msi(pdev);
- priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
- if (IS_ERR(priv)) {
- dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__);
- return PTR_ERR(priv);
- }
- priv->dev->irq = pdev->irq;
- priv->wol_irq = pdev->irq;
-
- pci_set_drvdata(pdev, priv->dev);
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[i];
+ res.wol_irq = pdev->irq;
+ res.irq = pdev->irq;
- dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n");
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 68aec5c46..bcdc8955c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -28,29 +28,11 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include "stmmac.h"
#include "stmmac_platform.h"
-static const struct of_device_id stmmac_dt_ids[] = {
- /* SoC specific glue layers should come before generic bindings */
- { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
- { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
- { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
- { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
- { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
- { .compatible = "st,spear600-gmac"},
- { .compatible = "snps,dwmac-3.610"},
- { .compatible = "snps,dwmac-3.70a"},
- { .compatible = "snps,dwmac-3.710"},
- { .compatible = "snps,dwmac"},
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
-
#ifdef CONFIG_OF
/**
@@ -129,14 +111,9 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct stmmac_dma_cfg *dma_cfg;
const struct of_device_id *device;
+ struct device *dev = &pdev->dev;
- if (!np)
- return -ENODEV;
-
- device = of_match_device(stmmac_dt_ids, &pdev->dev);
- if (!device)
- return -ENODEV;
-
+ device = of_match_device(dev->driver->of_match_table, dev);
if (device->data) {
const struct stmmac_of_data *data = device->data;
plat->has_gmac = data->has_gmac;
@@ -168,13 +145,24 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
/* Default to phy auto-detection */
plat->phy_addr = -1;
+ /* If we find a phy-handle property, use it as the PHY */
+ plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* If phy-handle is not specified, check if we have a fixed-phy */
+ if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+ if ((of_phy_register_fixed_link(np) < 0))
+ return -ENODEV;
+
+ plat->phy_node = of_node_get(np);
+ }
+
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- if (plat->phy_bus_name)
+ if (plat->phy_node || plat->phy_bus_name)
plat->mdio_bus_data = NULL;
else
plat->mdio_bus_data =
@@ -232,8 +220,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
if (of_find_property(np, "snps,pbl", NULL)) {
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
- if (!dma_cfg)
+ if (!dma_cfg) {
+ of_node_put(np);
return -ENOMEM;
+ }
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
dma_cfg->fixed_burst =
@@ -268,27 +258,26 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
* the necessary platform resources, invoke custom helper (if required) and
* invoke the main probe function.
*/
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_pltfr_probe(struct platform_device *pdev)
{
+ struct stmmac_resources stmmac_res;
int ret = 0;
struct resource *res;
struct device *dev = &pdev->dev;
- void __iomem *addr = NULL;
- struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat = NULL;
- const char *mac = NULL;
- int irq, wol_irq, lpi_irq;
+
+ memset(&stmmac_res, 0, sizeof(stmmac_res));
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
*/
- irq = platform_get_irq_byname(pdev, "macirq");
- if (irq < 0) {
- if (irq != -EPROBE_DEFER) {
+ stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
+ if (stmmac_res.irq < 0) {
+ if (stmmac_res.irq != -EPROBE_DEFER) {
dev_err(dev,
"MAC IRQ configuration information not found\n");
}
- return irq;
+ return stmmac_res.irq;
}
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -298,21 +287,21 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* In case the wake up interrupt is not passed from the platform
* so the driver will continue to use the mac irq (ndev->irq)
*/
- wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (wol_irq < 0) {
- if (wol_irq == -EPROBE_DEFER)
+ stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (stmmac_res.wol_irq < 0) {
+ if (stmmac_res.wol_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
- wol_irq = irq;
+ stmmac_res.wol_irq = stmmac_res.irq;
}
- lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
- if (lpi_irq == -EPROBE_DEFER)
+ stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (stmmac_res.lpi_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
+ stmmac_res.addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(stmmac_res.addr))
+ return PTR_ERR(stmmac_res.addr);
plat_dat = dev_get_platdata(&pdev->dev);
@@ -332,7 +321,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
plat_dat->unicast_filter_entries = 1;
if (pdev->dev.of_node) {
- ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
+ ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
if (ret) {
pr_err("%s: main dt probe failed", __func__);
return ret;
@@ -353,27 +342,9 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
return ret;
}
- priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
- if (IS_ERR(priv)) {
- pr_err("%s: main driver probe failed", __func__);
- return PTR_ERR(priv);
- }
-
- /* Copy IRQ values to priv structure which is now avaialble */
- priv->dev->irq = irq;
- priv->wol_irq = wol_irq;
- priv->lpi_irq = lpi_irq;
-
- /* Get MAC address if available (DT) */
- if (mac)
- memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
-
- platform_set_drvdata(pdev, priv->dev);
-
- pr_debug("STMMAC platform driver registration completed");
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
/**
* stmmac_pltfr_remove
@@ -381,7 +352,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* Description: this function calls the main to free the net resources
* and calls the platforms hook and release the resources (e.g. mem).
*/
-static int stmmac_pltfr_remove(struct platform_device *pdev)
+int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -395,6 +366,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
return ret;
}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
#ifdef CONFIG_PM_SLEEP
/**
@@ -438,21 +410,10 @@ static int stmmac_pltfr_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
- stmmac_pltfr_suspend, stmmac_pltfr_resume);
-
-static struct platform_driver stmmac_pltfr_driver = {
- .probe = stmmac_pltfr_probe,
- .remove = stmmac_pltfr_remove,
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- .pm = &stmmac_pltfr_pm_ops,
- .of_match_table = of_match_ptr(stmmac_dt_ids),
- },
-};
-
-module_platform_driver(stmmac_pltfr_driver);
+SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
+ stmmac_pltfr_resume);
+EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 093eb99e5..71da86d7b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,11 +19,8 @@
#ifndef __STMMAC_PLATFORM_H__
#define __STMMAC_PLATFORM_H__
-extern const struct stmmac_of_data meson6_dwmac_data;
-extern const struct stmmac_of_data sun7i_gmac_data;
-extern const struct stmmac_of_data stih4xx_dwmac_data;
-extern const struct stmmac_of_data stid127_dwmac_data;
-extern const struct stmmac_of_data socfpga_gmac_data;
-extern const struct stmmac_of_data rk3288_gmac_data;
+int stmmac_pltfr_probe(struct platform_device *pdev);
+int stmmac_pltfr_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index 3074aa374..dee94b676 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_SUN
default y
depends on SUN3 || SBUS || PCI || SUN_LDOMS
---help---
- If you have a network (Ethernet) card belonging to this class, say
- Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index d034a70f6..8d2b15c67 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3058,7 +3058,6 @@ static void cas_init_mac(struct cas *cp)
/* setup core arbitration weight register */
writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
- /* XXX Use pci_dma_burst_advice() */
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
/* set the infinite burst register for chips that don't have
* pci issues.
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb..ab6051a43 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, len);
- if (!skb_new) {
- rp->tx_errors++;
+ if (!skb_new)
goto out_drop;
- }
kfree_skb(skb);
skb = skb_new;
} else
diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig
index 1fc027eda..b17f0ca3f 100644
--- a/drivers/net/ethernet/tehuti/Kconfig
+++ b/drivers/net/ethernet/tehuti/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_TEHUTI
default y
depends on PCI
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 631e0afd0..e7f0b7d95 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_TI
default y
depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -110,9 +108,7 @@ config TLAN
depends on (PCI || EISA)
---help---
If you have a PCI Ethernet network card based on the ThunderLAN chip
- which is supported by this driver, say Y and read the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ which is supported by this driver, say Y here.
Devices currently supported by this driver are Compaq Netelligent,
Compaq NetFlex and Olicom cards. Please read the file
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b536b4c82..d155bf257 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -138,19 +138,6 @@ do { \
#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
-#define cpsw_enable_irq(priv) \
- do { \
- u32 i; \
- for (i = 0; i < priv->num_irqs; i++) \
- enable_irq(priv->irqs_table[i]); \
- } while (0)
-#define cpsw_disable_irq(priv) \
- do { \
- u32 i; \
- for (i = 0; i < priv->num_irqs; i++) \
- disable_irq_nosync(priv->irqs_table[i]); \
- } while (0)
-
#define cpsw_slave_index(priv) \
((priv->data.dual_emac) ? priv->emac_port : \
priv->data.active_slave)
@@ -509,9 +496,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
(func)(slave++, ##arg); \
} while (0)
#define cpsw_get_slave_ndev(priv, __slave_no__) \
- (priv->slaves[__slave_no__].ndev)
+ ((__slave_no__ < priv->data.slaves) ? \
+ priv->slaves[__slave_no__].ndev : NULL)
#define cpsw_get_slave_priv(priv, __slave_no__) \
- ((priv->slaves[__slave_no__].ndev) ? \
+ (((__slave_no__ < priv->data.slaves) && \
+ (priv->slaves[__slave_no__].ndev)) ? \
netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
@@ -781,7 +770,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
- cpsw_disable_irq(priv);
+ disable_irq_nosync(priv->irqs_table[0]);
priv->irq_enabled = false;
}
@@ -804,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
static int cpsw_poll(struct napi_struct *napi, int budget)
{
struct cpsw_priv *priv = napi_to_priv(napi);
- int num_tx, num_rx;
-
- num_tx = cpdma_chan_process(priv->txch, 128);
+ int num_rx;
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
@@ -817,13 +804,12 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
prim_cpsw->irq_enabled = true;
- cpsw_enable_irq(priv);
+ enable_irq(priv->irqs_table[0]);
}
}
- if (num_rx || num_tx)
- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
- num_rx, num_tx);
+ if (num_rx)
+ cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
return num_rx;
}
@@ -1333,7 +1319,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (prim_cpsw->irq_enabled == false) {
if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
prim_cpsw->irq_enabled = true;
- cpsw_enable_irq(prim_cpsw);
+ enable_irq(prim_cpsw->irqs_table[0]);
}
}
@@ -1361,7 +1347,6 @@ static int cpsw_ndo_stop(struct net_device *ndev)
if (cpsw_common_res_usage_state(priv) <= 1) {
cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_ctlr_stop(priv->dma);
cpsw_ale_stop(priv->ale);
}
@@ -1456,7 +1441,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->cpts->rx_enable)
ctrl |= CTRL_V2_RX_TS_BITS;
- break;
+ break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
@@ -1466,7 +1451,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->cpts->rx_enable)
ctrl |= CTRL_V3_RX_TS_BITS;
- break;
+ break;
}
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1589,10 +1574,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
ndev->stats.tx_errors++;
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_chan_stop(priv->txch);
cpdma_chan_start(priv->txch);
- cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
@@ -1629,10 +1612,8 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpsw_rx_interrupt(priv->irqs_table[0], priv);
cpsw_tx_interrupt(priv->irqs_table[1], priv);
- cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
#endif
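With the cpsw_enable_irq()/cpsw_disable_irq() loop macros removed, the RX path above masks and re-arms exactly one interrupt line around NAPI polling. A minimal sketch of that handshake, with every demo_* name hypothetical:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_priv {
	struct napi_struct napi;
	int irq;
};

static int demo_process_rx(struct demo_priv *priv, int budget);	/* hypothetical drain helper */

static irqreturn_t demo_rx_interrupt(int irq, void *dev_id)
{
	struct demo_priv *priv = dev_id;

	disable_irq_nosync(irq);	/* mask further RX IRQs while NAPI runs */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
	int done = demo_process_rx(priv, budget);

	if (done < budget) {
		napi_complete(napi);
		enable_irq(priv->irq);	/* re-arm only once the ring is drained */
	}
	return done;
}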
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 6e927b458..43b061bd8 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -268,39 +268,6 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
}
EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
-static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
- int port_mask)
-{
- int port;
-
- port = cpsw_ale_get_port_num(ale_entry);
- if ((BIT(port) & port_mask) == 0)
- return; /* ports dont intersect, not interested */
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
-}
-
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
-{
- u32 ale_entry[ALE_ENTRY_WORDS];
- int ret, idx;
-
- for (idx = 0; idx < ale->params.ale_entries; idx++) {
- cpsw_ale_read(ale, idx, ale_entry);
- ret = cpsw_ale_get_entry_type(ale_entry);
- if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
- continue;
-
- if (cpsw_ale_get_mcast(ale_entry))
- cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
- else
- cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
-
- cpsw_ale_write(ale, idx, ale_entry);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_flush);
-
static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
int flags, u16 vid)
{
@@ -752,18 +719,6 @@ static void cpsw_ale_timer(unsigned long arg)
}
}
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
-{
- del_timer_sync(&ale->timer);
- ale->ageout = ageout * HZ;
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
-
void cpsw_ale_start(struct cpsw_ale *ale)
{
u32 rev;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index af1e7ecd8..a7001894f 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -90,8 +90,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale);
void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid);
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5ccc..bb1bb7212 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
struct list_head rxhook_list_head;
unsigned int rx_queue_id;
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
- u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi;
struct napi_struct tx_napi;
@@ -223,6 +222,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
/* SGMII functions */
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 43efc3a0c..4755838c6 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT 64
#define NETCP_TX_TIMEOUT (5 * HZ)
+#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR 16
@@ -537,7 +538,7 @@ int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
static void netcp_frag_free(bool is_frag, void *ptr)
{
if (is_frag)
- put_page(virt_to_head_page(ptr));
+ skb_free_frag(ptr);
else
kfree(ptr);
}
@@ -698,7 +699,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
}
}
- netcp->ndev->last_rx = jiffies;
netcp->ndev->stats.rx_packets++;
netcp->ndev->stats.rx_bytes += skb->len;
@@ -805,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
if (likely(fdq == 0)) {
unsigned int primary_buf_len;
/* Allocate a primary receive queue entry */
- buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+ buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
primary_buf_len = SKB_DATA_ALIGN(buf_len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (primary_buf_len <= PAGE_SIZE) {
- bufptr = netdev_alloc_frag(primary_buf_len);
- pad[1] = primary_buf_len;
- } else {
- bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
- GFP_DMA32 | __GFP_COLD);
- pad[1] = 0;
- }
+ bufptr = netdev_alloc_frag(primary_buf_len);
+ pad[1] = primary_buf_len;
if (unlikely(!bufptr)) {
- dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+ dev_warn_ratelimited(netcp->ndev_dev,
+ "Primary RX buffer alloc failed\n");
goto fail;
}
dma = dma_map_single(netcp->dev, bufptr, buf_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(netcp->dev, dma)))
+ goto fail;
+
pad[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
@@ -1011,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
/* Map the linear buffer */
dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (unlikely(!dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
return NULL;
}
@@ -1547,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
knav_queue_disable_notify(netcp->rx_queue);
/* open Rx FDQs */
- for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
- netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+ ++i) {
snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1618,11 +1616,11 @@ static int netcp_ndo_open(struct net_device *ndev)
}
mutex_unlock(&netcp_modules_lock);
- netcp_rxpool_refill(netcp);
napi_enable(&netcp->rx_napi);
napi_enable(&netcp->tx_napi);
knav_queue_enable_notify(netcp->tx_compl_q);
knav_queue_enable_notify(netcp->rx_queue);
+ netcp_rxpool_refill(netcp);
netif_tx_wake_all_queues(ndev);
dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
return 0;
@@ -1942,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
netcp->rx_queue_depths[0] = 128;
}
- ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
- netcp->rx_buffer_sizes,
- KNAV_DMA_FDQ_PER_CHAN);
- if (ret) {
- dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
- netcp->rx_buffer_sizes[0] = 1536;
- }
-
ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
if (ret < 0) {
dev_err(dev, "missing \"rx-pool\" parameter\n");
@@ -2113,6 +2103,7 @@ probe_quit:
static int netcp_remove(struct platform_device *pdev)
{
struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+ struct netcp_intf *netcp_intf, *netcp_tmp;
struct netcp_inst_modpriv *inst_modpriv, *tmp;
struct netcp_module *module;
@@ -2124,10 +2115,17 @@ static int netcp_remove(struct platform_device *pdev)
list_del(&inst_modpriv->inst_list);
kfree(inst_modpriv);
}
- WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
- pdev->name);
- devm_kfree(&pdev->dev, netcp_device);
+ /* now that all modules are removed, clean up the interfaces */
+ list_for_each_entry_safe(netcp_intf, netcp_tmp,
+ &netcp_device->interface_head,
+ interface_list) {
+ netcp_delete_interface(netcp_device, netcp_intf->ndev);
+ }
+
+ WARN(!list_empty(&netcp_device->interface_head),
+ "%s interface list not empty!\n", pdev->name);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
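Two of the netcp_core.c hunks above replace `!dma` tests with dma_mapping_error(): a handle of zero can be a perfectly valid bus address on some platforms, so mapping failure has to be detected through the DMA API. The pattern in isolation (demo_map_buf is a placeholder name):

#include <linux/dma-mapping.h>

static int demo_map_buf(struct device *dev, void *buf, size_t len,
			dma_addr_t *handle)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* Never infer failure from the handle's value itself. */
	if (dma_mapping_error(dev, dma))
		return -EIO;

	*handle = dma;
	return 0;
}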
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c..1974a8ae7 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}
+static void gbe_sgmii_rtreset(struct gbe_priv *priv,
+ struct gbe_slave *slave, bool set)
+{
+ void __iomem *sgmii_port_regs;
+
+ if (SLAVE_LINK_IS_XGMII(slave))
+ return;
+
+ if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
+ sgmii_port_regs = priv->sgmii_port34_regs;
+ else
+ sgmii_port_regs = priv->sgmii_port_regs;
+
+ netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
+}
+
static void gbe_slave_stop(struct gbe_intf *intf)
{
struct gbe_priv *gbe_dev = intf->gbe_dev;
struct gbe_slave *slave = intf->slave;
+ gbe_sgmii_rtreset(gbe_dev, slave, true);
gbe_port_reset(slave);
/* Disable forwarding */
cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
gbe_sgmii_config(priv, slave);
gbe_port_reset(slave);
+ gbe_sgmii_rtreset(priv, slave, false);
gbe_port_config(priv, slave, priv->rx_packet_max);
gbe_set_slave_mac(slave, gbe_intf);
/* enable forwarding */
@@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
struct gbe_slave *slave;
- for (;;) {
+ while (!list_empty(&gbe_dev->secondary_slaves)) {
slave = first_sec_slave(gbe_dev);
- if (!slave)
- break;
+
if (slave->phy)
phy_disconnect(slave->phy);
list_del(&slave->slave_list);
@@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
&gbe_dev->dma_chan_name);
if (ret < 0) {
dev_err(dev, "missing \"tx-channel\" parameter\n");
- ret = -ENODEV;
- goto quit;
+ return -EINVAL;
}
if (!strcmp(node->name, "gbe")) {
ret = get_gbe_resource_version(gbe_dev, node);
if (ret)
- goto quit;
+ return ret;
dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
@@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
else
ret = -ENODEV;
- if (ret)
- goto quit;
} else if (!strcmp(node->name, "xgbe")) {
ret = set_xgbe_ethss10_priv(gbe_dev, node);
if (ret)
- goto quit;
+ return ret;
ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
gbe_dev->ss_regs);
- if (ret)
- goto quit;
} else {
dev_err(dev, "unknown GBE node(%s)\n", node->name);
ret = -ENODEV;
- goto quit;
}
+ if (ret)
+ return ret;
+
interfaces = of_get_child_by_name(node, "interfaces");
if (!interfaces)
dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
if (ret)
- goto quit;
+ return ret;
ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
if (ret)
- goto quit;
+ return ret;
/* Create network interfaces */
INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
break;
}
+ of_node_put(interfaces);
if (!gbe_dev->num_slaves)
dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
of_node_put(secondary_ports);
if (!gbe_dev->num_slaves) {
- dev_err(dev, "No network interface or secondary ports configured\n");
+ dev_err(dev,
+ "No network interface or secondary ports configured\n");
ret = -ENODEV;
- goto quit;
+ goto free_sec_ports;
}
memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (!gbe_dev->ale) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");
ret = -ENODEV;
- goto quit;
+ goto free_sec_ports;
} else {
dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
}
@@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
*inst_priv = gbe_dev;
return 0;
-quit:
- if (gbe_dev->hw_stats)
- devm_kfree(dev, gbe_dev->hw_stats);
- cpsw_ale_destroy(gbe_dev->ale);
- if (gbe_dev->ss_regs)
- devm_iounmap(dev, gbe_dev->ss_regs);
- of_node_put(interfaces);
- devm_kfree(dev, gbe_dev);
+free_sec_ports:
+ free_secondary_ports(gbe_dev);
return ret;
}
@@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
free_secondary_ports(gbe_dev);
if (!list_empty(&gbe_dev->gbe_intf_head))
- dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
+ dev_alert(gbe_dev->dev,
+ "unreleased ethss interfaces present\n");
- devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
- devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
- memset(gbe_dev, 0x00, sizeof(*gbe_dev));
- devm_kfree(gbe_dev->dev, gbe_dev);
return 0;
}
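The gbe_probe() error-path rework above relies on devm_-managed allocations and mappings being released automatically when probe fails, so the surviving labels only undo work the device-managed API does not track (the secondary-port list). A compressed sketch of that shape, all demo_* helpers hypothetical:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_init_secondary_ports(struct platform_device *pdev);	/* hypothetical, not devm-managed */
static int demo_register_interfaces(struct platform_device *pdev);	/* hypothetical */
static void demo_free_secondary_ports(struct platform_device *pdev);	/* hypothetical */

static int demo_probe(struct platform_device *pdev)
{
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *regs;
	int ret;

	regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(regs))
		return PTR_ERR(regs);		/* devm_* state unwinds itself */

	ret = demo_init_secondary_ports(pdev);
	if (ret)
		return ret;

	ret = demo_register_interfaces(pdev);
	if (ret)
		goto free_sec_ports;		/* only manual state needs undoing */

	return 0;

free_sec_ports:
	demo_free_secondary_ports(pdev);
	return ret;
}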
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266..5d8419f65 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
#include "netcp.h"
+#define SGMII_SRESET_RESET BIT(0)
+#define SGMII_SRESET_RTRESET BIT(1)
+
#define SGMII_REG_STATUS_LOCK BIT(4)
#define SGMII_REG_STATUS_LINK BIT(0)
#define SGMII_REG_STATUS_AUTONEG BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
{
/* Soft reset */
- sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
- while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
+ sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
+ SGMII_SRESET_RESET);
+
+ while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
+ SGMII_SRESET_RESET) != 0x0)
;
+
return 0;
}
+/* port is 0 based */
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
+{
+ u32 reg;
+ bool oldval;
+
+ /* Initiate a soft reset */
+ reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
+ oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
+ if (set)
+ reg |= SGMII_SRESET_RTRESET;
+ else
+ reg &= ~SGMII_SRESET_RTRESET;
+ sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
+ wmb();
+
+ return oldval;
+}
+
int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
{
u32 status = 0, link = 0;
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 3d8f60d96..6f0a4495c 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -721,9 +721,6 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
if (!hash_default)
__inv_buffer(buf, len);
- /* ISSUE: Is this needed? */
- dev->last_rx = jiffies;
-
#ifdef TILE_NET_DUMP_PACKETS
dump_packet(buf, len, "rx");
#endif /* TILE_NET_DUMP_PACKETS */
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 5d244b6b5..6f1d5b623 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_TOSHIBA
default y
depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ac62a5e24..79f0ec4e5 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -102,6 +102,18 @@ static void gelic_card_get_ether_port_status(struct gelic_card *card,
}
}
+/**
+ * gelic_descr_get_status -- returns the status of a descriptor
+ * @descr: descriptor to look at
+ *
+ * returns the status as in the dmac_cmd_status field of the descriptor
+ */
+static enum gelic_descr_dma_status
+gelic_descr_get_status(struct gelic_descr *descr)
+{
+ return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
+}
+
static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
{
int status;
@@ -278,18 +290,6 @@ void gelic_card_down(struct gelic_card *card)
}
/**
- * gelic_descr_get_status -- returns the status of a descriptor
- * @descr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static enum gelic_descr_dma_status
-gelic_descr_get_status(struct gelic_descr *descr)
-{
- return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
-}
-
-/**
* gelic_card_free_chain - free descriptor chain
* @card: card structure
* @descr_in: address of desc
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index a7d010f72..5180394f0 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -604,8 +604,7 @@ spider_net_set_multi(struct net_device *netdev)
int i;
u32 reg;
struct spider_net_card *card = netdev_priv(netdev);
- unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
- {0, };
+ DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
spider_net_set_promisc(card);
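DECLARE_BITMAP() in the hunk above produces the same storage as the hand-rolled unsigned long array it replaces, but sizes it correctly for any bit count and word size. Roughly what the kernel macros (linux/types.h, linux/bitops.h) expand to, reproduced here only to make the replacement concrete:

#define DEMO_BITS_TO_LONGS(nr) \
	(((nr) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))
#define DEMO_DECLARE_BITMAP(name, bits) \
	unsigned long name[DEMO_BITS_TO_LONGS(bits)]

/* e.g. 256 hash bits on a 64-bit build: unsigned long bitmask[4] */
DEMO_DECLARE_BITMAP(bitmask, 256);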
diff --git a/drivers/net/ethernet/tundra/Kconfig b/drivers/net/ethernet/tundra/Kconfig
index cf7d69b62..81d845e4e 100644
--- a/drivers/net/ethernet/tundra/Kconfig
+++ b/drivers/net/ethernet/tundra/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_TUNDRA
default y
depends on TSI108_BRIDGE
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index f66ddaee0..2f1264b88 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -6,9 +6,7 @@ config NET_VENDOR_VIA
bool "VIA devices"
default y
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
@@ -19,7 +17,8 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
- depends on (PCI || USE_OF)
+ depends on (PCI || OF_IRQ)
+ depends on HAS_DMA
select CRC32
select MII
---help---
@@ -43,7 +42,8 @@ config VIA_RHINE_MMIO
config VIA_VELOCITY
tristate "VIA Velocity support"
- depends on (PCI || USE_OF)
+ depends on (PCI || (OF_ADDRESS && OF_IRQ))
+ depends on HAS_DMA
select CRC32
select CRC_CCITT
select MII
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index de2850497..a83263743 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -96,7 +96,6 @@ static const int multicast_filter_limit = 32;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
@@ -472,8 +471,7 @@ struct rhine_private {
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
- struct rx_desc *rx_head_desc;
- unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_rx;
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
struct rhine_stats rx_stats;
@@ -1213,17 +1211,61 @@ static void free_ring(struct net_device* dev)
}
-static void alloc_rbufs(struct net_device *dev)
+struct rhine_skb_dma {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+};
+
+static inline int rhine_skb_dma_init(struct net_device *dev,
+ struct rhine_skb_dma *sd)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- dma_addr_t next;
+ const int size = rp->rx_buf_sz;
+
+ sd->skb = netdev_alloc_skb(dev, size);
+ if (!sd->skb)
+ return -ENOMEM;
+
+ sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
+ netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
+ dev_kfree_skb_any(sd->skb);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void rhine_reset_rbufs(struct rhine_private *rp)
+{
int i;
- rp->dirty_rx = rp->cur_rx = 0;
+ rp->cur_rx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+ rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+}
+
+static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
+ struct rhine_skb_dma *sd, int entry)
+{
+ rp->rx_skbuff_dma[entry] = sd->dma;
+ rp->rx_skbuff[entry] = sd->skb;
+
+ rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
+ dma_wmb();
+}
+
+static void free_rbufs(struct net_device* dev);
+
+static int alloc_rbufs(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ dma_addr_t next;
+ int rc, i;
rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
- rp->rx_head_desc = &rp->rx_ring[0];
next = rp->rx_ring_dma;
/* Init the ring entries */
@@ -1239,23 +1281,20 @@ static void alloc_rbufs(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
- rp->rx_skbuff[i] = skb;
- if (skb == NULL)
- break;
+ struct rhine_skb_dma sd;
- rp->rx_skbuff_dma[i] =
- dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
- rp->rx_skbuff_dma[i] = 0;
- dev_kfree_skb(skb);
- break;
+ rc = rhine_skb_dma_init(dev, &sd);
+ if (rc < 0) {
+ free_rbufs(dev);
+ goto out;
}
- rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
- rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+
+ rhine_skb_dma_nic_store(rp, &sd, i);
}
- rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ rhine_reset_rbufs(rp);
+out:
+ return rc;
}
static void free_rbufs(struct net_device* dev)
@@ -1659,16 +1698,18 @@ static int rhine_open(struct net_device *dev)
rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
- return rc;
+ goto out;
netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
rc = alloc_ring(dev);
- if (rc) {
- free_irq(rp->irq, dev);
- return rc;
- }
- alloc_rbufs(dev);
+ if (rc < 0)
+ goto out_free_irq;
+
+ rc = alloc_rbufs(dev);
+ if (rc < 0)
+ goto out_free_ring;
+
alloc_tbufs(dev);
rhine_chip_reset(dev);
rhine_task_enable(rp);
@@ -1680,7 +1721,14 @@ static int rhine_open(struct net_device *dev)
netif_start_queue(dev);
- return 0;
+out:
+ return rc;
+
+out_free_ring:
+ free_ring(dev);
+out_free_irq:
+ free_irq(rp->irq, dev);
+ goto out;
}
static void rhine_reset_task(struct work_struct *work)
@@ -1700,9 +1748,9 @@ static void rhine_reset_task(struct work_struct *work)
/* clear all descriptors */
free_tbufs(dev);
- free_rbufs(dev);
alloc_tbufs(dev);
- alloc_rbufs(dev);
+
+ rhine_reset_rbufs(rp);
/* Reinitialize the hardware. */
rhine_chip_reset(dev);
@@ -1730,6 +1778,11 @@ static void rhine_tx_timeout(struct net_device *dev)
schedule_work(&rp->reset_task);
}
+static inline bool rhine_tx_queue_full(struct rhine_private *rp)
+{
+ return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
+}
+
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1800,11 +1853,17 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
netdev_sent_queue(dev, skb->len);
/* lock eth irq */
- wmb();
+ dma_wmb();
rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
wmb();
rp->cur_tx++;
+ /*
+ * Nobody wants the cur_tx update to go stale long after the NIC has
+ * seen the transmit request, especially as the transmit completion
+ * handler could otherwise miss it.
+ */
+ smp_wmb();
/* Non-x86 Todo: explicitly flush cache lines here. */
@@ -1817,8 +1876,14 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
ioaddr + ChipCmd1);
IOSYNC;
- if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
+ /* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
+ if (rhine_tx_queue_full(rp)) {
netif_stop_queue(dev);
+ smp_rmb();
+ /* Rejuvenate. */
+ if (!rhine_tx_queue_full(rp))
+ netif_wake_queue(dev);
+ }
netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
rp->cur_tx - 1, entry);
@@ -1866,13 +1931,24 @@ static void rhine_tx(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int dirty_tx = rp->dirty_tx;
+ unsigned int cur_tx;
struct sk_buff *skb;
+ /*
+ * The race with rhine_start_tx does not matter here as long as the
+ * driver enforces a value of cur_tx that was relevant when the
+ * packet was scheduled to the network chipset.
+ * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
+ */
+ smp_rmb();
+ cur_tx = rp->cur_tx;
/* find and cleanup dirty tx descriptors */
- while (rp->dirty_tx != rp->cur_tx) {
- txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+ while (dirty_tx != cur_tx) {
+ unsigned int entry = dirty_tx % TX_RING_SIZE;
+ u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+
netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
entry, txstatus);
if (txstatus & DescOwn)
@@ -1921,12 +1997,23 @@ static void rhine_tx(struct net_device *dev)
pkts_compl++;
dev_consume_skb_any(skb);
rp->tx_skbuff[entry] = NULL;
- entry = (++rp->dirty_tx) % TX_RING_SIZE;
+ dirty_tx++;
}
+ rp->dirty_tx = dirty_tx;
+ /* Pity we can't rely on the nearby BQL completion implicit barrier. */
+ smp_wmb();
+
netdev_completed_queue(dev, pkts_compl, bytes_compl);
- if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
+
+ /* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
+ if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
netif_wake_queue(dev);
+ smp_rmb();
+ /* Rejuvenate. */
+ if (rhine_tx_queue_full(rp))
+ netif_stop_queue(dev);
+ }
}
/**
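The smp_wmb()/smp_rmb() pairing added to rhine_start_tx() and rhine_tx() above amounts to a stop-then-recheck / wake-then-recheck handshake between the xmit and completion paths. Below is a self-contained C11 sketch of that pattern, with the netdev queue and ring hardware reduced to plain variables; all names are illustrative and seq_cst fences stand in for the kernel barriers:

#include <stdatomic.h>
#include <stdbool.h>

#define TX_QUEUE_LEN 60

static atomic_uint cur_tx;	/* advanced by the xmit path */
static atomic_uint dirty_tx;	/* advanced by the completion path */
static atomic_bool queue_stopped;

static bool tx_queue_full(void)
{
	unsigned int c = atomic_load_explicit(&cur_tx, memory_order_relaxed);
	unsigned int d = atomic_load_explicit(&dirty_tx, memory_order_relaxed);

	return (c - d) >= TX_QUEUE_LEN;
}

/* Producer, roughly rhine_start_tx(): stop the queue, then re-check. */
static void xmit_one(void)
{
	atomic_fetch_add_explicit(&cur_tx, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* ~ smp_wmb() */

	if (tx_queue_full()) {
		atomic_store(&queue_stopped, true);
		atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_rmb() */
		if (!tx_queue_full())		/* completion path raced us */
			atomic_store(&queue_stopped, false);
	}
}

/* Consumer, roughly rhine_tx(): wake the queue, then re-check. */
static void complete_one(void)
{
	atomic_fetch_add_explicit(&dirty_tx, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* ~ smp_wmb() */

	if (!tx_queue_full() && atomic_load(&queue_stopped)) {
		atomic_store(&queue_stopped, false);
		atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_rmb() */
		if (tx_queue_full())		/* xmit path raced us */
			atomic_store(&queue_stopped, true);
	}
}

int main(void)
{
	xmit_one();		/* single-threaded smoke test */
	complete_one();
	return !(cur_tx == 1 && dirty_tx == 1);
}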
@@ -1944,22 +2031,33 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
return be16_to_cpup((__be16 *)trailer);
}
+static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
+ int data_size)
+{
+ dma_rmb();
+ if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
+ u16 vlan_tci;
+
+ vlan_tci = rhine_get_vlan_tci(skb, data_size);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ }
+}
+
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- int count;
int entry = rp->cur_rx % RX_RING_SIZE;
+ int count;
netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
- entry, le32_to_cpu(rp->rx_head_desc->rx_status));
+ entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
/* If EOP is set on the next entry, it's a new packet. Send it up. */
for (count = 0; count < limit; ++count) {
- struct rx_desc *desc = rp->rx_head_desc;
+ struct rx_desc *desc = rp->rx_ring + entry;
u32 desc_status = le32_to_cpu(desc->rx_status);
- u32 desc_length = le32_to_cpu(desc->desc_length);
int data_size = desc_status >> 16;
if (desc_status & DescOwn)
@@ -1975,10 +2073,6 @@ static int rhine_rx(struct net_device *dev, int limit)
"entry %#x length %d status %08x!\n",
entry, data_size,
desc_status);
- netdev_warn(dev,
- "Oversized Ethernet frame %p vs %p\n",
- rp->rx_head_desc,
- &rp->rx_ring[entry]);
dev->stats.rx_length_errors++;
} else if (desc_status & RxErr) {
/* There was a error. */
@@ -2000,16 +2094,17 @@ static int rhine_rx(struct net_device *dev, int limit)
}
}
} else {
- struct sk_buff *skb = NULL;
/* Length should omit the CRC */
int pkt_len = data_size - 4;
- u16 vlan_tci = 0;
+ struct sk_buff *skb;
/* Check if the packet is long enough to accept without
copying to a minimally-sized skbuff. */
- if (pkt_len < rx_copybreak)
+ if (pkt_len < rx_copybreak) {
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
- if (skb) {
+ if (unlikely(!skb))
+ goto drop;
+
dma_sync_single_for_cpu(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
@@ -2018,32 +2113,31 @@ static int rhine_rx(struct net_device *dev, int limit)
skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
pkt_len);
- skb_put(skb, pkt_len);
+
dma_sync_single_for_device(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
DMA_FROM_DEVICE);
} else {
+ struct rhine_skb_dma sd;
+
+ if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
+ goto drop;
+
skb = rp->rx_skbuff[entry];
- if (skb == NULL) {
- netdev_err(dev, "Inconsistent Rx descriptor chain\n");
- break;
- }
- rp->rx_skbuff[entry] = NULL;
- skb_put(skb, pkt_len);
+
dma_unmap_single(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
DMA_FROM_DEVICE);
+ rhine_skb_dma_nic_store(rp, &sd, entry);
}
- if (unlikely(desc_length & DescTag))
- vlan_tci = rhine_get_vlan_tci(skb, data_size);
-
+ skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
- if (unlikely(desc_length & DescTag))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rhine_rx_vlan_tag(skb, desc, data_size);
+
netif_receive_skb(skb);
u64_stats_update_begin(&rp->rx_stats.syncp);
@@ -2051,35 +2145,16 @@ static int rhine_rx(struct net_device *dev, int limit)
rp->rx_stats.packets++;
u64_stats_update_end(&rp->rx_stats.syncp);
}
+give_descriptor_to_nic:
+ desc->rx_status = cpu_to_le32(DescOwn);
entry = (++rp->cur_rx) % RX_RING_SIZE;
- rp->rx_head_desc = &rp->rx_ring[entry];
- }
-
- /* Refill the Rx ring buffers. */
- for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
- struct sk_buff *skb;
- entry = rp->dirty_rx % RX_RING_SIZE;
- if (rp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
- rp->rx_skbuff[entry] = skb;
- if (skb == NULL)
- break; /* Better luck next round. */
- rp->rx_skbuff_dma[entry] =
- dma_map_single(hwdev, skb->data,
- rp->rx_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(hwdev,
- rp->rx_skbuff_dma[entry])) {
- dev_kfree_skb(skb);
- rp->rx_skbuff_dma[entry] = 0;
- break;
- }
- rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
- }
- rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
}
return count;
+
+drop:
+ dev->stats.rx_dropped++;
+ goto give_descriptor_to_nic;
}
static void rhine_restart_tx(struct net_device *dev) {
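The restructured rhine_rx() above follows one policy: frames below rx_copybreak are copied into a fresh skb, larger frames are handed up only once a replacement Rx buffer has been mapped, and on any allocation failure the frame is dropped while the descriptor is still returned to the NIC. A compilable userspace sketch of that policy, with made-up stand-ins for the skb and DMA handling:

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

#define RX_COPYBREAK 256

struct buf { unsigned char *data; size_t len; };

static bool alloc_buf(struct buf *b, size_t size)
{
	b->data = malloc(size);
	b->len = size;
	return b->data != NULL;
}

/* Returns the buffer to deliver, or NULL when the frame is dropped;
 * *ring_slot stays usable for the next receive either way. */
static struct buf *receive_one(struct buf *ring_slot, size_t pkt_len,
			       size_t bufsz, unsigned int *rx_dropped)
{
	struct buf *out = malloc(sizeof(*out));

	if (!out)
		goto drop;

	if (pkt_len < RX_COPYBREAK) {
		/* Copybreak: small frame, copy it and keep the ring buffer. */
		if (!alloc_buf(out, pkt_len))
			goto drop_free;
		memcpy(out->data, ring_slot->data, pkt_len);
	} else {
		/* Hand the ring buffer up only once a replacement exists. */
		struct buf replacement;

		if (!alloc_buf(&replacement, bufsz))
			goto drop_free;
		*out = *ring_slot;
		out->len = pkt_len;
		*ring_slot = replacement;
	}
	return out;

drop_free:
	free(out);
drop:
	(*rx_dropped)++;
	return NULL;	/* the caller re-arms the descriptor regardless */
}

int main(void)
{
	static unsigned char backing[2048];
	struct buf slot = { backing, sizeof(backing) };
	unsigned int dropped = 0;
	struct buf *small = receive_one(&slot, 60, sizeof(backing), &dropped);
	struct buf *large = receive_one(&slot, 1500, sizeof(backing), &dropped);

	return !(small && large && dropped == 0);
}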
@@ -2484,9 +2559,8 @@ static int rhine_resume(struct device *device)
enable_mmio(rp->pioaddr, rp->quirks);
rhine_power_init(dev);
free_tbufs(dev);
- free_rbufs(dev);
alloc_tbufs(dev);
- alloc_rbufs(dev);
+ rhine_reset_rbufs(rp);
rhine_task_enable(rp);
spin_lock_bh(&rp->lock);
init_registers(dev);
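rhine_reset_task() and rhine_resume() above now keep their Rx buffers mapped and merely re-arm the ring through rhine_reset_rbufs(). A tiny standalone sketch of that re-arming, with simplified stand-ins for the driver's structures (the DescOwn bit value is an assumption):

#include <stdint.h>

#define RX_RING_SIZE 64
#define DESC_OWN (1u << 31)	/* "descriptor owned by the NIC" */

struct rx_desc { uint32_t rx_status; };

struct ring {
	struct rx_desc desc[RX_RING_SIZE];
	unsigned int cur_rx;
};

/* Counterpart of rhine_reset_rbufs(): no freeing, no reallocation, just
 * hand every descriptor back to the NIC and rewind the consumer index. */
static void reset_rbufs(struct ring *r)
{
	r->cur_rx = 0;
	for (int i = 0; i < RX_RING_SIZE; i++)
		r->desc[i].rx_status = DESC_OWN;
}

int main(void)
{
	static struct ring r;

	reset_rbufs(&r);
	return !(r.desc[RX_RING_SIZE - 1].rx_status == DESC_OWN);
}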
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
index b4d281626..f98b91d21 100644
--- a/drivers/net/ethernet/wiznet/Kconfig
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_WIZNET
depends on HAS_IOMEM
default y
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 7b90a5eba..4f5c024c6 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_XILINX
default y
depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index af2694dc6..5a1068df7 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -62,12 +62,12 @@
u32 temac_ior(struct temac_local *lp, int offset)
{
- return in_be32((u32 *)(lp->regs + offset));
+ return in_be32(lp->regs + offset);
}
void temac_iow(struct temac_local *lp, int offset, u32 value)
{
- out_be32((u32 *) (lp->regs + offset), value);
+ out_be32(lp->regs + offset, value);
}
int temac_indirect_busywait(struct temac_local *lp)
@@ -124,7 +124,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
*/
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
- return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
+ return in_be32(lp->sdma_regs + (reg << 2));
}
/**
@@ -134,7 +134,7 @@ static u32 temac_dma_in32(struct temac_local *lp, int reg)
*/
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
- out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+ out_be32(lp->sdma_regs + (reg << 2), value);
}
/* DMA register access functions can be DCR based or memory mapped.
@@ -400,7 +400,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
mutex_unlock(&lp->indirect_mutex);
}
-struct temac_option {
+static struct temac_option {
int flg;
u32 opt;
u32 reg;
@@ -587,7 +587,7 @@ static void temac_device_reset(struct net_device *ndev)
ndev->trans_start = jiffies; /* prevent tx timeout */
}
-void temac_adjust_link(struct net_device *ndev)
+static void temac_adjust_link(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct phy_device *phy = lp->phy_dev;
@@ -688,10 +688,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev)) {
+ if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
- }
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 4c9b4fa1d..7cb9abac9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -11,16 +11,16 @@
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
-#define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */
#define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
#define XAE_MTU 1500 /* Max MTU of an Ethernet frame */
#define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
#define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
-#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
+#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
/* Configuration options */
@@ -38,18 +38,21 @@
#define XAE_OPTION_FLOW_CONTROL (1 << 4)
/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
- * stripped. Default: disabled (set) */
+ * stripped. Default: disabled (set)
+ */
#define XAE_OPTION_FCS_STRIP (1 << 5)
/* Generate FCS field and add PAD automatically for outgoing frames.
- * Default: enabled (set) */
+ * Default: enabled (set)
+ */
#define XAE_OPTION_FCS_INSERT (1 << 6)
/* Enable Length/Type error checking for incoming frames. When this option is
* set, the MAC will filter frames that have a mismatched type/length field
* and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these
* types of frames are encountered. When this option is cleared, the MAC will
- * allow these types of frames to be received. Default: enabled (set) */
+ * allow these types of frames to be received. Default: enabled (set)
+ */
#define XAE_OPTION_LENTYPE_ERR (1 << 7)
/* Enable the transmitter. Default: enabled (set) */
@@ -159,12 +162,12 @@
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
-#define XAE_MDIO_MIP_OFFSET 0x00000620 /* MII Mgmt Interrupt Pending
- * register offset */
-#define XAE_MDIO_MIE_OFFSET 0x00000640 /* MII Management Interrupt Enable
- * register offset */
-#define XAE_MDIO_MIC_OFFSET 0x00000660 /* MII Management Interrupt Clear
- * register offset. */
+/* MII Mgmt Interrupt Pending register offset */
+#define XAE_MDIO_MIP_OFFSET 0x00000620
+/* MII Management Interrupt Enable register offset */
+#define XAE_MDIO_MIE_OFFSET 0x00000640
+/* MII Management Interrupt Clear register offset. */
+#define XAE_MDIO_MIC_OFFSET 0x00000660
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
@@ -176,18 +179,17 @@
#define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */
/* Bit Masks for Axi Ethernet RAF register */
-#define XAE_RAF_MCSTREJ_MASK 0x00000002 /* Reject receive multicast
- * destination address */
-#define XAE_RAF_BCSTREJ_MASK 0x00000004 /* Reject receive broadcast
- * destination address */
+/* Reject receive multicast destination address */
+#define XAE_RAF_MCSTREJ_MASK 0x00000002
+/* Reject receive broadcast destination address */
+#define XAE_RAF_BCSTREJ_MASK 0x00000004
#define XAE_RAF_TXVTAGMODE_MASK 0x00000018 /* Tx VLAN TAG mode */
#define XAE_RAF_RXVTAGMODE_MASK 0x00000060 /* Rx VLAN TAG mode */
#define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */
#define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */
#define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */
-#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 /* Exteneded Multicast
- * Filtering mode
- */
+/* Extended Multicast Filtering mode */
+#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000
#define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. Counter Reset */
#define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */
#define XAE_RAF_TXVTAGMODE_SHIFT 3 /* Tx Tag mode shift bits */
@@ -197,15 +199,16 @@
/* Bit Masks for Axi Ethernet TPF and IFGP registers */
#define XAE_TPF_TPFV_MASK 0x0000FFFF /* Tx pause frame value */
-#define XAE_IFGP0_IFGP_MASK 0x0000007F /* Transmit inter-frame
- * gap adjustment value */
+/* Transmit inter-frame gap adjustment value */
+#define XAE_IFGP0_IFGP_MASK 0x0000007F
/* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply
- * for all 3 registers. */
-#define XAE_INT_HARDACSCMPLT_MASK 0x00000001 /* Hard register access
- * complete */
-#define XAE_INT_AUTONEG_MASK 0x00000002 /* Auto negotiation
- * complete */
+ * for all 3 registers.
+ */
+/* Hard register access complete */
+#define XAE_INT_HARDACSCMPLT_MASK 0x00000001
+/* Auto negotiation complete */
+#define XAE_INT_AUTONEG_MASK 0x00000002
#define XAE_INT_RXCMPIT_MASK 0x00000004 /* Rx complete */
#define XAE_INT_RXRJECT_MASK 0x00000008 /* Rx frame rejected */
#define XAE_INT_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */
@@ -215,10 +218,9 @@
#define XAE_INT_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */
#define XAE_INT_ALL_MASK 0x0000003F /* All the ints */
+/* INT bits that indicate receive errors */
#define XAE_INT_RECV_ERROR_MASK \
- (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that
- * indicate receive
- * errors */
+ (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK)
/* Bit masks for Axi Ethernet VLAN TPID Word 0 register */
#define XAE_TPID_0_MASK 0x0000FFFF /* TPID 0 */
@@ -231,27 +233,28 @@
/* Bit masks for Axi Ethernet RCW1 register */
#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
-#define XAE_RCW1_FCS_MASK 0x20000000 /* In-Band FCS enable
- * (FCS not stripped) */
+/* In-Band FCS enable (FCS not stripped) */
+#define XAE_RCW1_FCS_MASK 0x20000000
#define XAE_RCW1_RX_MASK 0x10000000 /* Receiver enable */
#define XAE_RCW1_VLAN_MASK 0x08000000 /* VLAN frame enable */
-#define XAE_RCW1_LT_DIS_MASK 0x02000000 /* Length/type field valid check
- * disable */
-#define XAE_RCW1_CL_DIS_MASK 0x01000000 /* Control frame Length check
- * disable */
-#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address
- * bits [47:32]. Bits [31:0] are
- * stored in register RCW0 */
+/* Length/type field valid check disable */
+#define XAE_RCW1_LT_DIS_MASK 0x02000000
+/* Control frame Length check disable */
+#define XAE_RCW1_CL_DIS_MASK 0x01000000
+/* Pause frame source address bits [47:32]. Bits [31:0] are
+ * stored in register RCW0
+ */
+#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet TC register */
#define XAE_TC_RST_MASK 0x80000000 /* Reset */
#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
-#define XAE_TC_FCS_MASK 0x20000000 /* In-Band FCS enable
- * (FCS not generated) */
+/* In-Band FCS enable (FCS not generated) */
+#define XAE_TC_FCS_MASK 0x20000000
#define XAE_TC_TX_MASK 0x10000000 /* Transmitter enable */
#define XAE_TC_VLAN_MASK 0x08000000 /* VLAN frame enable */
-#define XAE_TC_IFG_MASK 0x02000000 /* Inter-frame gap adjustment
- * enable */
+/* Inter-frame gap adjustment enable */
+#define XAE_TC_IFG_MASK 0x02000000
/* Bit masks for Axi Ethernet FCC register */
#define XAE_FCC_FCRX_MASK 0x20000000 /* Rx flow control enable */
@@ -301,10 +304,10 @@
#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
/* Bit masks for Axi Ethernet UAW1 register */
-#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF /* Station address bits
- * [47:32]; Station address
- * bits [31:0] are stored in
- * register UAW0 */
+/* Station address bits [47:32]; Station address
+ * bits [31:0] are stored in register UAW0
+ */
+#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet FMI register */
#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
@@ -320,8 +323,8 @@
#define XAE_PHY_TYPE_SGMII 4
#define XAE_PHY_TYPE_1000BASE_X 5
-#define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Total number of entries in the
- * hardware multicast table. */
+ /* Total number of entries in the hardware multicast table. */
+#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
@@ -407,8 +410,11 @@ struct axidma_bd {
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
- * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
- * can handle jumbo packets, this entry will be 1, else 0.
+ * @rxmem: Stores rx memory size for jumbo frame handling.
+ * @csum_offload_on_tx_path: Stores the checksum selection on TX side.
+ * @csum_offload_on_rx_path: Stores the checksum selection on RX side.
+ * @coalesce_count_rx: Store the irq coalesce on RX side.
+ * @coalesce_count_tx: Store the irq coalesce on TX side.
*/
struct axienet_local {
struct net_device *ndev;
@@ -446,7 +452,7 @@ struct axienet_local {
u32 rx_bd_ci;
u32 max_frm_size;
- u32 jumbo_support;
+ u32 rxmem;
int csum_offload_on_tx_path;
int csum_offload_on_rx_path;
@@ -472,7 +478,7 @@ struct axienet_option {
* @lp: Pointer to axienet local structure
* @offset: Address offset from the base address of Axi Ethernet core
*
- * returns: The contents of the Axi Ethernet register
+ * Return: The contents of the Axi Ethernet register
*
* This function returns the contents of the corresponding register.
*/
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 28b7e7d9c..d95f9aae9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -117,7 +117,7 @@ static struct axienet_option axienet_options[] = {
* @lp: Pointer to axienet local structure
* @reg: Address offset from the base address of the Axi DMA core
*
- * returns: The contents of the Axi DMA register
+ * Return: The contents of the Axi DMA register
*
* This function returns the contents of the corresponding Axi DMA register.
*/
@@ -179,8 +179,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
* axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
* @ndev: Pointer to the net_device structure
*
- * returns: 0, on success
- * -ENOMEM, on failure
+ * Return: 0 on success, -ENOMEM on failure
*
* This function is called to initialize the Rx and Tx DMA descriptor
* rings. This initializes the descriptors with required default values
@@ -198,9 +197,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- /*
- * Allocate the Tx and Rx buffer descriptors.
- */
+ /* Allocate the Tx and Rx buffer descriptors. */
lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL);
@@ -263,7 +260,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.*/
+ * halted state. This will make the Rx side ready for reception.
+ */
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -273,7 +271,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting */
+ * tail pointer register that the Tx channel will start transmitting.
+ */
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -320,7 +319,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address)
* @ndev: Pointer to the net_device structure
* @p: 6 byte Address to be written as MAC address
*
- * returns: 0 for all conditions. Presently, there is no failure case.
+ * Return: 0 for all conditions. Presently, there is no failure case.
*
* This function is called to initialize the MAC address of the Axi Ethernet
* core. It calls the core specific axienet_set_mac_address. This is the
@@ -354,7 +353,8 @@ static void axienet_set_multicast_list(struct net_device *ndev)
netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
/* We must make the kernel realize we had to move into
* promiscuous mode. If it was a promiscuous mode request
- * the flag is already set. If not we set it. */
+ * the flag is already set. If not we set it.
+ */
ndev->flags |= IFF_PROMISC;
reg = axienet_ior(lp, XAE_FMI_OFFSET);
reg |= XAE_FMI_PM_MASK;
@@ -438,14 +438,15 @@ static void __axienet_device_reset(struct axienet_local *lp,
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
- * reset process. */
+ * reset process.
+ */
axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
- dev_err(dev, "axienet_device_reset DMA "
- "reset timeout!\n");
+ netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+ __func__);
break;
}
}
@@ -471,19 +472,21 @@ static void axienet_device_reset(struct net_device *ndev)
__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
+ lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU) &&
- (lp->jumbo_support)) {
- lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
- XAE_TRL_SIZE;
- lp->options |= XAE_OPTION_JUMBO;
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
+ lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
+ XAE_TRL_SIZE;
+
+ if (lp->max_frm_size <= lp->rxmem)
+ lp->options |= XAE_OPTION_JUMBO;
}
if (axienet_dma_bd_init(ndev)) {
- dev_err(&ndev->dev, "axienet_device_reset descriptor "
- "allocation failed\n");
+ netdev_err(ndev, "%s: descriptor allocation failed\n",
+ __func__);
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -497,7 +500,8 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
- * transmitter disabled.*/
+ * transmitter disabled.
+ */
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
@@ -558,8 +562,8 @@ static void axienet_adjust_link(struct net_device *ndev)
lp->last_link = link_state;
phy_print_status(phy);
} else {
- dev_err(&ndev->dev, "Error setting Axi Ethernet "
- "mac speed\n");
+ netdev_err(ndev,
+ "Error setting Axi Ethernet mac speed\n");
}
}
}
@@ -617,7 +621,7 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* @lp: Pointer to the axienet_local structure
* @num_frag: The number of BDs to check for
*
- * returns: 0, on success
+ * Return: 0, on success
* NETDEV_TX_BUSY, if any of the descriptors are not free
*
* This function is invoked before BDs are allocated and transmission starts.
@@ -640,7 +644,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
* @skb: sk_buff pointer that contains data to be Txed.
* @ndev: Pointer to net_device structure.
*
- * returns: NETDEV_TX_OK, on success
+ * Return: NETDEV_TX_OK, on success
* NETDEV_TX_BUSY, if any of the descriptors are not free
*
* This function is invoked from upper layers to initiate transmission. The
@@ -726,15 +730,15 @@ static void axienet_recv(struct net_device *ndev)
u32 csumstatus;
u32 size = 0;
u32 packets = 0;
- dma_addr_t tail_p;
+ dma_addr_t tail_p = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
skb = (struct sk_buff *) (cur_p->sw_id_offset);
length = cur_p->app4 & 0x0000FFFF;
@@ -786,7 +790,8 @@ static void axienet_recv(struct net_device *ndev)
ndev->stats.rx_packets += packets;
ndev->stats.rx_bytes += size;
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ if (tail_p)
+ axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
/**
@@ -794,7 +799,7 @@ static void axienet_recv(struct net_device *ndev)
* @irq: irq number
* @_ndev: net_device pointer
*
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
*
* This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
* to complete the BD processing.
@@ -808,6 +813,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
axienet_start_xmit_done(lp->ndev);
goto out;
}
@@ -831,9 +837,9 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&lp->dma_err_tasklet);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
return IRQ_HANDLED;
}
@@ -842,7 +848,7 @@ out:
* @irq: irq number
* @_ndev: net_device pointer
*
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
*
* This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
* processing.
@@ -856,6 +862,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
axienet_recv(lp->ndev);
goto out;
}
@@ -879,9 +886,9 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&lp->dma_err_tasklet);
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
return IRQ_HANDLED;
}
@@ -891,7 +898,7 @@ static void axienet_dma_err_handler(unsigned long data);
* axienet_open - Driver open routine.
* @ndev: Pointer to net_device structure
*
- * returns: 0, on success.
+ * Return: 0, on success.
* -ENODEV, if PHY cannot be connected to
* non-zero error value on failure
*
@@ -914,7 +921,8 @@ static int axienet_open(struct net_device *ndev)
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards. */
+ * process is started, MDIO will be broken afterwards.
+ */
axienet_iow(lp, XAE_MDIO_MC_OFFSET,
(mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
axienet_device_reset(ndev);
@@ -925,14 +933,20 @@ static int axienet_open(struct net_device *ndev)
return ret;
if (lp->phy_node) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ if (lp->phy_type == XAE_PHY_TYPE_GMII) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
axienet_adjust_link, 0,
PHY_INTERFACE_MODE_GMII);
- if (!lp->phy_dev) {
- dev_err(lp->dev, "of_phy_connect() failed\n");
- return -ENODEV;
+ } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII_ID);
}
- phy_start(lp->phy_dev);
+
+ if (!lp->phy_dev)
+ dev_err(lp->dev, "of_phy_connect() failed\n");
+ else
+ phy_start(lp->phy_dev);
}
/* Enable tasklets for Axi DMA error handling */
@@ -965,7 +979,7 @@ err_tx_irq:
* axienet_stop - Driver stop routine.
* @ndev: Pointer to net_device structure
*
- * returns: 0, on success.
+ * Return: 0, on success.
*
* This is the driver stop routine. It calls phy_disconnect to stop the PHY
* device. It also removes the interrupt handlers and disables the interrupts.
@@ -1005,7 +1019,7 @@ static int axienet_stop(struct net_device *ndev)
* @ndev: Pointer to net_device structure
* @new_mtu: New mtu value to be applied
*
- * returns: Always returns 0 (success).
+ * Return: Always returns 0 (success).
*
* This is the change mtu driver routine. It checks if the Axi Ethernet
* hardware supports jumbo frames before changing the mtu. This can be
@@ -1017,15 +1031,15 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- if (lp->jumbo_support) {
- if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
- return -EINVAL;
- ndev->mtu = new_mtu;
- } else {
- if ((new_mtu > XAE_MTU) || (new_mtu < 64))
- return -EINVAL;
- ndev->mtu = new_mtu;
- }
+
+ if ((new_mtu + VLAN_ETH_HLEN +
+ XAE_TRL_SIZE) > lp->rxmem)
+ return -EINVAL;
+
+ if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
+ return -EINVAL;
+
+ ndev->mtu = new_mtu;
return 0;
}
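With jumbo_support gone, axienet_change_mtu() above reduces to arithmetic against the Rx memory size read from "xlnx,rxmem": the whole frame (MTU + VLAN Ethernet header + FCS) has to fit. A standalone sketch of that check; the constants mirror the kernel headers while the sample rxmem values are made up:

#include <stdio.h>
#include <stdbool.h>

#define VLAN_ETH_HLEN	18	/* Ethernet header plus 802.1Q tag */
#define XAE_TRL_SIZE	 4	/* FCS */
#define XAE_JUMBO_MTU	9000

static bool mtu_ok(unsigned int new_mtu, unsigned int rxmem)
{
	if (new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE > rxmem)
		return false;
	return new_mtu >= 64 && new_mtu <= XAE_JUMBO_MTU;
}

int main(void)
{
	/* 16 KiB of Rx memory fits a 9000-byte jumbo frame
	 * (9000 + 18 + 4 = 9022); 2 KiB only fits standard frames. */
	printf("jumbo/16K: %d  jumbo/2K: %d  1500/2K: %d\n",
	       mtu_ok(9000, 16384), mtu_ok(9000, 2048), mtu_ok(1500, 2048));
	return 0;
}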
@@ -1072,6 +1086,8 @@ static const struct net_device_ops axienet_netdev_ops = {
* not be found, the function returns -ENODEV. This function calls the
* relevant PHY ethtool API to get the PHY settings.
* Issue "ethtool ethX" under linux prompt to execute this function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
*/
static int axienet_ethtools_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
@@ -1093,6 +1109,8 @@ static int axienet_ethtools_get_settings(struct net_device *ndev,
* relevant PHY ethtool API to set the PHY.
* Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
* function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
*/
static int axienet_ethtools_set_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
@@ -1127,6 +1145,8 @@ static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
*
* This implements ethtool command for getting the total register length
* information.
+ *
+ * Return: the total regs length
*/
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
@@ -1213,11 +1233,13 @@ axienet_ethtools_get_pauseparam(struct net_device *ndev,
* axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
* settings.
* @ndev: Pointer to net_device structure
- * @epauseparam:Pointer to ethtool_pauseparam structure
+ * @epauseparm:Pointer to ethtool_pauseparam structure
*
* This implements ethtool command for enabling flow control on Rx and Tx
* paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
* function.
+ *
+ * Return: 0 on success, -EFAULT if device is running
*/
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
@@ -1227,8 +1249,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- printk(KERN_ERR "%s: Please stop netif before applying "
- "configruation\n", ndev->name);
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
return -EFAULT;
}
@@ -1254,6 +1276,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
* This implements ethtool command for getting the DMA interrupt coalescing
* count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
* execute this function.
+ *
+ * Return: 0 always
*/
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *ecoalesce)
@@ -1277,6 +1301,8 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
* This implements ethtool command for setting the DMA interrupt coalescing
* count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
* prompt to execute this function.
+ *
+ * Return: 0, on success, Non-zero error value on failure.
*/
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *ecoalesce)
@@ -1284,8 +1310,8 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- printk(KERN_ERR "%s: Please stop netif before applying "
- "configruation\n", ndev->name);
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
return -EFAULT;
}
@@ -1354,7 +1380,8 @@ static void axienet_dma_err_handler(unsigned long data)
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards. */
+ * process is started, MDIO will be broken afterwards.
+ */
axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
~XAE_MDIO_MC_MDIOEN_MASK));
@@ -1425,7 +1452,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.*/
+ * halted state. This will make the Rx side ready for reception.
+ */
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -1435,7 +1463,8 @@ static void axienet_dma_err_handler(unsigned long data)
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting */
+ * tail pointer register that the Tx channel will start transmitting
+ */
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -1451,7 +1480,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
- * transmitter disabled.*/
+ * transmitter disabled.
+ */
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
@@ -1460,11 +1490,10 @@ static void axienet_dma_err_handler(unsigned long data)
}
/**
- * axienet_of_probe - Axi Ethernet probe function.
- * @op: Pointer to platform device structure.
- * @match: Pointer to device id structure
+ * axienet_probe - Axi Ethernet probe function.
+ * @pdev: Pointer to platform device structure.
*
- * returns: 0, on success
+ * Return: 0, on success
* Non-zero error value on failure.
*
* This is the probe routine for Axi Ethernet driver. This is called before
@@ -1472,22 +1501,23 @@ static void axienet_dma_err_handler(unsigned long data)
* device. Parses through device tree and populates fields of
* axienet_local. It registers the Ethernet device.
*/
-static int axienet_of_probe(struct platform_device *op)
+static int axienet_probe(struct platform_device *pdev)
{
- __be32 *p;
- int size, ret = 0;
+ int ret;
struct device_node *np;
struct axienet_local *lp;
struct net_device *ndev;
- const void *addr;
+ u8 mac_addr[6];
+ struct resource *ethres, dmares;
+ u32 value;
ndev = alloc_etherdev(sizeof(*lp));
if (!ndev)
return -ENOMEM;
- platform_set_drvdata(op, ndev);
+ platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &op->dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &axienet_netdev_ops;
@@ -1495,21 +1525,23 @@ static int axienet_of_probe(struct platform_device *op)
lp = netdev_priv(ndev);
lp->ndev = ndev;
- lp->dev = &op->dev;
+ lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
/* Map device registers */
- lp->regs = of_iomap(op->dev.of_node, 0);
- if (!lp->regs) {
- dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
- ret = -ENOMEM;
- goto nodev;
+ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+ if (IS_ERR(lp->regs)) {
+ dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
+ ret = PTR_ERR(lp->regs);
+ goto free_netdev;
}
+
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
- if (p) {
- switch (be32_to_cpup(p)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
+ if (!ret) {
+ switch (value) {
case 1:
lp->csum_offload_on_tx_path =
XAE_FEATURE_PARTIAL_TX_CSUM;
@@ -1528,9 +1560,9 @@ static int axienet_of_probe(struct platform_device *op)
lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
}
}
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
- if (p) {
- switch (be32_to_cpup(p)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
+ if (!ret) {
+ switch (value) {
case 1:
lp->csum_offload_on_rx_path =
XAE_FEATURE_PARTIAL_RX_CSUM;
@@ -1546,82 +1578,77 @@ static int axienet_of_probe(struct platform_device *op)
}
}
/* For supporting jumbo frames, the Axi Ethernet hardware must have
- * a larger Rx/Tx Memory. Typically, the size must be more than or
- * equal to 16384 bytes, so that we can enable jumbo option and start
- * supporting jumbo frames. Here we check for memory allocated for
- * Rx/Tx in the hardware from the device-tree and accordingly set
- * flags. */
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
- if (p) {
- if ((be32_to_cpup(p)) >= 0x4000)
- lp->jumbo_support = 1;
- }
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
- if (p)
- lp->phy_type = be32_to_cpup(p);
+ * a larger Rx/Tx Memory. Typically, the size must be large so that
+ * we can enable jumbo option and start supporting jumbo frames.
+ * Here we check for memory allocated for Rx/Tx in the hardware from
+ * the device-tree and accordingly set flags.
+ */
+ of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+ of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&op->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto err_iounmap;
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
+ if (IS_ERR(np)) {
+ dev_err(&pdev->dev, "could not find DMA node\n");
+ ret = PTR_ERR(np);
+ goto free_netdev;
}
- lp->dma_regs = of_iomap(np, 0);
- if (lp->dma_regs) {
- dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
- } else {
- dev_err(&op->dev, "unable to map DMA registers\n");
- of_node_put(np);
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get DMA resource\n");
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+ if (IS_ERR(lp->dma_regs)) {
+ dev_err(&pdev->dev, "could not map DMA regs\n");
+ ret = PTR_ERR(lp->dma_regs);
+ goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&op->dev, "could not determine irqs\n");
+ dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
- goto err_iounmap_2;
+ goto free_netdev;
}
/* Retrieve the MAC address */
- addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
- if ((!addr) || (size != 6)) {
- dev_err(&op->dev, "could not find MAC address\n");
- ret = -ENODEV;
- goto err_iounmap_2;
+ ret = of_property_read_u8_array(pdev->dev.of_node,
+ "local-mac-address", mac_addr, 6);
+ if (ret) {
+ dev_err(&pdev->dev, "could not find MAC address\n");
+ goto free_netdev;
}
- axienet_set_mac_address(ndev, (void *) addr);
+ axienet_set_mac_address(ndev, (void *)mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
- ret = axienet_mdio_setup(lp, op->dev.of_node);
- if (ret)
- dev_warn(&op->dev, "error registering MDIO bus\n");
+ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (lp->phy_node) {
+ ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+ if (ret)
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ }
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto err_iounmap_2;
+ goto free_netdev;
}
return 0;
-err_iounmap_2:
- if (lp->dma_regs)
- iounmap(lp->dma_regs);
-err_iounmap:
- iounmap(lp->regs);
-nodev:
+free_netdev:
free_netdev(ndev);
- ndev = NULL;
+
return ret;
}
-static int axienet_of_remove(struct platform_device *op)
+static int axienet_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(op);
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
axienet_mdio_teardown(lp);
@@ -1630,24 +1657,21 @@ static int axienet_of_remove(struct platform_device *op)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
- iounmap(lp->regs);
- if (lp->dma_regs)
- iounmap(lp->dma_regs);
free_netdev(ndev);
return 0;
}
-static struct platform_driver axienet_of_driver = {
- .probe = axienet_of_probe,
- .remove = axienet_of_remove,
+static struct platform_driver axienet_driver = {
+ .probe = axienet_probe,
+ .remove = axienet_remove,
.driver = {
.name = "xilinx_axienet",
.of_match_table = axienet_of_match,
},
};
-module_platform_driver(axienet_of_driver);
+module_platform_driver(axienet_driver);
MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 3b67d60d4..2a5a16834 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -37,7 +37,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
* @phy_id: Address of the PHY device
* @reg: PHY register to read
*
- * returns: The register contents on success, -ETIMEDOUT on a timeout
+ * Return: The register contents on success, -ETIMEDOUT on a timeout
*
* Reads the contents of the requested register from the requested PHY
* address by first writing the details into MCR register. After a while
@@ -80,7 +80,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* @reg: PHY register to write to
* @val: Value to be written into the register
*
- * returns: 0 on success, -ETIMEDOUT on a timeout
+ * Return: 0 on success, -ETIMEDOUT on a timeout
*
* Writes the value to the requested register by first writing the value
* into MWD register. The the MCR register is then appropriately setup
@@ -119,7 +119,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
* @lp: Pointer to axienet local data structure.
* @np: Pointer to device node
*
- * returns: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
* mdiobus_alloc (to allocate memory for mii bus structure) fails.
*
* Sets up the MDIO interface by initializing the MDIO clock and enabling the
@@ -161,19 +161,19 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
np1 = of_find_node_by_name(NULL, "cpu");
if (!np1) {
- printk(KERN_WARNING "%s(): Could not find CPU device node.",
- __func__);
- printk(KERN_WARNING "Setting MDIO clock divisor to "
- "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+ netdev_warn(lp->ndev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
goto issue;
}
property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
if (!property_p) {
- printk(KERN_WARNING "%s(): Could not find CPU property: "
- "clock-frequency.", __func__);
- printk(KERN_WARNING "Setting MDIO clock divisor to "
- "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ netdev_warn(lp->ndev, "clock-frequency property not found.\n");
+ netdev_warn(lp->ndev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
of_node_put(np1);
goto issue;
@@ -183,12 +183,14 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
- * 1 to the clock divisor or we will surely be above 2.5 MHz */
+ * 1 to the clock divisor or we will surely be above 2.5 MHz
+ */
if (host_clock % (MAX_MDIO_FREQ * 2))
clk_div++;
- printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
- "on %u Hz host clock.\n", __func__, clk_div, host_clock);
+ netdev_dbg(lp->ndev,
+ "Setting MDIO clock divisor to %u/%u Hz host clock.\n",
+ clk_div, host_clock);
of_node_put(np1);
issue:
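The divisor computation above targets an MDIO clock of host_clock / ((clk_div + 1) * 2), which must not exceed 2.5 MHz; the remainder test rounds the divisor up whenever truncation would overshoot. A standalone check with two illustrative host clocks (the sample frequencies are not from this diff):

#include <stdio.h>

#define MAX_MDIO_FREQ 2500000	/* 2.5 MHz */

static unsigned int mdio_clk_div(unsigned int host_clock)
{
	unsigned int clk_div = host_clock / (MAX_MDIO_FREQ * 2) - 1;

	/* A remainder means the truncated divisor would push the MDIO
	 * clock above 2.5 MHz, so bump the divisor by one. */
	if (host_clock % (MAX_MDIO_FREQ * 2))
		clk_div++;
	return clk_div;
}

int main(void)
{
	unsigned int clocks[] = { 200000000, 66000000 };

	for (int i = 0; i < 2; i++) {
		unsigned int div = mdio_clk_div(clocks[i]);

		/* 200 MHz -> div 39 -> exactly 2.5 MHz;
		 * 66 MHz -> div 13 -> ~2.36 MHz (12 would give ~2.54 MHz). */
		printf("host %u Hz: clk_div %u -> MDIO %u Hz\n",
		       clocks[i], div, clocks[i] / ((div + 1) * 2));
	}
	return 0;
}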
diff --git a/drivers/net/ethernet/xircom/Kconfig b/drivers/net/ethernet/xircom/Kconfig
index 69f56a6de..d6208a4c9 100644
--- a/drivers/net/ethernet/xircom/Kconfig
+++ b/drivers/net/ethernet/xircom/Kconfig
@@ -7,9 +7,7 @@ config NET_VENDOR_XIRCOM
default y
depends on PCMCIA
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index b81bc9fca..af3432fe9 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -8,9 +8,7 @@ config NET_VENDOR_XSCALE
depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \
IXP4XX_NPE && IXP4XX_QMGR)
---help---
- If you have a network (Ethernet) card belonging to this class, say Y
- and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question does not directly affect the
kernel: saying N will just cause the configurator to skip all